Columns (one row per file):

| column | type | notes |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 3 to 1.03M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 3 to 972 |
| max_stars_repo_name | string | length 6 to 130 |
| max_stars_repo_head_hexsha | string | length 40 to 78 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 | 1 to 191k, nullable |
| max_stars_repo_stars_event_min_datetime | string | length 24, nullable |
| max_stars_repo_stars_event_max_datetime | string | length 24, nullable |
| max_issues_repo_path | string | length 3 to 972 |
| max_issues_repo_name | string | length 6 to 130 |
| max_issues_repo_head_hexsha | string | length 40 to 78 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 | 1 to 116k, nullable |
| max_issues_repo_issues_event_min_datetime | string | length 24, nullable |
| max_issues_repo_issues_event_max_datetime | string | length 24, nullable |
| max_forks_repo_path | string | length 3 to 972 |
| max_forks_repo_name | string | length 6 to 130 |
| max_forks_repo_head_hexsha | string | length 40 to 78 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 | 1 to 105k, nullable |
| max_forks_repo_forks_event_min_datetime | string | length 24, nullable |
| max_forks_repo_forks_event_max_datetime | string | length 24, nullable |
| content | string | length 3 to 1.03M |
| avg_line_length | float64 | 1.13 to 941k |
| max_line_length | int64 | 2 to 941k |
| alphanum_fraction | float64 | 0 to 1 |
hexsha: cdfd4009029a872931c69808b1a6a9ea81b8ebbc | size: 64 | ext: py | lang: Python
path: venv/lib/python3.6/config-3.6m-x86_64-linux-gnu/python-config.py | repo: JamesMusyoka/Blog @ fdcb51cf4541bbb3b9b3e7a1c3735a0b1f45f0b5 | licenses: ["Unlicense"]
stars: 2 (2019-04-17T13:35:50.000Z to 2021-12-21T00:11:36.000Z) | issues: 2 (2021-03-31T19:51:24.000Z to 2021-06-10T23:05:09.000Z) | forks: 2 (2019-10-01T08:47:35.000Z to 2020-07-11T06:32:16.000Z)

/usr/lib/python3.6/config-3.6m-x86_64-linux-gnu/python-config.py

avg_line_length: 64 | max_line_length: 64 | alphanum_fraction: 0.796875
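The 64-byte "content" above is just a path, and it is exactly 64 characters long: the venv copy of python-config.py is almost certainly a symbolic link, and git stores a symlink as a blob whose bytes are the link target. A minimal sketch of recovering the target on a real checkout (illustration only, not from the repo):

    import os

    link = 'venv/lib/python3.6/config-3.6m-x86_64-linux-gnu/python-config.py'
    if os.path.islink(link):
        # prints /usr/lib/python3.6/config-3.6m-x86_64-linux-gnu/python-config.py
        print(os.readlink(link))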
hexsha: 351397cd23f7b21865b1eca38c7b47473188173d | size: 1,904 | ext: py | lang: Python
path: dbScrapy/dbScrapy/middlewares.py | repo: ViterbiDevelopment/Scrapy @ c1ea9d8e4c905f82f63763d06fc19c3477310604 | licenses: ["Apache-2.0"]
stars: null | issues: null | forks: null

# -*- coding: utf-8 -*-

# Define here the models for your spider middleware
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/spider-middleware.html

from scrapy import signals


class DbscrapySpiderMiddleware(object):
    # Not all methods need to be defined. If a method is not defined,
    # scrapy acts as if the spider middleware does not modify the
    # passed objects.

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_spider_input(self, response, spider):
        # Called for each response that goes through the spider
        # middleware and into the spider.
        # Should return None or raise an exception.
        return None

    def process_spider_output(self, response, result, spider):
        # Called with the results returned from the Spider, after
        # it has processed the response.
        # Must return an iterable of Request, dict or Item objects.
        for i in result:
            yield i

    def process_spider_exception(self, response, exception, spider):
        # Called when a spider or process_spider_input() method
        # (from other spider middleware) raises an exception.
        # Should return either None or an iterable of Response, dict
        # or Item objects.
        pass

    def process_start_requests(self, start_requests, spider):
        # Called with the start requests of the spider, and works
        # similarly to the process_spider_output() method, except
        # that it doesn’t have a response associated.
        # Must return only requests (not items).
        for r in start_requests:
            yield r

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)

avg_line_length: 33.403509 | max_line_length: 78 | alphanum_fraction: 0.673319
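A spider middleware only runs once it is registered in the project settings. A minimal sketch of enabling the class above, assuming the default dbScrapy project layout generated by scrapy startproject (the priority value 543 is just Scrapy's template convention):

    # dbScrapy/settings.py (sketch)
    SPIDER_MIDDLEWARES = {
        'dbScrapy.middlewares.DbscrapySpiderMiddleware': 543,
    }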
hexsha: bdeebc8be0cd39377ef3e642683de527cd436fce | size: 926 | ext: py | lang: Python
path: backend/api/favorites.py | repo: papayaer/zeno @ 38cd3b7c5d9180baa91af6d49fbf77bb98a9ac27 | licenses: ["MIT"]
stars: null | issues: 7 (2020-03-24T17:56:41.000Z to 2022-02-26T21:33:12.000Z) | forks: null

from flask import current_app, jsonify, request, url_for

from .api import api
from models import db, Permission, User, Post, Favorite
from .authentication import permission_required


# Has the current user favorited this post?
@api.route('/posts/<int:id>/favoriting/', methods=['GET'])
@permission_required(Permission.FOLLOW)
def has_favorit(user, id):
    post = Post.query.get_or_404(id)
    isfavorite = user.is_favoriting(post)
    return jsonify(isfavorite)


# Add a favorite
@api.route('/posts/<int:id>/favorite/', methods=['POST'])
@permission_required(Permission.WRITE)
def new_favorite(user, id):
    post = Post.query.get_or_404(id)
    user.do_favorite(post)
    db.session.commit()
    return jsonify(post.id)


# Remove a favorite
@api.route('/posts/<int:id>/unfavorite/', methods=['POST'])
@permission_required(Permission.WRITE)
def un_favorite(user, id):
    post = Post.query.get_or_404(id)
    user.unfavorite(post)
    db.session.commit()
    return jsonify(post.id)

avg_line_length: 28.060606 | max_line_length: 59 | alphanum_fraction: 0.726782
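The three handlers above lean on User helper methods defined elsewhere in the repo (models.py is not part of this record). A plausible SQLAlchemy sketch of what they might look like; the Favorite columns and the duplicate-check logic are my assumptions, not taken from the source:

    # Hypothetical sketch only; the real implementations live in models.py.
    class User(db.Model):
        # ... real columns omitted ...

        def is_favoriting(self, post):
            # True when a Favorite row already links this user to the post.
            return Favorite.query.filter_by(
                user_id=self.id, post_id=post.id).first() is not None

        def do_favorite(self, post):
            if not self.is_favoriting(post):
                db.session.add(Favorite(user_id=self.id, post_id=post.id))

        def unfavorite(self, post):
            fav = Favorite.query.filter_by(
                user_id=self.id, post_id=post.id).first()
            if fav is not None:
                db.session.delete(fav)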
hexsha: 417a44aa9bf9babfa2a73212de43645a1a32e61c | size: 460 | ext: py | lang: Python
path: server/main.py | repo: sagarmohanty2k00/Stockiva @ 60f8fb06a4b414ad3d9c342da02250b5ac37989b | licenses: ["MIT"]
stars: null | issues: null | forks: 3 (2022-01-26T16:52:18.000Z to 2022-01-26T17:24:51.000Z)

from fastapi import FastAPI

app = FastAPI()


@app.get('/')
def home():
    return {"message": "hello"}


@app.get('/users')
def all_users():
    return {"users": [
        {
            "name": "sagar",
            "id": 1,
        },
        {
            "name": "mohanty",
            "id": 2,
        },
    ]}


@app.post('/users/{id}')
def user(id):
    return {
        "name": "sagar",
        "id": 1,
    }

avg_line_length: 17.037037 | max_line_length: 33 | alphanum_fraction: 0.36087
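The routes above can be exercised without a running server through FastAPI's test client. A minimal sketch, assuming the file is saved as main.py and the test-client dependency (httpx on current FastAPI releases) is installed:

    # smoke_test.py (illustration only, not part of the repo)
    from fastapi.testclient import TestClient
    from main import app

    client = TestClient(app)
    assert client.get('/').json() == {"message": "hello"}
    assert len(client.get('/users').json()["users"]) == 2
    assert client.post('/users/1').json() == {"name": "sagar", "id": 1}

For a live server the usual invocation is `uvicorn main:app --reload` from the server/ directory.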
hexsha: 6b97f46ccfdca51e95dea52fcefadeb430e513ac | size: 126,551 | ext: py | lang: Python
path: uruler.py | repo: andycranston/uruler @ c3dcaa135e672b8a458c045e41f79140317664f0 | licenses: ["MIT"]
stars: null | issues: null | forks: null

#
# @(!--#) @(#) uruler.py, version 002, 29-july-2020
#
# create a PNG file called "uruler.png"
#
# uses the Pillow library
#
# pip install -U Pillow
#
##################################################################################
#
# imports
#
import sys
import os
from PIL import Image
##################################################################################
SQUAREWIDTH = 350
INNERSQUAREWIDTH = 90
NUMBERSQUARES = 6
PNGFILENAME = 'uruler.png'
##################################################################################
def drawbox(im, x, y, width):
    # outline a width x width square in black, top-left corner at (x, y)
    for i in range(0, width):
        im.putpixel( (x+i         , y+0         ), (0,0,0))
        im.putpixel( (x+i         , y+(width-1) ), (0,0,0))
        im.putpixel( (x+0         , y+i         ), (0,0,0))
        im.putpixel( (x+(width-1) , y+i         ), (0,0,0))

    return
##################################################################################
def drawhorizontalpixels(im, x, y, pixelstring):
    # starting at (x, y), plot one black pixel for each '1' in pixelstring,
    # advancing one pixel to the right per character
    for c in pixelstring:
        if c == '1':
            im.putpixel((x,y), (0,0,0))

        x += 1

    return
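# Illustration (editorial sketch, not part of the original file): both helpers
# draw onto an existing Pillow image, so a minimal standalone use would be:
#
#     im = Image.new('RGB', (200, 200), (255, 255, 255))
#     drawbox(im, 10, 10, 100)                   # hollow 100x100 square
#     drawhorizontalpixels(im, 10, 150, '1101')  # pixels at x=10, 11 and 13
#     im.save('demo.png')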
##################################################################################
def main():
    global progname

    digits = []

    for i in range(0, NUMBERSQUARES):
        digits.append([])
digits[0].append('000000000000000000000000000000000000000011111111111111110000000000000000')
digits[0].append('000000000000000000000000000000001111111111111111111111110000000000000000')
digits[0].append('000000000000000000000000000000001111111111111111111111110000000000000000')
digits[0].append('000000000000000000000000000000001111111111111111111111110000000000000000')
digits[0].append('000000000000000000000000000000001111111111111111111111110000000000000000')
digits[0].append('000000000000000000000000000000001111111111111111111111110000000000000000')
digits[0].append('000000000000000000000000000000001111111111111111111111110000000000000000')
digits[0].append('000000000000000000000000000000001111111111111111111111110000000000000000')
digits[0].append('000000000000000000000000000000001111111111111111111111110000000000000000')
digits[0].append('000000000000000000000000000000001111111111111111111111110000000000000000')
digits[0].append('000000000000000000000000000000001111111111111111111111110000000000000000')
digits[0].append('000000000000000000000000111111111111111111111111111111110000000000000000')
digits[0].append('000000000000000000000000111111111111111111111111111111110000000000000000')
digits[0].append('000000000000000000000000111111111111111111111111111111110000000000000000')
digits[0].append('000000000000000000000000111111111111111111111111111111110000000000000000')
digits[0].append('000000000000000011111111111111111111111111111111111111110000000000000000')
digits[0].append('000000000000000011111111111111111111111111111111111111110000000000000000')
digits[0].append('000000000000000011111111111111111111111111111111111111110000000000000000')
digits[0].append('000000000000000011111111111111111111111111111111111111110000000000000000')
digits[0].append('000000000000000011111111111111111111111111111111111111110000000000000000')
digits[0].append('000000000000000011111111111111111111111111111111111111110000000000000000')
digits[0].append('000000000000000011111111111111111111111111111111111111110000000000000000')
digits[0].append('000000000000000011111111111111111111111111111111111111110000000000000000')
digits[0].append('000000000000000011111111111111111111111111111111111111110000000000000000')
digits[0].append('000000000000000011111111111111111111111111111111111111110000000000000000')
digits[0].append('000000000000000011111111111111111111111111111111111111110000000000000000')
digits[0].append('000000000000000011111111111111111111111111111111111111110000000000000000')
digits[0].append('000000000000000011111111111111111111111111111111111111110000000000000000')
digits[0].append('000000000000000011111111111111111111111111111111111111110000000000000000')
digits[0].append('000000000000000011111111111111111111111111111111111111110000000000000000')
digits[0].append('000000000000000011111111111111111111111111111111111111110000000000000000')
digits[0].append('000000000000000011111111111111111111111111111111111111110000000000000000')
digits[0].append('000000000000000011111111111111111111111111111111111111110000000000000000')
digits[0].append('000000000000000011111111111111111111111111111111111111110000000000000000')
digits[0].append('000000000000000011111111111111111111111111111111111111110000000000000000')
digits[0].append('000000000000000011111111111111111111111111111111111111110000000000000000')
digits[0].append('000000000000000011111111111111111111111111111111111111110000000000000000')
digits[0].append('000000000000000011111111111111111111111111111111111111110000000000000000')
digits[0].append('000000000000000011111111111111111111111111111111111111110000000000000000')
digits[0].append('000000000000000011111111111111111111111111111111111111110000000000000000')
digits[0].append('000000000000000011111111111111111111111111111111111111110000000000000000')
digits[0].append('000000000000000011111111111111111111111111111111111111110000000000000000')
digits[0].append('000000000000000011111111111111111111111111111111111111110000000000000000')
digits[0].append('000000000000000011111111111111111111111111111111111111110000000000000000')
digits[0].append('000000000000000011111111111111111111111111111111111111110000000000000000')
digits[0].append('000000000000000011111111111111111111111111111111111111110000000000000000')
digits[0].append('000000000000000011111111111111111111111111111111111111110000000000000000')
digits[0].append('000000000000000011111111111111111111111111111111111111110000000000000000')
digits[0].append('000000000000000011111111111111111111111111111111111111110000000000000000')
digits[0].append('000000000000000011111111111111111111111111111111111111110000000000000000')
digits[0].append('000000000000000011111111111111111111111111111111111111110000000000000000')
digits[0].append('000000000000000011111111111111111111111111111111111111110000000000000000')
digits[0].append('000000000000000011111111111111111111111111111111111111110000000000000000')
digits[0].append('000000000000000011111111111111111111111111111111111111110000000000000000')
digits[0].append('000000000000000011111111111111111111111111111111111111110000000000000000')
digits[0].append('000000000000000011111111111111111111111111111111111111110000000000000000')
digits[0].append('000000000000000011111111111111111111111111111111111111110000000000000000')
digits[0].append('000000000000000011111111111111111111111111111111111111110000000000000000')
digits[0].append('000000000000000011111111111111111111111111111111111111110000000000000000')
digits[0].append('000000000000000000000000111111111111111111111111111111110000000000000000')
digits[0].append('000000000000000000000000111111111111111111111111111111110000000000000000')
digits[0].append('000000000000000000000000111111111111111111111111111111110000000000000000')
digits[0].append('000000000000000000000000111111111111111111111111111111110000000000000000')
digits[0].append('000000000000000000000000111111111111111111111111111111110000000000000000')
digits[0].append('000000000000000000000000111111111111111111111111111111110000000000000000')
digits[0].append('000000000000000000000000111111111111111111111111111111110000000000000000')
digits[0].append('000000000000000000000000111111111111111111111111111111110000000000000000')
digits[0].append('000000000000000000000000111111111111111111111111111111110000000000000000')
digits[0].append('000000000000000000000000111111111111111111111111111111110000000000000000')
digits[0].append('000000000000000000000000111111111111111111111111111111110000000000000000')
digits[0].append('000000000000000000000000111111111111111111111111111111110000000000000000')
digits[0].append('000000000000000000000000111111111111111111111111111111110000000000000000')
digits[0].append('000000000000000000000000111111111111111111111111111111110000000000000000')
digits[0].append('000000000000000000000000111111111111111111111111111111110000000000000000')
digits[0].append('000000000000000000000000111111111111111111111111111111110000000000000000')
digits[0].append('000000000000000000000000111111111111111111111111111111110000000000000000')
digits[0].append('000000000000000000000000111111111111111111111111111111110000000000000000')
digits[0].append('000000000000000000000000111111111111111111111111111111110000000000000000')
digits[0].append('000000000000000000000000111111111111111111111111111111110000000000000000')
digits[0].append('000000000000000000000000111111111111111111111111111111110000000000000000')
digits[0].append('000000000000000000000000111111111111111111111111111111110000000000000000')
digits[0].append('000000000000000000000000111111111111111111111111111111110000000000000000')
digits[0].append('000000000000000000000000111111111111111111111111111111110000000000000000')
digits[0].append('000000000000000000000000111111111111111111111111111111110000000000000000')
digits[0].append('000000000000000000000000111111111111111111111111111111110000000000000000')
digits[0].append('000000000000000000000000111111111111111111111111111111110000000000000000')
digits[0].append('000000000000000000000000111111111111111111111111111111110000000000000000')
digits[0].append('000000000000000000000000111111111111111111111111111111110000000000000000')
digits[0].append('000000000000000000000000111111111111111111111111111111110000000000000000')
digits[0].append('000000000000000000000000111111111111111111111111111111110000000000000000')
digits[0].append('000000000000000000000000111111111111111111111111111111110000000000000000')
digits[0].append('000000000000000000000000111111111111111111111111111111110000000000000000')
digits[0].append('000000000000000000000000111111111111111111111111111111110000000000000000')
digits[0].append('000000000000000000000000111111111111111111111111111111110000000000000000')
digits[0].append('000000000000000000000000111111111111111111111111111111110000000000000000')
digits[0].append('000000000000000000000000111111111111111111111111111111110000000000000000')
digits[0].append('000000000000000000000000111111111111111111111111111111110000000000000000')
digits[0].append('000000000000000000000000111111111111111111111111111111110000000000000000')
digits[0].append('000000000000000000000000111111111111111111111111111111110000000000000000')
digits[0].append('000000000000000000000000111111111111111111111111111111110000000000000000')
digits[0].append('000000000000000000000000111111111111111111111111111111110000000000000000')
digits[0].append('000000000000000000000000111111111111111111111111111111110000000000000000')
digits[0].append('000000000000000000000000111111111111111111111111111111110000000000000000')
digits[0].append('000000000000000000000000111111111111111111111111111111110000000000000000')
digits[0].append('000000000000000000000000111111111111111111111111111111110000000000000000')
digits[0].append('000000000000000000000000111111111111111111111111111111110000000000000000')
digits[0].append('000000000000000000000000111111111111111111111111111111110000000000000000')
digits[0].append('000000000000000000000000111111111111111111111111111111110000000000000000')
digits[0].append('000000000000000000000000111111111111111111111111111111110000000000000000')
digits[0].append('000000000000000000000000111111111111111111111111111111110000000000000000')
digits[0].append('000000000000000000000000111111111111111111111111111111110000000000000000')
digits[0].append('000000000000000000000000111111111111111111111111111111110000000000000000')
digits[0].append('000000000000000000000000111111111111111111111111111111110000000000000000')
digits[0].append('000000000000000000000000111111111111111111111111111111110000000000000000')
digits[0].append('000000000000000000000000111111111111111111111111111111110000000000000000')
digits[0].append('000000000000000000000000111111111111111111111111111111110000000000000000')
digits[0].append('000000000000000000000000111111111111111111111111111111110000000000000000')
digits[0].append('000000000000000000000000111111111111111111111111111111110000000000000000')
digits[0].append('000000000000000000000000111111111111111111111111111111110000000000000000')
digits[0].append('000000000000000000000000111111111111111111111111111111110000000000000000')
digits[0].append('000000000000000000000000111111111111111111111111111111110000000000000000')
digits[0].append('000000000000000000000000111111111111111111111111111111110000000000000000')
digits[0].append('000000000000000000000000111111111111111111111111111111110000000000000000')
digits[0].append('000000000000000000000000111111111111111111111111111111110000000000000000')
digits[0].append('000000000000000000000000111111111111111111111111111111110000000000000000')
digits[0].append('000000000000000000000000111111111111111111111111111111110000000000000000')
digits[0].append('000000000000000000000000111111111111111111111111111111110000000000000000')
digits[0].append('000000000000000000000000111111111111111111111111111111110000000000000000')
digits[0].append('000000000000000000000000111111111111111111111111111111110000000000000000')
digits[0].append('000000000000000000000000111111111111111111111111111111110000000000000000')
digits[0].append('000000000000000000000000111111111111111111111111111111110000000000000000')
digits[0].append('000000000000000000000000111111111111111111111111111111110000000000000000')
digits[0].append('000000000000000000000000111111111111111111111111111111110000000000000000')
digits[0].append('000000000000000000000000111111111111111111111111111111110000000000000000')
digits[0].append('000000000000000000000000111111111111111111111111111111110000000000000000')
digits[0].append('000000000000000000000000111111111111111111111111111111110000000000000000')
digits[0].append('000000000000000000000000111111111111111111111111111111110000000000000000')
digits[0].append('000000000000000000000000111111111111111111111111111111110000000000000000')
digits[0].append('000000000000000000000000111111111111111111111111111111110000000000000000')
digits[0].append('000000000000000000000000111111111111111111111111111111110000000000000000')
digits[0].append('000000000000000000000000111111111111111111111111111111110000000000000000')
digits[0].append('000000000000000000000000111111111111111111111111111111110000000000000000')
digits[0].append('000000000000000000000000111111111111111111111111111111110000000000000000')
digits[0].append('000000000000000011111111111111111111111111111111111111111111111111111111')
digits[0].append('000000000000000011111111111111111111111111111111111111111111111111111111')
digits[0].append('000000000000000011111111111111111111111111111111111111111111111111111111')
digits[0].append('000000000000000011111111111111111111111111111111111111111111111111111111')
digits[0].append('000000000000000011111111111111111111111111111111111111111111111111111111')
digits[0].append('000000000000000011111111111111111111111111111111111111111111111111111111')
digits[0].append('000000000000000011111111111111111111111111111111111111111111111111111111')
digits[0].append('000000000000000011111111111111111111111111111111111111111111111111111111')
digits[0].append('000000000000000011111111111111111111111111111111111111111111111111111111')
digits[0].append('000000000000000011111111111111111111111111111111111111111111111111111111')
digits[0].append('000000000000000011111111111111111111111111111111111111111111111111111111')
digits[0].append('000000000000000011111111111111111111111111111111111111111111111111111111')
digits[0].append('000000000000000011111111111111111111111111111111111111111111111111111111')
digits[0].append('000000000000000011111111111111111111111111111111111111111111111111111111')
digits[0].append('000000000000000011111111111111111111111111111111111111111111111111111111')
digits[0].append('000000000000000011111111111111111111111111111111111111111111111111111111')
digits[0].append('000000000000000011111111111111111111111111111111111111111111111111111111')
digits[0].append('000000000000000011111111111111111111111111111111111111111111111111111111')
digits[0].append('000000000000000011111111111111111111111111111111111111111111111111111111')
digits[0].append('000000000000000011111111111111111111111111111111111111111111111111111111')
digits[0].append('000000000000000011111111111111111111111111111111111111111111111111111111')
digits[0].append('000000000000000011111111111111111111111111111111111111111111111111111111')
digits[0].append('000000000000000011111111111111111111111111111111111111111111111111111111')
digits[0].append('000000000000000011111111111111111111111111111111111111111111111111111111')
digits[0].append('000000000000000011111111111111111111111111111111111111111111111111111111')
digits[0].append('000000000000000011111111111111111111111111111111111111111111111111111111')
digits[0].append('000000000000000011111111111111111111111111111111111111111111111111111111')
digits[0].append('000000000000000011111111111111111111111111111111111111111111111111111111')
digits[0].append('000000000000000011111111111111111111111111111111111111111111111111111111')
digits[0].append('000000000000000011111111111111111111111111111111111111111111111111111111')
digits[0].append('000000000000000011111111111111111111111111111111111111111111111111111111')
digits[0].append('000000000000000011111111111111111111111111111111111111111111111111111111')
digits[0].append('000000000000000011111111111111111111111111111111111111111111111111111111')
digits[0].append('000000000000000011111111111111111111111111111111111111111111111111111111')
digits[0].append('000000000000000011111111111111111111111111111111111111111111111111111111')
digits[0].append('000000000000000011111111111111111111111111111111111111111111111111111111')
digits[0].append('000000000000000011111111111111111111111111111111111111111111111111111111')
digits[0].append('000000000000000011111111111111111111111111111111111111111111111111111111')
digits[0].append('000000000000000011111111111111111111111111111111111111111111111111111111')
digits[0].append('000000000000000011111111111111111111111111111111111111111111111111111111')
digits[0].append('000000000000000011111111111111111111111111111111111111111111111111111111')
digits[0].append('000000000000000011111111111111111111111111111111111111111111111111111111')
digits[0].append('000000000000000011111111111111111111111111111111111111111111111111111111')
digits[0].append('000000000000000011111111111111111111111111111111111111111111111111111111')
digits[0].append('000000000000000011111111111111111111111111111111111111111111111111111111')
digits[0].append('000000000000000011111111111111111111111111111111111111111111111111111111')
digits[0].append('000000000000000011111111111111111111111111111111111111111111111111111111')
digits[0].append('000000000000000011111111111111111111111111111111111111111111111111111111')
digits[0].append('000000000000000011111111111111111111111111111111111111111111111111111111')
digits[0].append('000000000000000011111111111111111111111111111111111111111111111111111111')
digits[0].append('000000000000000011111111111111111111111111111111111111111111111111111111')
digits[0].append('000000000000000011111111111111111111111111111111111111111111111111111111')
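# Note (editorial illustration, not in the original source): each digits[n]
# entry above is one scan line of a glyph bitmap, so rendering a whole glyph
# with the helper defined earlier is just a row loop, e.g.:
#
#     for row, bits in enumerate(digits[0]):
#         drawhorizontalpixels(im, x0, y0 + row, bits)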
digits[1].append('0000000000000000000000000000000011111111111111111111111100000000000000000000000000000000')
digits[1].append('0000000000000000000000000000000011111111111111111111111111111111000000000000000000000000')
digits[1].append('0000000000000000000000000000000011111111111111111111111111111111000000000000000000000000')
digits[1].append('0000000000000000000000001111111111111111111111111111111111111111000000000000000000000000')
digits[1].append('0000000000000000000000001111111111111111111111111111111111111111111111110000000000000000')
digits[1].append('0000000000000000000000001111111111111111111111111111111111111111111111110000000000000000')
digits[1].append('0000000000000000000000001111111111111111111111111111111111111111111111110000000000000000')
digits[1].append('0000000000000000000000001111111111111111111111111111111111111111111111110000000000000000')
digits[1].append('0000000000000000000000001111111111111111111111111111111111111111111111110000000000000000')
digits[1].append('0000000000000000111111111111111111111111111111111111111111111111111111110000000000000000')
digits[1].append('0000000000000000111111111111111111111111111111111111111111111111111111110000000000000000')
digits[1].append('0000000000000000111111111111111111111111111111111111111111111111111111111111111100000000')
digits[1].append('0000000000000000111111111111111111111111111111111111111111111111111111111111111100000000')
digits[1].append('0000000000000000111111111111111111111111111111111111111111111111111111111111111100000000')
digits[1].append('0000000000000000111111111111111111111111111111111111111111111111111111111111111100000000')
digits[1].append('0000000000000000111111111111111111111111111111111111111111111111111111111111111100000000')
digits[1].append('0000000000000000111111111111111111111111111111111111111111111111111111111111111100000000')
digits[1].append('0000000000000000111111111111111111111111111111111111111111111111111111111111111100000000')
digits[1].append('0000000000000000111111111111111111111111111111111111111111111111111111111111111100000000')
digits[1].append('0000000000000000111111111111111111111111111111111111111111111111111111111111111100000000')
digits[1].append('0000000000000000111111111111111111111111111111111111111111111111111111111111111100000000')
digits[1].append('0000000000000000111111111111111111111111111111111111111111111111111111111111111100000000')
digits[1].append('0000000000000000111111111111111111111111111111111111111111111111111111111111111100000000')
digits[1].append('0000000011111111111111111111111111111111111111111111111111111111111111111111111100000000')
digits[1].append('0000000011111111111111111111111111111111111111111111111111111111111111111111111100000000')
digits[1].append('0000000011111111111111111111111111111111111111111111111111111111111111111111111100000000')
digits[1].append('0000000011111111111111111111111111111111111111111111111111111111111111111111111100000000')
digits[1].append('0000000011111111111111111111111111111111111111111111111111111111111111111111111100000000')
digits[1].append('0000000011111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[1].append('0000000011111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[1].append('0000000011111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[1].append('0000000011111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[1].append('0000000011111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[1].append('0000000011111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[1].append('0000000011111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[1].append('0000000011111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[1].append('0000000011111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[1].append('0000000011111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[1].append('0000000011111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[1].append('0000000011111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[1].append('0000000011111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[1].append('0000000011111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[1].append('0000000011111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[1].append('0000000011111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[1].append('0000000011111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[1].append('0000000011111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[1].append('0000000011111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[1].append('0000000011111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[1].append('0000000011111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[1].append('0000000011111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[1].append('0000000011111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[1].append('0000000011111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[1].append('0000000011111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[1].append('0000000011111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[1].append('0000000011111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[1].append('0000000011111111111111111111111111111111000000001111111111111111111111111111111111111111')
digits[1].append('0000000011111111111111111111111111111111000000001111111111111111111111111111111111111111')
digits[1].append('0000000011111111111111111111111111111111000000001111111111111111111111111111111111111111')
digits[1].append('0000000011111111111111111111111111111111000000001111111111111111111111111111111111111111')
digits[1].append('0000000011111111111111111111111111111111000000001111111111111111111111111111111111111111')
digits[1].append('0000000011111111111111111111111111111111000000001111111111111111111111111111111111111111')
digits[1].append('0000000011111111111111111111111111111111000000001111111111111111111111111111111111111111')
digits[1].append('0000000011111111111111111111111111111111000000001111111111111111111111111111111111111111')
digits[1].append('0000000011111111111111111111111111111111000000001111111111111111111111111111111111111111')
digits[1].append('0000000011111111111111111111111111111111000000001111111111111111111111111111111111111111')
digits[1].append('0000000011111111111111111111111111111111000000001111111111111111111111111111111111111111')
digits[1].append('0000000011111111111111111111111111111111000000001111111111111111111111111111111111111111')
digits[1].append('0000000011111111111111111111111111111111000000001111111111111111111111111111111111111111')
digits[1].append('0000000011111111111111111111111111111111000000001111111111111111111111111111111111111111')
digits[1].append('0000000011111111111111111111111111111111000000001111111111111111111111111111111111111111')
digits[1].append('0000000011111111111111111111111111111111000000001111111111111111111111111111111111111111')
digits[1].append('0000000011111111111111111111111111111111000000001111111111111111111111111111111111111111')
digits[1].append('0000000011111111111111111111111111111111000000001111111111111111111111111111111111111111')
digits[1].append('0000000011111111111111111111111111111111000000001111111111111111111111111111111111111111')
digits[1].append('0000000011111111111111111111111111111111000000001111111111111111111111111111111111111111')
digits[1].append('0000000011111111111111111111111111111111000000001111111111111111111111111111111111111111')
digits[1].append('0000000011111111111111111111111111111111000000001111111111111111111111111111111111111111')
digits[1].append('0000000011111111111111111111111111111111000000001111111111111111111111111111111111111111')
digits[1].append('0000000011111111111111111111111111111111000000001111111111111111111111111111111100000000')
digits[1].append('0000000011111111111111111111111111111111000000001111111111111111111111111111111100000000')
digits[1].append('0000000011111111111111111111111111111111000000001111111111111111111111111111111100000000')
digits[1].append('0000000000000000000000000000000000000000000000001111111111111111111111111111111100000000')
digits[1].append('0000000000000000000000000000000000000000000000001111111111111111111111111111111100000000')
digits[1].append('0000000000000000000000000000000000000000000000001111111111111111111111111111111100000000')
digits[1].append('0000000000000000000000000000000000000000000000001111111111111111111111111111111100000000')
digits[1].append('0000000000000000000000000000000000000000000000001111111111111111111111111111111100000000')
digits[1].append('0000000000000000000000000000000000000000000000001111111111111111111111111111111100000000')
digits[1].append('0000000000000000000000000000000000000000111111111111111111111111111111111111111100000000')
digits[1].append('0000000000000000000000000000000000000000111111111111111111111111111111111111111100000000')
digits[1].append('0000000000000000000000000000000000000000111111111111111111111111111111111111111100000000')
digits[1].append('0000000000000000000000000000000000000000111111111111111111111111111111111111111100000000')
digits[1].append('0000000000000000000000000000000000000000111111111111111111111111111111111111111100000000')
digits[1].append('0000000000000000000000000000000000000000111111111111111111111111111111111111111100000000')
digits[1].append('0000000000000000000000000000000000000000111111111111111111111111111111111111111100000000')
digits[1].append('0000000000000000000000000000000000000000111111111111111111111111111111111111111100000000')
digits[1].append('0000000000000000000000000000000000000000111111111111111111111111111111111111111100000000')
digits[1].append('0000000000000000000000000000000011111111111111111111111111111111111111110000000000000000')
digits[1].append('0000000000000000000000000000000011111111111111111111111111111111111111110000000000000000')
digits[1].append('0000000000000000000000000000000011111111111111111111111111111111111111110000000000000000')
digits[1].append('0000000000000000000000000000000011111111111111111111111111111111111111110000000000000000')
digits[1].append('0000000000000000000000000000000011111111111111111111111111111111111111110000000000000000')
digits[1].append('0000000000000000000000000000000011111111111111111111111111111111111111110000000000000000')
digits[1].append('0000000000000000000000000000000011111111111111111111111111111111111111110000000000000000')
digits[1].append('0000000000000000000000000000000011111111111111111111111111111111111111110000000000000000')
digits[1].append('0000000000000000000000001111111111111111111111111111111111111111111111110000000000000000')
digits[1].append('0000000000000000000000001111111111111111111111111111111111111111111111110000000000000000')
digits[1].append('0000000000000000000000001111111111111111111111111111111111111111000000000000000000000000')
digits[1].append('0000000000000000000000001111111111111111111111111111111111111111000000000000000000000000')
digits[1].append('0000000000000000000000001111111111111111111111111111111111111111000000000000000000000000')
digits[1].append('0000000000000000000000001111111111111111111111111111111111111111000000000000000000000000')
digits[1].append('0000000000000000000000001111111111111111111111111111111111111111000000000000000000000000')
digits[1].append('0000000000000000000000001111111111111111111111111111111111111111000000000000000000000000')
digits[1].append('0000000000000000000000001111111111111111111111111111111111111111000000000000000000000000')
digits[1].append('0000000000000000000000001111111111111111111111111111111100000000000000000000000000000000')
digits[1].append('0000000000000000111111111111111111111111111111111111111100000000000000000000000000000000')
digits[1].append('0000000000000000111111111111111111111111111111111111111100000000000000000000000000000000')
digits[1].append('0000000000000000111111111111111111111111111111111111111100000000000000000000000000000000')
digits[1].append('0000000000000000111111111111111111111111111111111111111100000000000000000000000000000000')
digits[1].append('0000000000000000111111111111111111111111111111111111111100000000000000000000000000000000')
digits[1].append('0000000000000000111111111111111111111111111111111111111100000000000000000000000000000000')
digits[1].append('0000000000000000111111111111111111111111111111110000000000000000000000000000000000000000')
digits[1].append('0000000000000000111111111111111111111111111111110000000000000000000000000000000000000000')
digits[1].append('0000000000000000111111111111111111111111111111110000000000000000000000000000000000000000')
digits[1].append('0000000000000000111111111111111111111111111111110000000011111111111111111111111111111111')
digits[1].append('0000000000000000111111111111111111111111111111110000000011111111111111111111111111111111')
digits[1].append('0000000000000000111111111111111111111111111111110000000011111111111111111111111111111111')
digits[1].append('0000000000000000111111111111111111111111111111110000000011111111111111111111111111111111')
digits[1].append('0000000000000000111111111111111111111111111111110000000011111111111111111111111111111111')
digits[1].append('0000000000000000111111111111111111111111111111110000000011111111111111111111111111111111')
digits[1].append('0000000000000000111111111111111111111111111111110000000011111111111111111111111111111111')
digits[1].append('0000000011111111111111111111111111111111111111110000000011111111111111111111111111111111')
digits[1].append('0000000011111111111111111111111111111111000000000000000011111111111111111111111111111111')
digits[1].append('0000000011111111111111111111111111111111000000000000000011111111111111111111111111111111')
digits[1].append('0000000011111111111111111111111111111111000000000000000011111111111111111111111111111111')
digits[1].append('0000000011111111111111111111111111111111000000000000000011111111111111111111111111111111')
digits[1].append('0000000011111111111111111111111111111111000000000000000011111111111111111111111111111111')
digits[1].append('0000000011111111111111111111111111111111000000000000000011111111111111111111111111111111')
digits[1].append('0000000011111111111111111111111111111111000000000000000011111111111111111111111111111111')
digits[1].append('0000000011111111111111111111111111111111000000000000000011111111111111111111111111111111')
digits[1].append('0000000011111111111111111111111111111111000000000000000011111111111111111111111111111111')
digits[1].append('0000000011111111111111111111111111111111000000000000000011111111111111111111111111111111')
digits[1].append('0000000011111111111111111111111111111111000000000000000011111111111111111111111111111111')
digits[1].append('0000000011111111111111111111111111111111000000000000000011111111111111111111111111111111')
digits[1].append('0000000011111111111111111111111111111111000000000000000011111111111111111111111111111111')
digits[1].append('0000000011111111111111111111111111111111000000000000000011111111111111111111111111111111')
digits[1].append('0000000011111111111111111111111111111111000000000000000011111111111111111111111111111111')
digits[1].append('0000000011111111111111111111111111111111000000000000000011111111111111111111111111111111')
digits[1].append('0000000011111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[1].append('0000000011111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[1].append('0000000011111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[1].append('0000000011111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[1].append('0000000011111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[1].append('0000000011111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[1].append('0000000011111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[1].append('0000000011111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[1].append('0000000011111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[1].append('0000000011111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[1].append('0000000011111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[1].append('0000000011111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[1].append('0000000011111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[1].append('0000000011111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[1].append('0000000011111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[1].append('0000000011111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[1].append('0000000011111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[1].append('0000000011111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[1].append('0000000011111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[1].append('0000000011111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[1].append('0000000011111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[1].append('0000000011111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[1].append('0000000011111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[1].append('0000000011111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[1].append('0000000011111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[1].append('0000000011111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[1].append('0000000011111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[1].append('0000000011111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[1].append('0000000011111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[1].append('0000000011111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[1].append('1111111111111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[1].append('1111111111111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[1].append('1111111111111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[1].append('1111111111111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[1].append('1111111111111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[1].append('1111111111111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[1].append('1111111111111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[1].append('1111111111111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[1].append('1111111111111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[1].append('1111111111111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[1].append('1111111111111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[1].append('1111111111111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[1].append('1111111111111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[1].append('1111111111111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[1].append('1111111111111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[1].append('1111111111111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[1].append('1111111111111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[1].append('1111111111111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[1].append('1111111111111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[1].append('1111111111111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[2].append('11111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[2].append('11111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[2].append('11111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[2].append('11111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[2].append('11111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[2].append('11111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[2].append('11111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[2].append('11111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[2].append('11111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[2].append('11111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[2].append('11111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[2].append('11111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[2].append('11111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[2].append('11111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[2].append('11111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[2].append('11111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[2].append('11111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[2].append('11111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[2].append('11111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[2].append('11111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[2].append('11111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[2].append('11111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[2].append('11111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[2].append('11111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[2].append('11111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[2].append('11111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[2].append('11111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[2].append('11111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[2].append('11111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[2].append('11111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[2].append('11111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[2].append('11111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[2].append('11111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[2].append('11111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[2].append('11111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[2].append('11111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[2].append('11111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[2].append('11111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[2].append('11111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[2].append('11111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[2].append('11111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[2].append('11111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[2].append('11111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[2].append('11111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[2].append('11111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[2].append('11111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[2].append('11111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[2].append('11111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[2].append('11111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[2].append('11111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[2].append('11111111111111111111111111111111000000000000000011111111111111111111111111111111')
digits[2].append('11111111111111111111111111111111000000000000000011111111111111111111111111111111')
digits[2].append('11111111111111111111111111111111000000000000000011111111111111111111111100000000')
digits[2].append('11111111111111111111111111111111000000000000000011111111111111111111111100000000')
digits[2].append('11111111111111111111111111111111000000000000000011111111111111111111111100000000')
digits[2].append('11111111111111111111111111111111000000000000000011111111111111111111111100000000')
digits[2].append('11111111111111111111111111111111000000000000000011111111111111111111111100000000')
digits[2].append('11111111111111111111111111111111000000001111111111111111111111111111111100000000')
digits[2].append('11111111111111111111111111111111000000001111111111111111111111111111111100000000')
digits[2].append('11111111111111111111111111111111000000001111111111111111111111111111111100000000')
digits[2].append('11111111111111111111111111111111000000001111111111111111111111111111111100000000')
digits[2].append('11111111111111111111111111111111000000001111111111111111111111111111111100000000')
digits[2].append('11111111111111111111111111111111000000001111111111111111111111110000000000000000')
digits[2].append('11111111111111111111111111111111000000001111111111111111111111110000000000000000')
digits[2].append('11111111111111111111111111111111000000001111111111111111111111110000000000000000')
digits[2].append('11111111111111111111111111111111000000001111111111111111111111110000000000000000')
digits[2].append('11111111111111111111111111111111000000001111111111111111111111110000000000000000')
digits[2].append('11111111111111111111111111111111111111111111111111111111111111110000000000000000')
digits[2].append('00000000000000000000000000000000111111111111111111111111111111110000000000000000')
digits[2].append('00000000000000000000000000000000111111111111111111111111111111110000000000000000')
digits[2].append('00000000000000000000000000000000111111111111111111111111111111110000000000000000')
digits[2].append('00000000000000000000000000000000111111111111111111111111111111110000000000000000')
digits[2].append('00000000000000000000000000000000111111111111111111111111111111110000000000000000')
digits[2].append('00000000000000000000000000000000111111111111111111111111000000000000000000000000')
digits[2].append('00000000000000000000000000000000111111111111111111111111000000000000000000000000')
digits[2].append('00000000000000000000000000000000111111111111111111111111000000000000000000000000')
digits[2].append('00000000000000000000000000000000111111111111111111111111000000000000000000000000')
digits[2].append('00000000000000000000000000000000111111111111111111111111000000000000000000000000')
digits[2].append('00000000000000000000000011111111111111111111111111111111000000000000000000000000')
digits[2].append('00000000000000000000000011111111111111111111111111111111000000000000000000000000')
digits[2].append('00000000000000000000000011111111111111111111111111111111000000000000000000000000')
digits[2].append('00000000000000000000000011111111111111111111111111111111000000000000000000000000')
digits[2].append('00000000000000000000000011111111111111111111111111111111000000000000000000000000')
digits[2].append('00000000000000000000000011111111111111111111111100000000000000000000000000000000')
digits[2].append('00000000000000000000000011111111111111111111111100000000000000000000000000000000')
digits[2].append('00000000000000000000000011111111111111111111111100000000000000000000000000000000')
digits[2].append('00000000000000000000000011111111111111111111111100000000000000000000000000000000')
digits[2].append('00000000000000000000000011111111111111111111111111111111000000000000000000000000')
digits[2].append('00000000000000001111111111111111111111111111111111111111111111110000000000000000')
digits[2].append('00000000000000001111111111111111111111111111111111111111111111110000000000000000')
digits[2].append('00000000000000001111111111111111111111111111111111111111111111110000000000000000')
digits[2].append('00000000000000001111111111111111111111111111111111111111111111110000000000000000')
digits[2].append('00000000000000001111111111111111111111111111111111111111111111110000000000000000')
digits[2].append('00000000000000001111111111111111111111111111111111111111111111111111111100000000')
digits[2].append('00000000000000001111111111111111111111111111111111111111111111111111111100000000')
digits[2].append('00000000000000001111111111111111111111111111111111111111111111111111111100000000')
digits[2].append('00000000000000001111111111111111111111111111111111111111111111111111111100000000')
digits[2].append('00000000000000001111111111111111111111111111111111111111111111111111111100000000')
digits[2].append('00000000000000001111111111111111111111111111111111111111111111111111111100000000')
digits[2].append('00000000000000001111111111111111111111111111111111111111111111111111111100000000')
digits[2].append('00000000000000001111111111111111111111111111111111111111111111111111111100000000')
digits[2].append('00000000000000001111111111111111111111111111111111111111111111111111111100000000')
digits[2].append('00000000000000000000000011111111111111111111111111111111111111111111111100000000')
digits[2].append('00000000000000000000000011111111000000001111111111111111111111111111111100000000')
digits[2].append('00000000000000000000000011111111000000001111111111111111111111111111111100000000')
digits[2].append('00000000000000000000000011111111000000001111111111111111111111111111111100000000')
digits[2].append('00000000000000000000000000000000000000001111111111111111111111111111111100000000')
digits[2].append('00000000000000000000000000000000000000001111111111111111111111111111111100000000')
digits[2].append('00000000000000000000000000000000000000001111111111111111111111111111111100000000')
digits[2].append('00000000000000000000000000000000000000001111111111111111111111111111111111111111')
digits[2].append('00000000000000000000000000000000000000001111111111111111111111111111111111111111')
digits[2].append('00000000000000000000000000000000000000001111111111111111111111111111111111111111')
digits[2].append('00000000000000000000000000000000000000001111111111111111111111111111111111111111')
digits[2].append('00000000000000000000000000000000000000001111111111111111111111111111111111111111')
digits[2].append('00000000000000000000000000000000000000001111111111111111111111111111111111111111')
digits[2].append('00000000000000000000000000000000000000001111111111111111111111111111111111111111')
digits[2].append('11111111111111111111111111111111000000001111111111111111111111111111111111111111')
digits[2].append('11111111111111111111111111111111000000001111111111111111111111111111111111111111')
digits[2].append('11111111111111111111111111111111000000001111111111111111111111111111111111111111')
digits[2].append('11111111111111111111111111111111000000001111111111111111111111111111111111111111')
digits[2].append('11111111111111111111111111111111000000001111111111111111111111111111111111111111')
digits[2].append('11111111111111111111111111111111000000001111111111111111111111111111111111111111')
digits[2].append('11111111111111111111111111111111000000001111111111111111111111111111111111111111')
digits[2].append('11111111111111111111111111111111000000001111111111111111111111111111111111111111')
digits[2].append('11111111111111111111111111111111000000001111111111111111111111111111111111111111')
digits[2].append('11111111111111111111111111111111000000001111111111111111111111111111111111111111')
digits[2].append('11111111111111111111111111111111000000001111111111111111111111111111111111111111')
digits[2].append('11111111111111111111111111111111000000001111111111111111111111111111111111111111')
digits[2].append('11111111111111111111111111111111000000001111111111111111111111111111111111111111')
digits[2].append('11111111111111111111111111111111000000001111111111111111111111111111111111111111')
digits[2].append('11111111111111111111111111111111000000001111111111111111111111111111111111111111')
digits[2].append('11111111111111111111111111111111000000001111111111111111111111111111111111111111')
digits[2].append('11111111111111111111111111111111000000001111111111111111111111111111111111111111')
digits[2].append('11111111111111111111111111111111000000001111111111111111111111111111111111111111')
digits[2].append('11111111111111111111111111111111000000001111111111111111111111111111111111111111')
digits[2].append('11111111111111111111111111111111000000001111111111111111111111111111111111111111')
digits[2].append('11111111111111111111111111111111000000001111111111111111111111111111111111111111')
digits[2].append('11111111111111111111111111111111000000001111111111111111111111111111111111111111')
digits[2].append('11111111111111111111111111111111000000001111111111111111111111111111111111111111')
digits[2].append('11111111111111111111111111111111000000001111111111111111111111111111111111111111')
digits[2].append('11111111111111111111111111111111000000001111111111111111111111111111111111111111')
digits[2].append('11111111111111111111111111111111000000001111111111111111111111111111111111111111')
digits[2].append('11111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[2].append('11111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[2].append('11111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[2].append('11111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[2].append('11111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[2].append('11111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[2].append('11111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[2].append('11111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[2].append('11111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[2].append('11111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[2].append('11111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[2].append('11111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[2].append('11111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[2].append('11111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[2].append('11111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[2].append('11111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[2].append('11111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[2].append('11111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[2].append('11111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[2].append('11111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[2].append('11111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[2].append('11111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[2].append('11111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[2].append('11111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[2].append('11111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[2].append('11111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[2].append('11111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[2].append('11111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[2].append('11111111111111111111111111111111111111111111111111111111111111111111111100000000')
digits[2].append('11111111111111111111111111111111111111111111111111111111111111111111111100000000')
digits[2].append('11111111111111111111111111111111111111111111111111111111111111111111111100000000')
digits[2].append('11111111111111111111111111111111111111111111111111111111111111111111111100000000')
digits[2].append('11111111111111111111111111111111111111111111111111111111111111111111111100000000')
digits[2].append('11111111111111111111111111111111111111111111111111111111111111111111111100000000')
digits[2].append('11111111111111111111111111111111111111111111111111111111111111111111111100000000')
digits[2].append('11111111111111111111111111111111111111111111111111111111111111111111111100000000')
digits[2].append('11111111111111111111111111111111111111111111111111111111111111111111111100000000')
digits[2].append('11111111111111111111111111111111111111111111111111111111111111111111111100000000')
digits[2].append('11111111111111111111111111111111111111111111111111111111111111111111111100000000')
digits[2].append('11111111111111111111111111111111111111111111111111111111111111111111111100000000')
digits[2].append('00000000111111111111111111111111111111111111111111111111111111111111111100000000')
digits[2].append('00000000111111111111111111111111111111111111111111111111111111111111111100000000')
digits[2].append('00000000111111111111111111111111111111111111111111111111111111111111111100000000')
digits[2].append('00000000111111111111111111111111111111111111111111111111111111111111111100000000')
digits[2].append('00000000111111111111111111111111111111111111111111111111111111111111111100000000')
digits[2].append('00000000111111111111111111111111111111111111111111111111111111111111111100000000')
digits[2].append('00000000111111111111111111111111111111111111111111111111111111110000000000000000')
digits[2].append('00000000111111111111111111111111111111111111111111111111111111110000000000000000')
digits[2].append('00000000111111111111111111111111111111111111111111111111111111110000000000000000')
digits[2].append('00000000111111111111111111111111111111111111111111111111111111110000000000000000')
digits[2].append('00000000000000001111111111111111111111111111111111111111111111110000000000000000')
digits[2].append('00000000000000001111111111111111111111111111111111111111111111110000000000000000')
digits[2].append('00000000000000001111111111111111111111111111111111111111111111110000000000000000')
digits[2].append('00000000000000001111111111111111111111111111111111111111000000000000000000000000')
digits[2].append('00000000000000000000000011111111111111111111111111111111000000000000000000000000')
digits[2].append('00000000000000000000000011111111111111111111111100000000000000000000000000000000')
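
# digits[3]: bitmap rows for the next glyph. Each appended string is one
# 80-pixel scanline ('1' = foreground, '0' = background); given the list
# indexing, this presumably draws the numeral 3.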
digits[3].append('00000000000000000000000000000000111111111111111111111111111111111111111100000000')
digits[3].append('00000000000000000000000000000000111111111111111111111111111111111111111100000000')
digits[3].append('00000000000000000000000000000000111111111111111111111111111111111111111100000000')
digits[3].append('00000000000000000000000000000000111111111111111111111111111111111111111100000000')
digits[3].append('00000000000000000000000000000000111111111111111111111111111111111111111100000000')
digits[3].append('00000000000000000000000000000000111111111111111111111111111111111111111100000000')
digits[3].append('00000000000000000000000000000000111111111111111111111111111111111111111100000000')
digits[3].append('00000000000000000000000000000000111111111111111111111111111111111111111100000000')
digits[3].append('00000000000000000000000000000000111111111111111111111111111111111111111100000000')
digits[3].append('00000000000000000000000000000000111111111111111111111111111111111111111100000000')
digits[3].append('00000000000000000000000000000000111111111111111111111111111111111111111100000000')
digits[3].append('00000000000000000000000011111111111111111111111111111111111111111111111100000000')
digits[3].append('00000000000000000000000011111111111111111111111111111111111111111111111100000000')
digits[3].append('00000000000000000000000011111111111111111111111111111111111111111111111100000000')
digits[3].append('00000000000000000000000011111111111111111111111111111111111111111111111100000000')
digits[3].append('00000000000000000000000011111111111111111111111111111111111111111111111100000000')
digits[3].append('00000000000000000000000011111111111111111111111111111111111111111111111100000000')
digits[3].append('00000000000000000000000011111111111111111111111111111111111111111111111100000000')
digits[3].append('00000000000000000000000011111111111111111111111111111111111111111111111100000000')
digits[3].append('00000000000000000000000011111111111111111111111111111111111111111111111100000000')
digits[3].append('00000000000000000000000011111111111111111111111111111111111111111111111100000000')
digits[3].append('00000000000000000000000011111111111111111111111111111111111111111111111100000000')
digits[3].append('00000000000000000000000011111111111111111111111111111111111111111111111100000000')
digits[3].append('00000000000000000000000011111111111111111111111111111111111111111111111100000000')
digits[3].append('00000000000000000000000011111111111111111111111111111111111111111111111100000000')
digits[3].append('00000000000000000000000011111111111111111111111111111111111111111111111100000000')
digits[3].append('00000000000000000000000011111111111111111111111111111111111111111111111100000000')
digits[3].append('00000000000000000000000011111111111111111111111111111111111111111111111100000000')
digits[3].append('00000000000000000000000011111111111111111111111111111111111111111111111100000000')
digits[3].append('00000000000000000000000011111111111111111111111111111111111111111111111100000000')
digits[3].append('00000000000000000000000011111111111111111111111111111111111111111111111100000000')
digits[3].append('00000000000000001111111111111111111111111111111111111111111111111111111100000000')
digits[3].append('00000000000000001111111111111111111111111111111111111111111111111111111100000000')
digits[3].append('00000000000000001111111111111111111111111111111111111111111111111111111100000000')
digits[3].append('00000000000000001111111111111111111111111111111111111111111111111111111100000000')
digits[3].append('00000000000000001111111111111111111111111111111111111111111111111111111100000000')
digits[3].append('00000000000000001111111111111111111111111111111111111111111111111111111100000000')
digits[3].append('00000000000000001111111111111111111111111111111111111111111111111111111100000000')
digits[3].append('00000000000000001111111111111111111111111111111111111111111111111111111100000000')
digits[3].append('00000000000000001111111111111111111111111111111111111111111111111111111100000000')
digits[3].append('00000000000000001111111111111111111111111111111111111111111111111111111100000000')
digits[3].append('00000000000000001111111111111111111111111111111111111111111111111111111100000000')
digits[3].append('00000000000000001111111111111111111111111111111111111111111111111111111100000000')
digits[3].append('00000000000000001111111111111111111111111111111111111111111111111111111100000000')
digits[3].append('00000000000000001111111111111111111111111111111111111111111111111111111100000000')
digits[3].append('00000000000000001111111111111111111111111111111111111111111111111111111100000000')
digits[3].append('00000000000000001111111111111111111111111111111111111111111111111111111100000000')
digits[3].append('00000000000000001111111111111111111111111111111111111111111111111111111100000000')
digits[3].append('00000000000000001111111111111111111111111111111111111111111111111111111100000000')
digits[3].append('00000000000000001111111111111111111111111111111111111111111111111111111100000000')
digits[3].append('00000000000000001111111111111111111111111111111111111111111111111111111100000000')
digits[3].append('00000000111111111111111111111111111111111111111111111111111111111111111100000000')
digits[3].append('00000000111111111111111111111111111111111111111111111111111111111111111100000000')
digits[3].append('00000000111111111111111111111111111111111111111111111111111111111111111100000000')
digits[3].append('00000000111111111111111111111111111111111111111111111111111111111111111100000000')
digits[3].append('00000000111111111111111111111111111111111111111111111111111111111111111100000000')
digits[3].append('00000000111111111111111111111111111111111111111111111111111111111111111100000000')
digits[3].append('00000000111111111111111111111111111111111111111111111111111111111111111100000000')
digits[3].append('00000000111111111111111111111111111111111111111111111111111111111111111100000000')
digits[3].append('00000000111111111111111111111111111111111111111111111111111111111111111100000000')
digits[3].append('00000000111111111111111111111111111111111111111111111111111111111111111100000000')
digits[3].append('00000000111111111111111111111111111111111111111111111111111111111111111100000000')
digits[3].append('00000000111111111111111111111111111111111111111111111111111111111111111100000000')
digits[3].append('00000000111111111111111111111111111111111111111111111111111111111111111100000000')
digits[3].append('00000000111111111111111111111111111111111111111111111111111111111111111100000000')
digits[3].append('00000000111111111111111111111111000000001111111111111111111111111111111100000000')
digits[3].append('00000000111111111111111111111111000000001111111111111111111111111111111100000000')
digits[3].append('00000000111111111111111111111111000000001111111111111111111111111111111100000000')
digits[3].append('00000000111111111111111111111111000000001111111111111111111111111111111100000000')
digits[3].append('00000000111111111111111111111111000000001111111111111111111111111111111100000000')
digits[3].append('11111111111111111111111111111111000000001111111111111111111111111111111100000000')
digits[3].append('11111111111111111111111111111111000000001111111111111111111111111111111100000000')
digits[3].append('11111111111111111111111111111111000000001111111111111111111111111111111100000000')
digits[3].append('11111111111111111111111111111111000000001111111111111111111111111111111100000000')
digits[3].append('11111111111111111111111111111111000000001111111111111111111111111111111100000000')
digits[3].append('11111111111111111111111111111111000000001111111111111111111111111111111100000000')
digits[3].append('11111111111111111111111111111111000000001111111111111111111111111111111100000000')
digits[3].append('11111111111111111111111111111111000000001111111111111111111111111111111100000000')
digits[3].append('11111111111111111111111111111111000000001111111111111111111111111111111100000000')
digits[3].append('11111111111111111111111111111111000000001111111111111111111111111111111100000000')
digits[3].append('11111111111111111111111111111111111111111111111111111111111111111111111100000000')
digits[3].append('11111111111111111111111111111111111111111111111111111111111111111111111100000000')
digits[3].append('11111111111111111111111111111111111111111111111111111111111111111111111100000000')
digits[3].append('11111111111111111111111111111111111111111111111111111111111111111111111100000000')
digits[3].append('11111111111111111111111111111111111111111111111111111111111111111111111100000000')
digits[3].append('11111111111111111111111100000000111111111111111111111111111111111111111100000000')
digits[3].append('11111111111111111111111100000000111111111111111111111111111111111111111100000000')
digits[3].append('11111111111111111111111100000000111111111111111111111111111111111111111100000000')
digits[3].append('11111111111111111111111100000000111111111111111111111111111111111111111100000000')
digits[3].append('11111111111111111111111100000000111111111111111111111111111111111111111100000000')
digits[3].append('11111111111111111111111100000000111111111111111111111111111111111111111100000000')
digits[3].append('11111111111111111111111100000000111111111111111111111111111111111111111100000000')
digits[3].append('11111111111111111111111100000000111111111111111111111111111111111111111100000000')
digits[3].append('11111111111111111111111100000000111111111111111111111111111111111111111100000000')
digits[3].append('11111111111111111111111100000000111111111111111111111111111111111111111100000000')
digits[3].append('11111111111111111111111100000000111111111111111111111111111111111111111100000000')
digits[3].append('11111111111111111111111100000000111111111111111111111111111111111111111100000000')
digits[3].append('11111111111111111111111100000000111111111111111111111111111111111111111100000000')
digits[3].append('11111111111111111111111100000000111111111111111111111111111111111111111100000000')
digits[3].append('11111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[3].append('11111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[3].append('11111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[3].append('11111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[3].append('11111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[3].append('11111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[3].append('11111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[3].append('11111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[3].append('11111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[3].append('11111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[3].append('11111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[3].append('11111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[3].append('11111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[3].append('11111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[3].append('11111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[3].append('11111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[3].append('11111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[3].append('11111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[3].append('11111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[3].append('11111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[3].append('00000000000000000000000000000000111111111111111111111111111111111111111100000000')
digits[3].append('00000000000000000000000000000000111111111111111111111111111111111111111100000000')
digits[3].append('00000000000000000000000000000000111111111111111111111111111111111111111100000000')
digits[3].append('00000000000000000000000000000000111111111111111111111111111111111111111100000000')
digits[3].append('00000000000000000000000000000000111111111111111111111111111111111111111100000000')
digits[3].append('00000000000000000000000000000000111111111111111111111111111111111111111100000000')
digits[3].append('00000000000000000000000000000000111111111111111111111111111111111111111100000000')
digits[3].append('00000000000000000000000000000000111111111111111111111111111111111111111100000000')
digits[3].append('00000000000000000000000000000000111111111111111111111111111111111111111100000000')
digits[3].append('00000000000000000000000000000000111111111111111111111111111111111111111100000000')
digits[3].append('00000000000000000000000000000000111111111111111111111111111111111111111100000000')
digits[3].append('00000000000000000000000000000000111111111111111111111111111111111111111100000000')
digits[3].append('00000000000000000000000000000000111111111111111111111111111111111111111100000000')
digits[3].append('00000000000000000000000000000000111111111111111111111111111111111111111100000000')
digits[3].append('00000000000000000000000000000000111111111111111111111111111111111111111100000000')
digits[3].append('00000000000000000000000000000000111111111111111111111111111111111111111100000000')
digits[3].append('00000000000000000000000000000000111111111111111111111111111111111111111100000000')
digits[3].append('00000000000000000000000000000000111111111111111111111111111111111111111100000000')
digits[3].append('00000000000000000000000000000000111111111111111111111111111111111111111100000000')
digits[3].append('00000000000000000000000000000000111111111111111111111111111111111111111100000000')
digits[3].append('00000000000000000000000000000000111111111111111111111111111111111111111100000000')
digits[3].append('00000000000000000000000000000000111111111111111111111111111111111111111100000000')
digits[3].append('00000000000000000000000000000000111111111111111111111111111111111111111100000000')
digits[3].append('00000000000000000000000000000000111111111111111111111111111111111111111100000000')
digits[3].append('00000000000000000000000011111111111111111111111111111111111111111111111111111111')
digits[3].append('00000000000000000000000011111111111111111111111111111111111111111111111111111111')
digits[3].append('00000000000000000000000011111111111111111111111111111111111111111111111111111111')
digits[3].append('00000000000000000000000011111111111111111111111111111111111111111111111111111111')
digits[3].append('00000000000000000000000011111111111111111111111111111111111111111111111111111111')
digits[3].append('00000000000000000000000011111111111111111111111111111111111111111111111111111111')
digits[3].append('00000000000000000000000011111111111111111111111111111111111111111111111111111111')
digits[3].append('00000000000000000000000011111111111111111111111111111111111111111111111111111111')
digits[3].append('00000000000000000000000011111111111111111111111111111111111111111111111111111111')
digits[3].append('00000000000000000000000011111111111111111111111111111111111111111111111111111111')
digits[3].append('00000000000000000000000011111111111111111111111111111111111111111111111111111111')
digits[3].append('00000000000000000000000011111111111111111111111111111111111111111111111111111111')
digits[3].append('00000000000000000000000011111111111111111111111111111111111111111111111111111111')
digits[3].append('00000000000000000000000011111111111111111111111111111111111111111111111111111111')
digits[3].append('00000000000000000000000011111111111111111111111111111111111111111111111111111111')
digits[3].append('00000000000000000000000011111111111111111111111111111111111111111111111111111111')
digits[3].append('00000000000000000000000011111111111111111111111111111111111111111111111111111111')
digits[3].append('00000000000000000000000011111111111111111111111111111111111111111111111111111111')
digits[3].append('00000000000000000000000011111111111111111111111111111111111111111111111111111111')
digits[3].append('00000000000000000000000011111111111111111111111111111111111111111111111111111111')
digits[3].append('00000000000000000000000011111111111111111111111111111111111111111111111111111111')
digits[3].append('00000000000000000000000011111111111111111111111111111111111111111111111111111111')
digits[3].append('00000000000000000000000011111111111111111111111111111111111111111111111111111111')
digits[3].append('00000000000000000000000011111111111111111111111111111111111111111111111111111111')
digits[3].append('00000000000000000000000011111111111111111111111111111111111111111111111111111111')
digits[3].append('00000000000000000000000011111111111111111111111111111111111111111111111111111111')
digits[3].append('00000000000000000000000011111111111111111111111111111111111111111111111111111111')
digits[3].append('00000000000000000000000011111111111111111111111111111111111111111111111111111111')
digits[3].append('00000000000000000000000011111111111111111111111111111111111111111111111111111111')
digits[3].append('00000000000000000000000011111111111111111111111111111111111111111111111111111111')
digits[3].append('00000000000000000000000011111111111111111111111111111111111111111111111111111111')
digits[3].append('00000000000000000000000011111111111111111111111111111111111111111111111111111111')
digits[3].append('00000000000000000000000011111111111111111111111111111111111111111111111111111111')
digits[3].append('00000000000000000000000011111111111111111111111111111111111111111111111111111111')
digits[3].append('00000000000000000000000011111111111111111111111111111111111111111111111111111111')
digits[3].append('00000000000000000000000011111111111111111111111111111111111111111111111111111111')
digits[3].append('00000000000000000000000011111111111111111111111111111111111111111111111111111111')
digits[3].append('00000000000000000000000011111111111111111111111111111111111111111111111111111111')
digits[3].append('00000000000000000000000011111111111111111111111111111111111111111111111111111111')
digits[3].append('00000000000000000000000011111111111111111111111111111111111111111111111111111111')
digits[3].append('00000000000000000000000011111111111111111111111111111111111111111111111111111111')
digits[3].append('00000000000000000000000011111111111111111111111111111111111111111111111111111111')
digits[3].append('00000000000000000000000011111111111111111111111111111111111111111111111111111111')
digits[3].append('00000000000000000000000011111111111111111111111111111111111111111111111111111111')
digits[3].append('00000000000000000000000011111111111111111111111111111111111111111111111111111111')
digits[3].append('00000000000000000000000011111111111111111111111111111111111111111111111111111111')
digits[3].append('00000000000000000000000011111111111111111111111111111111111111111111111111111111')
digits[3].append('00000000000000000000000011111111111111111111111111111111111111111111111111111111')
digits[3].append('00000000000000000000000011111111111111111111111111111111111111111111111111111111')
digits[3].append('00000000000000000000000011111111111111111111111111111111111111111111111111111111')
digits[3].append('00000000000000000000000011111111111111111111111111111111111111111111111111111111')
digits[3].append('00000000000000000000000011111111111111111111111111111111111111111111111111111111')
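
# digits[4]: scanlines for the following glyph, same 80-pixel row format
# as above (presumably the numeral 4).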
digits[4].append('00000000111111111111111111111111111111111111111111111111111111111111111111111111')
digits[4].append('00000000111111111111111111111111111111111111111111111111111111111111111111111111')
digits[4].append('00000000111111111111111111111111111111111111111111111111111111111111111111111111')
digits[4].append('00000000111111111111111111111111111111111111111111111111111111111111111111111111')
digits[4].append('00000000111111111111111111111111111111111111111111111111111111111111111111111111')
digits[4].append('00000000111111111111111111111111111111111111111111111111111111111111111111111111')
digits[4].append('00000000111111111111111111111111111111111111111111111111111111111111111111111111')
digits[4].append('00000000111111111111111111111111111111111111111111111111111111111111111111111111')
digits[4].append('00000000111111111111111111111111111111111111111111111111111111111111111111111111')
digits[4].append('00000000111111111111111111111111111111111111111111111111111111111111111111111111')
digits[4].append('00000000111111111111111111111111111111111111111111111111111111111111111111111111')
digits[4].append('00000000111111111111111111111111111111111111111111111111111111111111111100000000')
digits[4].append('00000000111111111111111111111111111111111111111111111111111111111111111100000000')
digits[4].append('00000000111111111111111111111111111111111111111111111111111111111111111100000000')
digits[4].append('00000000111111111111111111111111111111111111111111111111111111111111111100000000')
digits[4].append('00000000111111111111111111111111111111111111111111111111111111111111111100000000')
digits[4].append('00000000111111111111111111111111111111111111111111111111111111111111111100000000')
digits[4].append('00000000111111111111111111111111111111111111111111111111111111111111111100000000')
digits[4].append('00000000111111111111111111111111111111111111111111111111111111111111111100000000')
digits[4].append('00000000111111111111111111111111111111111111111111111111111111111111111100000000')
digits[4].append('00000000111111111111111111111111111111111111111111111111111111111111111100000000')
digits[4].append('00000000111111111111111111111111111111111111111111111111111111111111111100000000')
digits[4].append('00000000111111111111111111111111111111111111111111111111111111111111111100000000')
digits[4].append('00000000111111111111111111111111111111111111111111111111111111111111111100000000')
digits[4].append('00000000111111111111111111111111111111111111111111111111111111111111111100000000')
digits[4].append('00000000111111111111111111111111111111111111111111111111111111111111111100000000')
digits[4].append('00000000111111111111111111111111111111111111111111111111111111111111111100000000')
digits[4].append('00000000111111111111111111111111111111111111111111111111111111111111111100000000')
digits[4].append('00000000111111111111111111111111111111111111111111111111111111111111111100000000')
digits[4].append('00000000111111111111111111111111111111111111111111111111111111111111111100000000')
digits[4].append('00000000111111111111111111111111111111111111111111111111111111111111111100000000')
digits[4].append('00000000111111111111111111111111111111111111111111111111111111111111111100000000')
digits[4].append('00000000111111111111111111111111111111111111111111111111111111111111111100000000')
digits[4].append('00000000111111111111111111111111111111111111111111111111111111111111111100000000')
digits[4].append('00000000111111111111111111111111111111111111111111111111111111111111111100000000')
digits[4].append('00000000111111111111111111111111111111111111111111111111111111111111111100000000')
digits[4].append('00000000111111111111111111111111111111111111111111111111111111111111111100000000')
digits[4].append('00000000111111111111111111111111111111111111111111111111111111111111111100000000')
digits[4].append('00000000111111111111111111111111111111111111111111111111111111111111111100000000')
digits[4].append('00000000111111111111111111111111111111111111111111111111111111111111111100000000')
digits[4].append('00000000111111111111111111111111111111111111111111111111111111111111111100000000')
digits[4].append('11111111111111111111111111111111111111111111111111111111111111111111111100000000')
digits[4].append('11111111111111111111111111111111111111111111111111111111111111111111111100000000')
digits[4].append('11111111111111111111111111111111111111111111111111111111111111111111111100000000')
digits[4].append('11111111111111111111111111111111111111111111111111111111111111111111111100000000')
digits[4].append('11111111111111111111111111111111111111111111111111111111111111111111111100000000')
digits[4].append('11111111111111111111111111111111111111111111111111111111111111111111111100000000')
digits[4].append('11111111111111111111111111111111111111111111111111111111111111111111111100000000')
digits[4].append('11111111111111111111111111111111111111111111111111111111111111111111111100000000')
digits[4].append('11111111111111111111111111111111111111111111111111111111111111111111111100000000')
digits[4].append('11111111111111111111111111111111000000000000000000000000000000000000000000000000')
digits[4].append('11111111111111111111111111111111000000000000000000000000000000000000000000000000')
digits[4].append('11111111111111111111111111111111000000000000000000000000000000000000000000000000')
digits[4].append('11111111111111111111111111111111000000000000000000000000000000000000000000000000')
digits[4].append('11111111111111111111111111111111000000000000000000000000000000000000000000000000')
digits[4].append('11111111111111111111111111111111000000000000000000000000000000000000000000000000')
digits[4].append('11111111111111111111111111111111000000000000000000000000000000000000000000000000')
digits[4].append('11111111111111111111111111111111000000000000000000000000000000000000000000000000')
digits[4].append('11111111111111111111111111111111000000000000000000000000000000000000000000000000')
digits[4].append('11111111111111111111111111111111000000000000000000000000000000000000000000000000')
digits[4].append('11111111111111111111111111111111000000000000000000000000000000000000000000000000')
digits[4].append('11111111111111111111111111111111000000000000000000000000000000000000000000000000')
digits[4].append('11111111111111111111111111111111000000000000000000000000000000000000000000000000')
digits[4].append('11111111111111111111111111111111000000000000000000000000000000000000000000000000')
digits[4].append('11111111111111111111111111111111000000000000000000000000000000000000000000000000')
digits[4].append('11111111111111111111111111111111000000000000000000000000000000000000000000000000')
digits[4].append('11111111111111111111111111111111000000000000000000000000000000000000000000000000')
digits[4].append('11111111111111111111111111111111000000000000000000000000000000000000000000000000')
digits[4].append('11111111111111111111111111111111000000000000000000000000000000000000000000000000')
digits[4].append('11111111111111111111111111111111000000000000000000000000000000000000000000000000')
digits[4].append('11111111111111111111111111111111000000000000000000000000000000000000000000000000')
digits[4].append('11111111111111111111111111111111000000000000000000000000000000000000000000000000')
digits[4].append('11111111111111111111111111111111000000000000000000000000000000000000000000000000')
digits[4].append('11111111111111111111111111111111000000000000000000000000000000000000000000000000')
digits[4].append('11111111111111111111111111111111111111111111111111111111000000000000000000000000')
digits[4].append('11111111111111111111111111111111111111111111111111111111000000000000000000000000')
digits[4].append('11111111111111111111111111111111111111111111111111111111000000000000000000000000')
digits[4].append('11111111111111111111111111111111111111111111111111111111111111110000000000000000')
digits[4].append('11111111111111111111111111111111111111111111111111111111111111110000000000000000')
digits[4].append('11111111111111111111111111111111111111111111111111111111111111110000000000000000')
digits[4].append('11111111111111111111111111111111111111111111111111111111111111110000000000000000')
digits[4].append('11111111111111111111111111111111111111111111111111111111111111110000000000000000')
digits[4].append('11111111111111111111111111111111111111111111111111111111111111110000000000000000')
digits[4].append('11111111111111111111111111111111111111111111111111111111111111110000000000000000')
digits[4].append('11111111111111111111111111111111111111111111111111111111111111111111111100000000')
digits[4].append('11111111111111111111111111111111111111111111111111111111111111111111111100000000')
digits[4].append('11111111111111111111111111111111111111111111111111111111111111111111111100000000')
digits[4].append('11111111111111111111111111111111111111111111111111111111111111111111111100000000')
digits[4].append('11111111111111111111111111111111111111111111111111111111111111111111111100000000')
digits[4].append('11111111111111111111111111111111111111111111111111111111111111111111111100000000')
digits[4].append('11111111111111111111111111111111111111111111111111111111111111111111111100000000')
digits[4].append('11111111111111111111111111111111111111111111111111111111111111111111111100000000')
digits[4].append('11111111111111111111111111111111111111111111111111111111111111111111111100000000')
digits[4].append('11111111111111111111111111111111111111111111111111111111111111111111111100000000')
digits[4].append('11111111111111111111111111111111111111111111111111111111111111111111111100000000')
digits[4].append('11111111111111111111111111111111111111111111111111111111111111111111111100000000')
digits[4].append('11111111111111111111111111111111111111111111111111111111111111111111111100000000')
digits[4].append('11111111111111111111111111111111111111111111111111111111111111111111111100000000')
digits[4].append('11111111111111111111111111111111111111111111111111111111111111111111111100000000')
digits[4].append('11111111111111111111111111111111111111111111111111111111111111111111111100000000')
digits[4].append('11111111111111111111111111111111000000001111111111111111111111111111111100000000')
digits[4].append('11111111111111111111111111111111000000001111111111111111111111111111111100000000')
digits[4].append('11111111111111111111111111111111000000001111111111111111111111111111111100000000')
digits[4].append('11111111111111111111111111111111000000001111111111111111111111111111111100000000')
digits[4].append('11111111111111111111111111111111000000001111111111111111111111111111111100000000')
digits[4].append('11111111111111111111111100000000000000001111111111111111111111111111111100000000')
digits[4].append('00000000000000001111111100000000000000001111111111111111111111111111111100000000')
digits[4].append('00000000000000000000000000000000000000001111111111111111111111111111111100000000')
digits[4].append('00000000000000000000000000000000000000001111111111111111111111111111111100000000')
digits[4].append('00000000000000000000000000000000000000001111111111111111111111111111111100000000')
digits[4].append('00000000000000000000000000000000000000001111111111111111111111111111111111111111')
digits[4].append('00000000000000000000000000000000000000001111111111111111111111111111111111111111')
digits[4].append('00000000000000000000000000000000000000001111111111111111111111111111111111111111')
digits[4].append('00000000000000000000000000000000000000001111111111111111111111111111111111111111')
digits[4].append('00000000000000000000000000000000000000001111111111111111111111111111111111111111')
digits[4].append('00000000000000000000000000000000000000001111111111111111111111111111111111111111')
digits[4].append('00000000000000000000000000000000000000001111111111111111111111111111111111111111')
digits[4].append('11111111111111111111111111111111000000001111111111111111111111111111111111111111')
digits[4].append('11111111111111111111111111111111000000001111111111111111111111111111111111111111')
digits[4].append('11111111111111111111111111111111000000001111111111111111111111111111111111111111')
digits[4].append('11111111111111111111111111111111000000001111111111111111111111111111111111111111')
digits[4].append('11111111111111111111111111111111000000001111111111111111111111111111111111111111')
digits[4].append('11111111111111111111111111111111000000001111111111111111111111111111111111111111')
digits[4].append('11111111111111111111111111111111000000001111111111111111111111111111111111111111')
digits[4].append('11111111111111111111111111111111000000001111111111111111111111111111111111111111')
digits[4].append('11111111111111111111111111111111000000001111111111111111111111111111111111111111')
digits[4].append('11111111111111111111111111111111000000001111111111111111111111111111111111111111')
digits[4].append('11111111111111111111111111111111000000001111111111111111111111111111111111111111')
digits[4].append('11111111111111111111111111111111000000001111111111111111111111111111111111111111')
digits[4].append('11111111111111111111111111111111000000001111111111111111111111111111111111111111')
digits[4].append('11111111111111111111111111111111000000001111111111111111111111111111111111111111')
digits[4].append('11111111111111111111111111111111000000001111111111111111111111111111111111111111')
digits[4].append('11111111111111111111111111111111000000001111111111111111111111111111111111111111')
digits[4].append('11111111111111111111111111111111000000001111111111111111111111111111111111111111')
digits[4].append('11111111111111111111111111111111000000001111111111111111111111111111111111111111')
digits[4].append('11111111111111111111111111111111000000001111111111111111111111111111111111111111')
digits[4].append('11111111111111111111111111111111000000001111111111111111111111111111111111111111')
digits[4].append('11111111111111111111111111111111000000001111111111111111111111111111111111111111')
digits[4].append('11111111111111111111111111111111000000001111111111111111111111111111111111111111')
digits[4].append('11111111111111111111111111111111000000001111111111111111111111111111111111111111')
digits[4].append('11111111111111111111111111111111000000001111111111111111111111111111111111111111')
digits[4].append('11111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[4].append('11111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[4].append('11111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[4].append('11111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[4].append('11111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[4].append('11111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[4].append('11111111111111111111111111111111111111111111111111111111111111111111111100000000')
digits[4].append('11111111111111111111111111111111111111111111111111111111111111111111111100000000')
digits[4].append('11111111111111111111111111111111111111111111111111111111111111111111111100000000')
digits[4].append('11111111111111111111111111111111111111111111111111111111111111111111111100000000')
digits[4].append('11111111111111111111111111111111111111111111111111111111111111111111111100000000')
digits[4].append('11111111111111111111111111111111111111111111111111111111111111111111111100000000')
digits[4].append('11111111111111111111111111111111111111111111111111111111111111111111111100000000')
digits[4].append('11111111111111111111111111111111111111111111111111111111111111111111111100000000')
digits[4].append('11111111111111111111111111111111111111111111111111111111111111111111111100000000')
digits[4].append('11111111111111111111111111111111111111111111111111111111111111111111111100000000')
digits[4].append('11111111111111111111111111111111111111111111111111111111111111111111111100000000')
digits[4].append('11111111111111111111111111111111111111111111111111111111111111111111111100000000')
digits[4].append('11111111111111111111111111111111111111111111111111111111111111111111111100000000')
digits[4].append('11111111111111111111111111111111111111111111111111111111111111111111111100000000')
digits[4].append('11111111111111111111111111111111111111111111111111111111111111111111111100000000')
digits[4].append('11111111111111111111111111111111111111111111111111111111111111111111111100000000')
digits[4].append('11111111111111111111111111111111111111111111111111111111111111111111111100000000')
digits[4].append('11111111111111111111111111111111111111111111111111111111111111111111111100000000')
digits[4].append('11111111111111111111111111111111111111111111111111111111111111111111111100000000')
digits[4].append('11111111111111111111111111111111111111111111111111111111111111111111111100000000')
digits[4].append('11111111111111111111111111111111111111111111111111111111111111111111111100000000')
digits[4].append('11111111111111111111111111111111111111111111111111111111111111111111111100000000')
digits[4].append('11111111111111111111111111111111111111111111111111111111111111111111111100000000')
digits[4].append('11111111111111111111111111111111111111111111111111111111111111111111111100000000')
digits[4].append('11111111111111111111111111111111111111111111111111111111111111111111111100000000')
digits[4].append('11111111111111111111111111111111111111111111111111111111111111111111111100000000')
digits[4].append('11111111111111111111111111111111111111111111111111111111111111111111111100000000')
digits[4].append('11111111111111111111111111111111111111111111111111111111111111111111111100000000')
digits[4].append('11111111111111111111111111111111111111111111111111111111111111111111111100000000')
digits[4].append('11111111111111111111111111111111111111111111111111111111111111111111111100000000')
digits[4].append('11111111111111111111111111111111111111111111111111111111111111111111111100000000')
digits[4].append('11111111111111111111111111111111111111111111111111111111111111111111111100000000')
digits[4].append('11111111111111111111111111111111111111111111111111111111111111111111111100000000')
digits[4].append('11111111111111111111111111111111111111111111111111111111111111111111111100000000')
digits[4].append('11111111111111111111111111111111111111111111111111111111111111110000000000000000')
digits[4].append('11111111111111111111111111111111111111111111111111111111111111110000000000000000')
digits[4].append('11111111111111111111111111111111111111111111111111111111111111110000000000000000')
digits[4].append('00000000111111111111111111111111111111111111111111111111111111110000000000000000')
digits[4].append('00000000111111111111111111111111111111111111111111111111111111110000000000000000')
digits[4].append('00000000111111111111111111111111111111111111111111111111111111110000000000000000')
digits[4].append('00000000111111111111111111111111111111111111111111111111111111110000000000000000')
digits[4].append('00000000111111111111111111111111111111111111111111111111111111110000000000000000')
digits[4].append('00000000111111111111111111111111111111111111111111111111111111110000000000000000')
digits[4].append('00000000111111111111111111111111111111111111111111111111111111110000000000000000')
digits[4].append('00000000111111111111111111111111111111111111111111111111000000000000000000000000')
digits[4].append('00000000000000001111111111111111111111111111111111111111000000000000000000000000')
digits[4].append('00000000000000001111111111111111111111111111111111111111000000000000000000000000')
digits[4].append('00000000000000001111111111111111111111111111111111111111000000000000000000000000')
digits[4].append('00000000000000001111111111111111111111111111111111111111000000000000000000000000')
digits[4].append('00000000000000000000000011111111111111111111111100000000000000000000000000000000')
digits[4].append('00000000000000000000000011111111111111111111111100000000000000000000000000000000')
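
# digits[5]: scanlines for the next glyph, same 80-pixel row format
# (presumably the numeral 5).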
digits[5].append('00000000000000000000000000000000111111111111111111111111000000000000000000000000')
digits[5].append('00000000000000000000000000000000111111111111111111111111000000000000000000000000')
digits[5].append('00000000000000000000000011111111111111111111111111111111111111110000000000000000')
digits[5].append('00000000000000000000000011111111111111111111111111111111111111110000000000000000')
digits[5].append('00000000000000000000000011111111111111111111111111111111111111110000000000000000')
digits[5].append('00000000000000000000000011111111111111111111111111111111111111110000000000000000')
digits[5].append('00000000000000000000000011111111111111111111111111111111111111111111111100000000')
digits[5].append('00000000000000001111111111111111111111111111111111111111111111111111111100000000')
digits[5].append('00000000000000001111111111111111111111111111111111111111111111111111111100000000')
digits[5].append('00000000000000001111111111111111111111111111111111111111111111111111111100000000')
digits[5].append('00000000000000001111111111111111111111111111111111111111111111111111111100000000')
digits[5].append('00000000000000001111111111111111111111111111111111111111111111111111111100000000')
digits[5].append('00000000000000001111111111111111111111111111111111111111111111111111111100000000')
digits[5].append('00000000000000001111111111111111111111111111111111111111111111111111111100000000')
digits[5].append('00000000000000001111111111111111111111111111111111111111111111111111111100000000')
digits[5].append('00000000000000001111111111111111111111111111111111111111111111111111111100000000')
digits[5].append('00000000000000001111111111111111111111111111111111111111111111111111111111111111')
digits[5].append('00000000000000001111111111111111111111111111111111111111111111111111111111111111')
digits[5].append('00000000111111111111111111111111111111111111111111111111111111111111111111111111')
digits[5].append('00000000111111111111111111111111111111111111111111111111111111111111111111111111')
digits[5].append('00000000111111111111111111111111111111111111111111111111111111111111111111111111')
digits[5].append('00000000111111111111111111111111111111111111111111111111111111111111111111111111')
digits[5].append('00000000111111111111111111111111111111111111111111111111111111111111111111111111')
digits[5].append('00000000111111111111111111111111111111111111111111111111111111111111111111111111')
digits[5].append('00000000111111111111111111111111111111111111111111111111111111111111111111111111')
digits[5].append('00000000111111111111111111111111111111111111111111111111111111111111111111111111')
digits[5].append('00000000111111111111111111111111111111111111111111111111111111111111111111111111')
digits[5].append('00000000111111111111111111111111111111111111111111111111111111111111111111111111')
digits[5].append('00000000111111111111111111111111111111111111111111111111111111111111111111111111')
digits[5].append('00000000111111111111111111111111111111111111111111111111111111111111111111111111')
digits[5].append('00000000111111111111111111111111111111111111111111111111111111111111111111111111')
digits[5].append('00000000111111111111111111111111111111111111111111111111111111111111111111111111')
digits[5].append('00000000111111111111111111111111111111111111111111111111111111111111111111111111')
digits[5].append('00000000111111111111111111111111111111111111111111111111111111111111111111111111')
digits[5].append('00000000111111111111111111111111111111111111111111111111111111111111111111111111')
digits[5].append('00000000111111111111111111111111111111111111111111111111111111111111111111111111')
digits[5].append('00000000111111111111111111111111111111111111111111111111111111111111111111111111')
digits[5].append('00000000111111111111111111111111111111111111111111111111111111111111111111111111')
digits[5].append('00000000111111111111111111111111111111111111111111111111111111111111111111111111')
digits[5].append('00000000111111111111111111111111111111111111111111111111111111111111111111111111')
digits[5].append('00000000111111111111111111111111111111111111111111111111111111111111111111111111')
digits[5].append('00000000111111111111111111111111111111111111111111111111111111111111111111111111')
digits[5].append('00000000111111111111111111111111111111111111111111111111111111111111111111111111')
digits[5].append('00000000111111111111111111111111111111111111111111111111111111111111111111111111')
digits[5].append('00000000111111111111111111111111111111111111111111111111111111111111111111111111')
digits[5].append('00000000111111111111111111111111111111111111111111111111111111111111111111111111')
digits[5].append('00000000111111111111111111111111111111111111111111111111111111111111111111111111')
digits[5].append('00000000111111111111111111111111111111111111111111111111111111111111111111111111')
digits[5].append('00000000111111111111111111111111111111111111111111111111111111111111111111111111')
digits[5].append('00000000111111111111111111111111111111111111111111111111111111111111111111111111')
digits[5].append('00000000111111111111111111111111111111111111111111111111111111111111111111111111')
digits[5].append('00000000111111111111111111111111111111111111111111111111111111111111111111111111')
digits[5].append('00000000111111111111111111111111111111111111111111111111111111111111111111111111')
digits[5].append('00000000111111111111111111111111111111111111111111111111111111111111111111111111')
digits[5].append('00000000111111111111111111111111111111111111111111111111111111111111111111111111')
digits[5].append('00000000111111111111111111111111111111110000000011111111111111111111111111111111')
digits[5].append('00000000111111111111111111111111111111110000000011111111111111111111111111111111')
digits[5].append('11111111111111111111111111111111111111110000000011111111111111111111111111111111')
digits[5].append('11111111111111111111111111111111111111110000000011111111111111111111111111111111')
digits[5].append('11111111111111111111111111111111111111110000000011111111111111111111111111111111')
digits[5].append('11111111111111111111111111111111111111110000000011111111111111111111111111111111')
digits[5].append('11111111111111111111111111111111111111110000000011111111111111111111111111111111')
digits[5].append('11111111111111111111111111111111111111110000000011111111111111111111111111111111')
digits[5].append('11111111111111111111111111111111111111110000000011111111111111111111111111111111')
digits[5].append('11111111111111111111111111111111111111110000000011111111111111111111111111111111')
digits[5].append('11111111111111111111111111111111111111110000000011111111111111111111111111111111')
digits[5].append('11111111111111111111111111111111111111110000000011111111111111111111111111111111')
digits[5].append('11111111111111111111111111111111111111110000000011111111111111111111111111111111')
digits[5].append('11111111111111111111111111111111111111110000000011111111111111111111111111111111')
digits[5].append('11111111111111111111111111111111111111110000000011111111111111111111111111111111')
digits[5].append('11111111111111111111111111111111111111110000000011111111111111111111111111111111')
digits[5].append('11111111111111111111111111111111111111110000000011111111111111111111111111111111')
digits[5].append('11111111111111111111111111111111111111110000000000000000000000000000000000000000')
digits[5].append('11111111111111111111111111111111111111110000000000000000000000000000000000000000')
digits[5].append('11111111111111111111111111111111111111110000000000000000000000000000000000000000')
digits[5].append('11111111111111111111111111111111111111110000000000000000000000000000000000000000')
digits[5].append('11111111111111111111111111111111111111110000000000000000000000000000000000000000')
digits[5].append('11111111111111111111111111111111111111110000000000000000000000000000000000000000')
digits[5].append('11111111111111111111111111111111111111110000000000000000000000000000000000000000')
digits[5].append('11111111111111111111111111111111111111110000000000000000000000000000000000000000')
digits[5].append('11111111111111111111111111111111111111110000000000000000000000000000000000000000')
digits[5].append('11111111111111111111111111111111111111110000000000000000000000000000000000000000')
digits[5].append('11111111111111111111111111111111111111110000000000000000000000000000000000000000')
digits[5].append('11111111111111111111111111111111111111111111111111111111111111110000000000000000')
digits[5].append('11111111111111111111111111111111111111111111111111111111111111110000000000000000')
digits[5].append('11111111111111111111111111111111111111111111111111111111111111110000000000000000')
digits[5].append('11111111111111111111111111111111111111111111111111111111111111111111111100000000')
digits[5].append('11111111111111111111111111111111111111111111111111111111111111111111111100000000')
digits[5].append('11111111111111111111111111111111111111111111111111111111111111111111111100000000')
digits[5].append('11111111111111111111111111111111111111111111111111111111111111111111111100000000')
digits[5].append('11111111111111111111111111111111111111111111111111111111111111111111111100000000')
digits[5].append('11111111111111111111111111111111111111111111111111111111111111111111111100000000')
digits[5].append('11111111111111111111111111111111111111111111111111111111111111111111111100000000')
digits[5].append('11111111111111111111111111111111111111111111111111111111111111111111111100000000')
digits[5].append('11111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[5].append('11111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[5].append('11111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[5].append('11111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[5].append('11111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[5].append('11111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[5].append('11111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[5].append('11111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[5].append('11111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[5].append('11111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[5].append('11111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[5].append('11111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[5].append('11111111111111111111111111111111111111110000000011111111111111111111111111111111')
digits[5].append('11111111111111111111111111111111111111110000000011111111111111111111111111111111')
digits[5].append('11111111111111111111111111111111111111110000000011111111111111111111111111111111')
digits[5].append('11111111111111111111111111111111111111110000000011111111111111111111111111111111')
digits[5].append('11111111111111111111111111111111111111110000000011111111111111111111111111111111')
digits[5].append('11111111111111111111111111111111111111110000000011111111111111111111111111111111')
digits[5].append('11111111111111111111111111111111111111110000000011111111111111111111111111111111')
digits[5].append('11111111111111111111111111111111111111110000000011111111111111111111111111111111')
digits[5].append('11111111111111111111111111111111111111110000000011111111111111111111111111111111')
digits[5].append('11111111111111111111111111111111111111110000000011111111111111111111111111111111')
digits[5].append('11111111111111111111111111111111111111110000000011111111111111111111111111111111')
digits[5].append('11111111111111111111111111111111111111110000000011111111111111111111111111111111')
digits[5].append('11111111111111111111111111111111111111110000000011111111111111111111111111111111')
digits[5].append('11111111111111111111111111111111111111110000000011111111111111111111111111111111')
digits[5].append('11111111111111111111111111111111111111110000000011111111111111111111111111111111')
digits[5].append('11111111111111111111111111111111111111110000000011111111111111111111111111111111')
digits[5].append('11111111111111111111111111111111111111110000000011111111111111111111111111111111')
digits[5].append('11111111111111111111111111111111111111110000000011111111111111111111111111111111')
digits[5].append('11111111111111111111111111111111111111110000000011111111111111111111111111111111')
digits[5].append('11111111111111111111111111111111111111110000000011111111111111111111111111111111')
digits[5].append('11111111111111111111111111111111111111110000000011111111111111111111111111111111')
digits[5].append('11111111111111111111111111111111111111110000000011111111111111111111111111111111')
digits[5].append('11111111111111111111111111111111111111110000000011111111111111111111111111111111')
digits[5].append('11111111111111111111111111111111111111110000000011111111111111111111111111111111')
digits[5].append('11111111111111111111111111111111111111110000000011111111111111111111111111111111')
digits[5].append('11111111111111111111111111111111111111110000000011111111111111111111111111111111')
digits[5].append('11111111111111111111111111111111111111110000000011111111111111111111111111111111')
digits[5].append('11111111111111111111111111111111111111110000000011111111111111111111111111111111')
digits[5].append('11111111111111111111111111111111111111110000000011111111111111111111111111111111')
digits[5].append('11111111111111111111111111111111111111110000000011111111111111111111111111111111')
digits[5].append('11111111111111111111111111111111111111110000000011111111111111111111111111111111')
digits[5].append('11111111111111111111111111111111111111110000000011111111111111111111111111111111')
digits[5].append('11111111111111111111111111111111111111110000000011111111111111111111111111111111')
digits[5].append('11111111111111111111111111111111111111110000000011111111111111111111111111111111')
digits[5].append('11111111111111111111111111111111111111110000000011111111111111111111111111111111')
digits[5].append('11111111111111111111111111111111111111110000000011111111111111111111111111111111')
digits[5].append('11111111111111111111111111111111111111110000000011111111111111111111111111111111')
digits[5].append('11111111111111111111111111111111111111110000000011111111111111111111111111111111')
digits[5].append('11111111111111111111111111111111111111110000000011111111111111111111111111111111')
digits[5].append('11111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[5].append('11111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[5].append('11111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[5].append('11111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[5].append('11111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[5].append('11111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[5].append('11111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[5].append('11111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[5].append('11111111111111111111111111111111111111111111111111111111111111111111111111111111')
digits[5].append('00000000111111111111111111111111111111111111111111111111111111111111111111111111')
digits[5].append('00000000111111111111111111111111111111111111111111111111111111111111111111111111')
digits[5].append('00000000111111111111111111111111111111111111111111111111111111111111111111111111')
digits[5].append('00000000111111111111111111111111111111111111111111111111111111111111111111111111')
digits[5].append('00000000111111111111111111111111111111111111111111111111111111111111111111111111')
digits[5].append('00000000111111111111111111111111111111111111111111111111111111111111111111111111')
digits[5].append('00000000111111111111111111111111111111111111111111111111111111111111111111111111')
digits[5].append('00000000111111111111111111111111111111111111111111111111111111111111111111111111')
digits[5].append('00000000111111111111111111111111111111111111111111111111111111111111111111111111')
digits[5].append('00000000111111111111111111111111111111111111111111111111111111111111111111111111')
digits[5].append('00000000111111111111111111111111111111111111111111111111111111111111111111111111')
digits[5].append('00000000111111111111111111111111111111111111111111111111111111111111111111111111')
digits[5].append('00000000111111111111111111111111111111111111111111111111111111111111111111111111')
digits[5].append('00000000111111111111111111111111111111111111111111111111111111111111111111111111')
digits[5].append('00000000111111111111111111111111111111111111111111111111111111111111111111111111')
digits[5].append('00000000111111111111111111111111111111111111111111111111111111111111111111111111')
digits[5].append('00000000111111111111111111111111111111111111111111111111111111111111111111111111')
digits[5].append('00000000111111111111111111111111111111111111111111111111111111111111111111111111')
digits[5].append('00000000111111111111111111111111111111111111111111111111111111111111111111111111')
digits[5].append('00000000111111111111111111111111111111111111111111111111111111111111111111111111')
digits[5].append('00000000111111111111111111111111111111111111111111111111111111111111111111111111')
digits[5].append('00000000111111111111111111111111111111111111111111111111111111111111111111111111')
digits[5].append('00000000111111111111111111111111111111111111111111111111111111111111111111111111')
digits[5].append('00000000111111111111111111111111111111111111111111111111111111111111111111111111')
digits[5].append('00000000111111111111111111111111111111111111111111111111111111111111111111111111')
digits[5].append('00000000111111111111111111111111111111111111111111111111111111111111111111111111')
digits[5].append('00000000111111111111111111111111111111111111111111111111111111111111111111111111')
digits[5].append('00000000111111111111111111111111111111111111111111111111111111111111111111111111')
digits[5].append('00000000111111111111111111111111111111111111111111111111111111111111111100000000')
digits[5].append('00000000111111111111111111111111111111111111111111111111111111111111111100000000')
digits[5].append('00000000111111111111111111111111111111111111111111111111111111111111111100000000')
digits[5].append('00000000111111111111111111111111111111111111111111111111111111111111111100000000')
digits[5].append('00000000111111111111111111111111111111111111111111111111111111111111111100000000')
digits[5].append('00000000111111111111111111111111111111111111111111111111111111111111111100000000')
digits[5].append('00000000111111111111111111111111111111111111111111111111111111111111111100000000')
digits[5].append('00000000000000001111111111111111111111111111111111111111111111111111111100000000')
digits[5].append('00000000000000001111111111111111111111111111111111111111111111111111111100000000')
digits[5].append('00000000000000001111111111111111111111111111111111111111111111111111111100000000')
digits[5].append('00000000000000001111111111111111111111111111111111111111111111111111111100000000')
digits[5].append('00000000000000001111111111111111111111111111111111111111111111111111111100000000')
digits[5].append('00000000000000001111111111111111111111111111111111111111111111110000000000000000')
digits[5].append('00000000000000001111111111111111111111111111111111111111111111110000000000000000')
digits[5].append('00000000000000001111111111111111111111111111111111111111111111110000000000000000')
digits[5].append('00000000000000000000000011111111111111111111111111111111111111110000000000000000')
digits[5].append('00000000000000000000000011111111111111111111111111111111111111110000000000000000')
digits[5].append('00000000000000000000000011111111111111111111111111111111000000000000000000000000')
digits[5].append('00000000000000000000000000000000111111111111111111111111000000000000000000000000')
### print(digits)
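    # Compose the output image: one square per digit bitmap, stacked vertically.
    # SQUAREWIDTH, NUMBERSQUARES, INNERSQUAREWIDTH, PNGFILENAME, drawbox and
    # drawhorizontalpixels are assumed to be defined earlier in this script.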
im = Image.new('RGB', (SQUAREWIDTH, SQUAREWIDTH * NUMBERSQUARES), (255, 255, 255))
for box in range(0, NUMBERSQUARES):
x = 0
y = box * SQUAREWIDTH
drawbox(im, x, y, SQUAREWIDTH)
drawbox(im, x+1, y+1, SQUAREWIDTH-2)
for coords in [ (5, 5), (5, 130), (5, 255), (255, 5), (255, 130), (255, 255) ]:
drawbox(im, x+coords[0], y+coords[1], INNERSQUAREWIDTH)
        for i, pixels in enumerate(digits[box]):
            # print(box, pixels)
            drawhorizontalpixels(im, x + 125, y + 75 + i, pixels)
im.save(PNGFILENAME)
return 0
##################################################################################
progname = os.path.basename(sys.argv[0])
sys.exit(main())
# end of file
| 98.94527 | 112 | 0.876967 |
a7e1652029118b8c37ecf1d5259d3bbedb9864cb | 1,091 | py | Python | src2/dispatch/rock_paper_scissor_3.py | unica-isde/isde | a9603d8b8d1a347447cec483108132aa1e8457eb | [
"Apache-2.0"
] | 7 | 2021-01-20T09:11:53.000Z | 2022-03-15T12:19:06.000Z | src2/dispatch/rock_paper_scissor_3.py | unica-isde/isde | a9603d8b8d1a347447cec483108132aa1e8457eb | [
"Apache-2.0"
] | null | null | null | src2/dispatch/rock_paper_scissor_3.py | unica-isde/isde | a9603d8b8d1a347447cec483108132aa1e8457eb | [
"Apache-2.0"
] | 10 | 2020-11-01T09:47:02.000Z | 2021-11-02T12:59:50.000Z | class Weapon:
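    """Rock-paper-scissors weapon whose matchups are resolved through a
    precomputed lookup table instead of chained conditionals."""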
possible_weapons = {"Paper", "Rock", "Scissor"}
lookup_table = {("Scissor", "Scissor"): "TIE",
("Scissor", "Rock"): "Rock",
("Scissor", "Paper"): "Scissor",
("Rock", "Scissor"): "Rock",
("Rock", "Rock"): "TIE",
("Rock", "Paper"): "Paper",
("Paper", "Scissor"): "Scissor",
("Paper", "Rock"): "Paper",
("Paper", "Paper"): "TIE"
}
def __init__(self, name):
if name in self.possible_weapons:
self.name = name
else:
raise ValueError(name, 'is not a possible weapon')
def __str__(self):
return self.name
def fight_against(self, other_weapon):
return self.lookup_table[self.name, other_weapon.name]
if __name__ == '__main__':
print("\n\n")
weapons = [ Weapon(name) for name in Weapon.possible_weapons ]
for w1 in weapons:
for w2 in weapons:
print(w1, 'vs', w2, '->', w1.fight_against(w2))
| 31.171429 | 66 | 0.484876 |
ecd7606274b625cc79da06208ef55c448756fd5f | 2,082 | py | Python | gui/component/datasource.py | acc-cosc-1336/cosc-1336-fall-2017-RobScaley | 44c807ac95a6348f5643941d745b9232a127610e | [
"MIT"
] | null | null | null | gui/component/datasource.py | acc-cosc-1336/cosc-1336-fall-2017-RobScaley | 44c807ac95a6348f5643941d745b9232a127610e | [
"MIT"
] | null | null | null | gui/component/datasource.py | acc-cosc-1336/cosc-1336-fall-2017-RobScaley | 44c807ac95a6348f5643941d745b9232a127610e | [
"MIT"
] | null | null | null | class DataSource(object):
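    """Observer-style data source for Tk widgets: listeners register for
    virtual events, and record navigation/mutation broadcasts those events."""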
def __init__(self, parent, diction):
self.data = diction.dictionary
self.parent = parent
self.keys_list = list(self.data.keys())
self.keys_list_index = 0
self.event_listeners = {}
def broadcastEvent(self, event_name):
for listener in self.event_listeners[event_name]:
listener.widget.event_generate(event_name, when='tail')
def addListener(self, listener):
listener.widget.bind(listener.event_name, listener.callback)
listeners = [listener]
if listener.event_name in self.event_listeners.keys():
self.event_listeners[listener.event_name] += listeners
else:
self.event_listeners[listener.event_name] = listeners
    def set_current_record(self, key):
        self.keys_list_index = self.keys_list.index(key)
        self.broadcastEvent("<<navigate_record>>")
    def current_record(self):
        return self.data[self.keys_list[self.keys_list_index]]
    def previous_record(self):
        if self.keys_list_index > 0:
            self.keys_list_index -= 1
        self.broadcastEvent("<<previous_record>>")
    def next_record(self):
        if self.keys_list_index < len(self.keys_list) - 1:
            self.keys_list_index += 1
        self.broadcastEvent("<<next_record>>")
    def request_update(self):
        self.broadcastEvent("<<update_record>>")
def request_delete(self):
self.parent.event_generate("<<delete_record>>", when="tail")
def update_record(self, record):
self.data[record[0]] = record
    def delete_record(self):
        del self.data[self.keys_list[self.keys_list_index]]
        del self.keys_list[self.keys_list_index]
        # After removal the index already points at the next record; just keep
        # it in bounds when the last record was deleted.
        if self.keys_list and self.keys_list_index >= len(self.keys_list):
            self.keys_list_index = len(self.keys_list) - 1
def request_add(self):
self.parent.event_generate("<<add_record>>", when="tail")
| 29.323944 | 69 | 0.607589 |
10165f160b8e12f86adc49c98b5204d470b5de93 | 84 | py | Python | typings/sys_info.py | Argmaster/PyR3 | 6786bcb6a101fe4bd4cc50fe43767b8178504b15 | [
"MIT"
] | 2 | 2021-12-12T18:51:52.000Z | 2022-02-23T09:49:16.000Z | src/blender/blender_autocomplete-master/2.92/sys_info.py | JonasWard/ClayAdventures | a716445ac690e4792e70658319aa1d5299f9c9e9 | [
"MIT"
] | 2 | 2021-11-08T12:09:02.000Z | 2021-12-12T23:01:12.000Z | src/blender/blender_autocomplete-master/2.92/sys_info.py | JonasWard/ClayAdventures | a716445ac690e4792e70658319aa1d5299f9c9e9 | [
"MIT"
] | null | null | null | import sys
import typing
def write_sysinfo(filepath):
'''
    '''Write Blender system information to the given filepath (autocomplete stub).'''
| 7.636364 | 28 | 0.583333 |
c97b6e1178f1aeb136c35a9401911545962739a6 | 27,914 | py | Python | vnpy/gateway/ctp/ctp_gateway.py | ZJMXX/vnpy | 161d88774c7a161a4d024a9daa50447a73a1c43e | [
"MIT"
] | 2 | 2021-04-22T09:44:19.000Z | 2021-04-22T17:23:16.000Z | vnpy/gateway/ctp/ctp_gateway.py | ZJMXX/vnpy | 161d88774c7a161a4d024a9daa50447a73a1c43e | [
"MIT"
] | null | null | null | vnpy/gateway/ctp/ctp_gateway.py | ZJMXX/vnpy | 161d88774c7a161a4d024a9daa50447a73a1c43e | [
"MIT"
] | null | null | null | """
"""
import sys
import pytz
from datetime import datetime
from time import sleep
from vnpy.api.ctp import (
MdApi,
TdApi,
THOST_FTDC_OAS_Submitted,
THOST_FTDC_OAS_Accepted,
THOST_FTDC_OAS_Rejected,
THOST_FTDC_OST_NoTradeQueueing,
THOST_FTDC_OST_PartTradedQueueing,
THOST_FTDC_OST_AllTraded,
THOST_FTDC_OST_Canceled,
THOST_FTDC_D_Buy,
THOST_FTDC_D_Sell,
THOST_FTDC_PD_Long,
THOST_FTDC_PD_Short,
THOST_FTDC_OPT_LimitPrice,
THOST_FTDC_OPT_AnyPrice,
THOST_FTDC_OF_Open,
THOST_FTDC_OFEN_Close,
THOST_FTDC_OFEN_CloseYesterday,
THOST_FTDC_OFEN_CloseToday,
THOST_FTDC_PC_Futures,
THOST_FTDC_PC_Options,
THOST_FTDC_PC_SpotOption,
THOST_FTDC_PC_Combination,
THOST_FTDC_CP_CallOptions,
THOST_FTDC_CP_PutOptions,
THOST_FTDC_HF_Speculation,
THOST_FTDC_CC_Immediately,
THOST_FTDC_FCC_NotForceClose,
THOST_FTDC_TC_GFD,
THOST_FTDC_VC_AV,
THOST_FTDC_TC_IOC,
THOST_FTDC_VC_CV,
THOST_FTDC_AF_Delete
)
from vnpy.trader.constant import (
Direction,
Offset,
Exchange,
OrderType,
Product,
Status,
OptionType
)
from vnpy.trader.gateway import BaseGateway
from vnpy.trader.object import (
TickData,
OrderData,
TradeData,
PositionData,
AccountData,
ContractData,
OrderRequest,
CancelRequest,
SubscribeRequest,
)
from vnpy.trader.utility import get_folder_path
from vnpy.trader.event import EVENT_TIMER
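# Mapping tables translating between CTP API enum values and VN Trader constants.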
STATUS_CTP2VT = {
THOST_FTDC_OAS_Submitted: Status.SUBMITTING,
THOST_FTDC_OAS_Accepted: Status.SUBMITTING,
THOST_FTDC_OAS_Rejected: Status.REJECTED,
THOST_FTDC_OST_NoTradeQueueing: Status.NOTTRADED,
THOST_FTDC_OST_PartTradedQueueing: Status.PARTTRADED,
THOST_FTDC_OST_AllTraded: Status.ALLTRADED,
THOST_FTDC_OST_Canceled: Status.CANCELLED
}
DIRECTION_VT2CTP = {
Direction.LONG: THOST_FTDC_D_Buy,
Direction.SHORT: THOST_FTDC_D_Sell
}
DIRECTION_CTP2VT = {v: k for k, v in DIRECTION_VT2CTP.items()}
DIRECTION_CTP2VT[THOST_FTDC_PD_Long] = Direction.LONG
DIRECTION_CTP2VT[THOST_FTDC_PD_Short] = Direction.SHORT
ORDERTYPE_VT2CTP = {
OrderType.LIMIT: THOST_FTDC_OPT_LimitPrice,
OrderType.MARKET: THOST_FTDC_OPT_AnyPrice
}
ORDERTYPE_CTP2VT = {v: k for k, v in ORDERTYPE_VT2CTP.items()}
OFFSET_VT2CTP = {
Offset.OPEN: THOST_FTDC_OF_Open,
Offset.CLOSE: THOST_FTDC_OFEN_Close,
Offset.CLOSETODAY: THOST_FTDC_OFEN_CloseToday,
Offset.CLOSEYESTERDAY: THOST_FTDC_OFEN_CloseYesterday,
}
OFFSET_CTP2VT = {v: k for k, v in OFFSET_VT2CTP.items()}
EXCHANGE_CTP2VT = {
"CFFEX": Exchange.CFFEX,
"SHFE": Exchange.SHFE,
"CZCE": Exchange.CZCE,
"DCE": Exchange.DCE,
"INE": Exchange.INE
}
PRODUCT_CTP2VT = {
THOST_FTDC_PC_Futures: Product.FUTURES,
THOST_FTDC_PC_Options: Product.OPTION,
THOST_FTDC_PC_SpotOption: Product.OPTION,
THOST_FTDC_PC_Combination: Product.SPREAD
}
OPTIONTYPE_CTP2VT = {
THOST_FTDC_CP_CallOptions: OptionType.CALL,
THOST_FTDC_CP_PutOptions: OptionType.PUT
}
MAX_FLOAT = sys.float_info.max
CHINA_TZ = pytz.timezone("Asia/Shanghai")
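# Module-level caches populated during contract queries and shared between
# the market data and trading APIs.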
symbol_exchange_map = {}
symbol_name_map = {}
symbol_size_map = {}
class CtpGateway(BaseGateway):
"""
    VN Trader Gateway for CTP.
"""
    default_setting = {
        "User ID": "",
        "Password": "",
        "Broker ID": "",
        "Trading Server": "",
        "Market Data Server": "",
        "Product Name": "",
        "Auth Code": "",
        "Product Info": ""
    }
exchanges = list(EXCHANGE_CTP2VT.values())
def __init__(self, event_engine):
"""Constructor"""
super().__init__(event_engine, "CTP")
self.td_api = CtpTdApi(self)
self.md_api = CtpMdApi(self)
def connect(self, setting: dict):
""""""
        userid = setting["User ID"]
        password = setting["Password"]
        brokerid = setting["Broker ID"]
        td_address = setting["Trading Server"]
        md_address = setting["Market Data Server"]
        appid = setting["Product Name"]
        auth_code = setting["Auth Code"]
        product_info = setting["Product Info"]
if (
(not td_address.startswith("tcp://"))
and (not td_address.startswith("ssl://"))
):
td_address = "tcp://" + td_address
if (
(not md_address.startswith("tcp://"))
and (not md_address.startswith("ssl://"))
):
md_address = "tcp://" + md_address
self.td_api.connect(td_address, userid, password, brokerid, auth_code, appid, product_info)
self.md_api.connect(md_address, userid, password, brokerid)
self.init_query()
def subscribe(self, req: SubscribeRequest):
""""""
self.md_api.subscribe(req)
def send_order(self, req: OrderRequest):
""""""
if req.type == OrderType.RFQ:
vt_orderid = self.td_api.send_rfq(req)
else:
vt_orderid = self.td_api.send_order(req)
return vt_orderid
def cancel_order(self, req: CancelRequest):
""""""
self.td_api.cancel_order(req)
def query_account(self):
""""""
self.td_api.query_account()
def query_position(self):
""""""
self.td_api.query_position()
def close(self):
""""""
self.td_api.close()
self.md_api.close()
def write_error(self, msg: str, error: dict):
""""""
error_id = error["ErrorID"]
error_msg = error["ErrorMsg"]
msg = f"{msg},代码:{error_id},信息:{error_msg}"
self.write_log(msg)
def process_timer_event(self, event):
""""""
self.count += 1
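        # Run at most one query every two timer ticks to avoid flooding the server.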
if self.count < 2:
return
self.count = 0
func = self.query_functions.pop(0)
func()
self.query_functions.append(func)
self.md_api.update_date()
def init_query(self):
""""""
self.count = 0
self.query_functions = [self.query_account, self.query_position]
self.event_engine.register(EVENT_TIMER, self.process_timer_event)
class CtpMdApi(MdApi):
""""""
def __init__(self, gateway):
"""Constructor"""
super(CtpMdApi, self).__init__()
self.gateway = gateway
self.gateway_name = gateway.gateway_name
self.reqid = 0
self.connect_status = False
self.login_status = False
self.subscribed = set()
self.userid = ""
self.password = ""
self.brokerid = ""
self.current_date = datetime.now().strftime("%Y%m%d")
def onFrontConnected(self):
"""
Callback when front server is connected.
"""
self.gateway.write_log("行情服务器连接成功")
self.login()
def onFrontDisconnected(self, reason: int):
"""
Callback when front server is disconnected.
"""
self.login_status = False
self.gateway.write_log(f"行情服务器连接断开,原因{reason}")
def onRspUserLogin(self, data: dict, error: dict, reqid: int, last: bool):
"""
Callback when user is logged in.
"""
if not error["ErrorID"]:
self.login_status = True
self.gateway.write_log("行情服务器登录成功")
for symbol in self.subscribed:
self.subscribeMarketData(symbol)
else:
self.gateway.write_error("行情服务器登录失败", error)
def onRspError(self, error: dict, reqid: int, last: bool):
"""
        Callback when error occurred.
"""
self.gateway.write_error("行情接口报错", error)
def onRspSubMarketData(self, data: dict, error: dict, reqid: int, last: bool):
""""""
if not error or not error["ErrorID"]:
return
self.gateway.write_error("行情订阅失败", error)
def onRtnDepthMarketData(self, data: dict):
"""
Callback of tick data update.
"""
# Filter data update with no timestamp
if not data["UpdateTime"]:
return
symbol = data["InstrumentID"]
exchange = symbol_exchange_map.get(symbol, "")
if not exchange:
return
timestamp = f"{self.current_date} {data['UpdateTime']}.{int(data['UpdateMillisec']/100)}"
dt = datetime.strptime(timestamp, "%Y%m%d %H:%M:%S.%f")
dt = CHINA_TZ.localize(dt)
tick = TickData(
symbol=symbol,
exchange=exchange,
datetime=dt,
name=symbol_name_map[symbol],
volume=data["Volume"],
open_interest=data["OpenInterest"],
last_price=adjust_price(data["LastPrice"]),
limit_up=data["UpperLimitPrice"],
limit_down=data["LowerLimitPrice"],
open_price=adjust_price(data["OpenPrice"]),
high_price=adjust_price(data["HighestPrice"]),
low_price=adjust_price(data["LowestPrice"]),
pre_close=adjust_price(data["PreClosePrice"]),
bid_price_1=adjust_price(data["BidPrice1"]),
ask_price_1=adjust_price(data["AskPrice1"]),
bid_volume_1=data["BidVolume1"],
ask_volume_1=data["AskVolume1"],
gateway_name=self.gateway_name
)
if data["BidVolume2"] or data["AskVolume2"]:
tick.bid_price_2 = adjust_price(data["BidPrice2"])
tick.bid_price_3 = adjust_price(data["BidPrice3"])
tick.bid_price_4 = adjust_price(data["BidPrice4"])
tick.bid_price_5 = adjust_price(data["BidPrice5"])
tick.ask_price_2 = adjust_price(data["AskPrice2"])
tick.ask_price_3 = adjust_price(data["AskPrice3"])
tick.ask_price_4 = adjust_price(data["AskPrice4"])
tick.ask_price_5 = adjust_price(data["AskPrice5"])
tick.bid_volume_2 = data["BidVolume2"]
tick.bid_volume_3 = data["BidVolume3"]
tick.bid_volume_4 = data["BidVolume4"]
tick.bid_volume_5 = data["BidVolume5"]
tick.ask_volume_2 = data["AskVolume2"]
tick.ask_volume_3 = data["AskVolume3"]
tick.ask_volume_4 = data["AskVolume4"]
tick.ask_volume_5 = data["AskVolume5"]
self.gateway.on_tick(tick)
def connect(self, address: str, userid: str, password: str, brokerid: int):
"""
Start connection to server.
"""
self.userid = userid
self.password = password
self.brokerid = brokerid
# If not connected, then start connection first.
if not self.connect_status:
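            # The CTP API writes its flow files under a per-gateway folder and
            # expects a GBK-encoded path (Chinese Windows locales).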
path = get_folder_path(self.gateway_name.lower())
self.createFtdcMdApi((str(path) + "\\Md").encode("GBK"))
self.registerFront(address)
self.init()
self.connect_status = True
# If already connected, then login immediately.
elif not self.login_status:
self.login()
def login(self):
"""
Login onto server.
"""
req = {
"UserID": self.userid,
"Password": self.password,
"BrokerID": self.brokerid
}
self.reqid += 1
self.reqUserLogin(req, self.reqid)
def subscribe(self, req: SubscribeRequest):
"""
Subscribe to tick data update.
"""
if self.login_status:
self.subscribeMarketData(req.symbol)
self.subscribed.add(req.symbol)
def close(self):
"""
Close the connection.
"""
if self.connect_status:
self.exit()
def update_date(self):
""""""
self.current_date = datetime.now().strftime("%Y%m%d")
class CtpTdApi(TdApi):
""""""
def __init__(self, gateway):
"""Constructor"""
super(CtpTdApi, self).__init__()
self.gateway = gateway
self.gateway_name = gateway.gateway_name
self.reqid = 0
self.order_ref = 0
self.connect_status = False
self.login_status = False
self.auth_status = False
self.login_failed = False
self.contract_inited = False
self.userid = ""
self.password = ""
self.brokerid = ""
self.auth_code = ""
self.appid = ""
self.product_info = ""
self.frontid = 0
self.sessionid = 0
self.order_data = []
self.trade_data = []
self.positions = {}
self.sysid_orderid_map = {}
def onFrontConnected(self):
""""""
self.gateway.write_log("交易服务器连接成功")
if self.auth_code:
self.authenticate()
else:
self.login()
def onFrontDisconnected(self, reason: int):
""""""
self.login_status = False
self.gateway.write_log(f"交易服务器连接断开,原因{reason}")
def onRspAuthenticate(self, data: dict, error: dict, reqid: int, last: bool):
""""""
if not error['ErrorID']:
self.auth_status = True
self.gateway.write_log("交易服务器授权验证成功")
self.login()
else:
self.gateway.write_error("交易服务器授权验证失败", error)
def onRspUserLogin(self, data: dict, error: dict, reqid: int, last: bool):
""""""
if not error["ErrorID"]:
self.frontid = data["FrontID"]
self.sessionid = data["SessionID"]
self.login_status = True
self.gateway.write_log("交易服务器登录成功")
# Confirm settlement
req = {
"BrokerID": self.brokerid,
"InvestorID": self.userid
}
self.reqid += 1
self.reqSettlementInfoConfirm(req, self.reqid)
else:
self.login_failed = True
self.gateway.write_error("交易服务器登录失败", error)
def onRspOrderInsert(self, data: dict, error: dict, reqid: int, last: bool):
""""""
order_ref = data["OrderRef"]
orderid = f"{self.frontid}_{self.sessionid}_{order_ref}"
symbol = data["InstrumentID"]
exchange = symbol_exchange_map[symbol]
order = OrderData(
symbol=symbol,
exchange=exchange,
orderid=orderid,
direction=DIRECTION_CTP2VT[data["Direction"]],
offset=OFFSET_CTP2VT.get(data["CombOffsetFlag"], Offset.NONE),
price=data["LimitPrice"],
volume=data["VolumeTotalOriginal"],
status=Status.REJECTED,
gateway_name=self.gateway_name
)
self.gateway.on_order(order)
self.gateway.write_error("交易委托失败", error)
def onRspOrderAction(self, data: dict, error: dict, reqid: int, last: bool):
""""""
self.gateway.write_error("交易撤单失败", error)
def onRspQueryMaxOrderVolume(self, data: dict, error: dict, reqid: int, last: bool):
""""""
pass
def onRspSettlementInfoConfirm(self, data: dict, error: dict, reqid: int, last: bool):
"""
        Callback of settlement info confirmation.
"""
self.gateway.write_log("结算信息确认成功")
while True:
self.reqid += 1
n = self.reqQryInstrument({}, self.reqid)
if not n:
break
else:
sleep(1)
def onRspQryInvestorPosition(self, data: dict, error: dict, reqid: int, last: bool):
""""""
if not data:
return
# Check if contract data received
if data["InstrumentID"] in symbol_exchange_map:
# Get buffered position object
key = f"{data['InstrumentID'], data['PosiDirection']}"
position = self.positions.get(key, None)
if not position:
position = PositionData(
symbol=data["InstrumentID"],
exchange=symbol_exchange_map[data["InstrumentID"]],
direction=DIRECTION_CTP2VT[data["PosiDirection"]],
gateway_name=self.gateway_name
)
self.positions[key] = position
# For SHFE and INE position data update
if position.exchange in [Exchange.SHFE, Exchange.INE]:
if data["YdPosition"] and not data["TodayPosition"]:
position.yd_volume = data["Position"]
# For other exchange position data update
else:
position.yd_volume = data["Position"] - data["TodayPosition"]
# Get contract size (spread contract has no size value)
size = symbol_size_map.get(position.symbol, 0)
# Calculate previous position cost
cost = position.price * position.volume * size
# Update new position volume
position.volume += data["Position"]
position.pnl += data["PositionProfit"]
# Calculate average position price
if position.volume and size:
cost += data["PositionCost"]
position.price = cost / (position.volume * size)
# Get frozen volume
if position.direction == Direction.LONG:
position.frozen += data["ShortFrozen"]
else:
position.frozen += data["LongFrozen"]
if last:
for position in self.positions.values():
self.gateway.on_position(position)
self.positions.clear()
def onRspQryTradingAccount(self, data: dict, error: dict, reqid: int, last: bool):
""""""
if "AccountID" not in data:
return
account = AccountData(
accountid=data["AccountID"],
balance=data["Balance"],
frozen=data["FrozenMargin"] + data["FrozenCash"] + data["FrozenCommission"],
gateway_name=self.gateway_name
)
account.available = data["Available"]
self.gateway.on_account(account)
def onRspQryInstrument(self, data: dict, error: dict, reqid: int, last: bool):
"""
Callback of instrument query.
"""
product = PRODUCT_CTP2VT.get(data["ProductClass"], None)
if product:
contract = ContractData(
symbol=data["InstrumentID"],
exchange=EXCHANGE_CTP2VT[data["ExchangeID"]],
name=data["InstrumentName"],
product=product,
size=data["VolumeMultiple"],
pricetick=data["PriceTick"],
gateway_name=self.gateway_name,
market_supported=True
)
if contract.exchange == Exchange.SHFE:
contract.market_supported = False
# For option only
if contract.product == Product.OPTION:
# Remove C/P suffix of CZCE option product name
if contract.exchange == Exchange.CZCE:
contract.option_portfolio = data["ProductID"][:-1]
else:
contract.option_portfolio = data["ProductID"]
contract.option_underlying = data["UnderlyingInstrID"]
contract.option_type = OPTIONTYPE_CTP2VT.get(data["OptionsType"], None)
contract.option_strike = data["StrikePrice"]
contract.option_index = str(data["StrikePrice"])
contract.option_expiry = datetime.strptime(data["ExpireDate"], "%Y%m%d")
self.gateway.on_contract(contract)
symbol_exchange_map[contract.symbol] = contract.exchange
symbol_name_map[contract.symbol] = contract.name
symbol_size_map[contract.symbol] = contract.size
if last:
self.contract_inited = True
self.gateway.write_log("合约信息查询成功")
for data in self.order_data:
self.onRtnOrder(data)
self.order_data.clear()
for data in self.trade_data:
self.onRtnTrade(data)
self.trade_data.clear()
def onRtnOrder(self, data: dict):
"""
Callback of order status update.
"""
if not self.contract_inited:
self.order_data.append(data)
return
symbol = data["InstrumentID"]
exchange = symbol_exchange_map[symbol]
frontid = data["FrontID"]
sessionid = data["SessionID"]
order_ref = data["OrderRef"]
orderid = f"{frontid}_{sessionid}_{order_ref}"
timestamp = f"{data['InsertDate']} {data['InsertTime']}"
dt = datetime.strptime(timestamp, "%Y%m%d %H:%M:%S")
dt = CHINA_TZ.localize(dt)
order = OrderData(
symbol=symbol,
exchange=exchange,
orderid=orderid,
type=ORDERTYPE_CTP2VT[data["OrderPriceType"]],
direction=DIRECTION_CTP2VT[data["Direction"]],
offset=OFFSET_CTP2VT[data["CombOffsetFlag"]],
price=data["LimitPrice"],
volume=data["VolumeTotalOriginal"],
traded=data["VolumeTraded"],
status=STATUS_CTP2VT[data["OrderStatus"]],
datetime=dt,
gateway_name=self.gateway_name
)
self.gateway.on_order(order)
self.sysid_orderid_map[data["OrderSysID"]] = orderid
def onRtnTrade(self, data: dict):
"""
Callback of trade status update.
"""
if not self.contract_inited:
self.trade_data.append(data)
return
symbol = data["InstrumentID"]
exchange = symbol_exchange_map[symbol]
orderid = self.sysid_orderid_map[data["OrderSysID"]]
timestamp = f"{data['TradeDate']} {data['TradeTime']}"
dt = datetime.strptime(timestamp, "%Y%m%d %H:%M:%S")
dt = CHINA_TZ.localize(dt)
trade = TradeData(
symbol=symbol,
exchange=exchange,
orderid=orderid,
tradeid=data["TradeID"],
direction=DIRECTION_CTP2VT[data["Direction"]],
offset=OFFSET_CTP2VT[data["OffsetFlag"]],
price=data["Price"],
volume=data["Volume"],
datetime=dt,
gateway_name=self.gateway_name
)
self.gateway.on_trade(trade)
def onRspForQuoteInsert(self, data: dict, error: dict, reqid: int, last: bool):
""""""
if not error["ErrorID"]:
symbol = data["InstrumentID"]
msg = f"{symbol}询价请求发送成功"
self.gateway.write_log(msg)
else:
self.gateway.write_error("询价请求发送失败", error)
def connect(
self,
address: str,
userid: str,
password: str,
brokerid: int,
auth_code: str,
appid: str,
product_info
):
"""
Start connection to server.
"""
self.userid = userid
self.password = password
self.brokerid = brokerid
self.auth_code = auth_code
self.appid = appid
self.product_info = product_info
if not self.connect_status:
path = get_folder_path(self.gateway_name.lower())
self.createFtdcTraderApi((str(path) + "\\Td").encode("GBK"))
self.subscribePrivateTopic(0)
self.subscribePublicTopic(0)
self.registerFront(address)
self.init()
self.connect_status = True
else:
self.authenticate()
def authenticate(self):
"""
Authenticate with auth_code and appid.
"""
req = {
"UserID": self.userid,
"BrokerID": self.brokerid,
"AuthCode": self.auth_code,
"AppID": self.appid
}
if self.product_info:
req["UserProductInfo"] = self.product_info
self.reqid += 1
self.reqAuthenticate(req, self.reqid)
def login(self):
"""
Login onto server.
"""
if self.login_failed:
return
req = {
"UserID": self.userid,
"Password": self.password,
"BrokerID": self.brokerid,
"AppID": self.appid
}
if self.product_info:
req["UserProductInfo"] = self.product_info
self.reqid += 1
self.reqUserLogin(req, self.reqid)
def send_order(self, req: OrderRequest):
"""
Send new order.
"""
if req.offset not in OFFSET_VT2CTP:
self.gateway.write_log("请选择开平方向")
return ""
if req.type not in ORDERTYPE_VT2CTP:
self.gateway.write_log(f"当前接口不支持该类型的委托{req.type.value}")
return ""
self.order_ref += 1
ctp_req = {
"InstrumentID": req.symbol,
"ExchangeID": req.exchange.value,
"LimitPrice": req.price,
"VolumeTotalOriginal": int(req.volume),
"OrderPriceType": ORDERTYPE_VT2CTP.get(req.type, ""),
"Direction": DIRECTION_VT2CTP.get(req.direction, ""),
"CombOffsetFlag": OFFSET_VT2CTP.get(req.offset, ""),
"OrderRef": str(self.order_ref),
"InvestorID": self.userid,
"UserID": self.userid,
"BrokerID": self.brokerid,
"CombHedgeFlag": THOST_FTDC_HF_Speculation,
"ContingentCondition": THOST_FTDC_CC_Immediately,
"ForceCloseReason": THOST_FTDC_FCC_NotForceClose,
"IsAutoSuspend": 0,
"TimeCondition": THOST_FTDC_TC_GFD,
"VolumeCondition": THOST_FTDC_VC_AV,
"MinVolume": 1
}
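        # FAK = fill-and-kill (IOC, partial fills allowed);
        # FOK = fill-or-kill (IOC, complete volume or nothing).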
if req.type == OrderType.FAK:
ctp_req["OrderPriceType"] = THOST_FTDC_OPT_LimitPrice
ctp_req["TimeCondition"] = THOST_FTDC_TC_IOC
ctp_req["VolumeCondition"] = THOST_FTDC_VC_AV
elif req.type == OrderType.FOK:
ctp_req["OrderPriceType"] = THOST_FTDC_OPT_LimitPrice
ctp_req["TimeCondition"] = THOST_FTDC_TC_IOC
ctp_req["VolumeCondition"] = THOST_FTDC_VC_CV
self.reqid += 1
self.reqOrderInsert(ctp_req, self.reqid)
orderid = f"{self.frontid}_{self.sessionid}_{self.order_ref}"
order = req.create_order_data(orderid, self.gateway_name)
self.gateway.on_order(order)
return order.vt_orderid
def cancel_order(self, req: CancelRequest):
"""
Cancel existing order.
"""
frontid, sessionid, order_ref = req.orderid.split("_")
ctp_req = {
"InstrumentID": req.symbol,
"ExchangeID": req.exchange.value,
"OrderRef": order_ref,
"FrontID": int(frontid),
"SessionID": int(sessionid),
"ActionFlag": THOST_FTDC_AF_Delete,
"BrokerID": self.brokerid,
"InvestorID": self.userid
}
self.reqid += 1
self.reqOrderAction(ctp_req, self.reqid)
def send_rfq(self, req: OrderRequest) -> str:
""""""
self.order_ref += 1
ctp_req = {
"InstrumentID": req.symbol,
"ExchangeID": req.exchange.value,
"ForQuoteRef": str(self.order_ref),
"BrokerID": self.brokerid,
"InvestorID": self.userid
}
self.reqid += 1
self.reqForQuoteInsert(ctp_req, self.reqid)
orderid = f"{self.frontid}_{self.sessionid}_{self.order_ref}"
vt_orderid = f"{self.gateway_name}.{orderid}"
return vt_orderid
def query_account(self):
"""
Query account balance data.
"""
self.reqid += 1
self.reqQryTradingAccount({}, self.reqid)
def query_position(self):
"""
Query position holding data.
"""
if not symbol_exchange_map:
return
req = {
"BrokerID": self.brokerid,
"InvestorID": self.userid
}
self.reqid += 1
self.reqQryInvestorPosition(req, self.reqid)
def close(self):
""""""
if self.connect_status:
self.exit()
def adjust_price(price: float) -> float:
""""""
if price == MAX_FLOAT:
price = 0
return price
| 29.854545 | 99 | 0.57953 |
578f96ce3d1adebce9ff7565e2a61f42c0a4c635 | 1,761 | py | Python | import data/abstract.py | elenisproject/internet-and-applications | 4295c8c0c5af26793546e7cb0fe6e727966ea1a2 | [
"MIT"
] | 2 | 2021-03-15T15:07:00.000Z | 2021-03-18T11:21:22.000Z | import data/abstract.py | elenisproject/internet-and-applications | 4295c8c0c5af26793546e7cb0fe6e727966ea1a2 | [
"MIT"
] | 1 | 2021-03-31T20:07:42.000Z | 2021-03-31T20:07:42.000Z | import data/abstract.py | elenisproject/internet-and-applications | 4295c8c0c5af26793546e7cb0fe6e727966ea1a2 | [
"MIT"
] | null | null | null | #------------------------------------------ EXTRACT DATA FROM pdf_json -------------------------------------------------
#
# Insert abstracts from the JSON files into our database.
# Primary key: (paper_id, spot); foreign key: (paper_id).
# The table has 4 columns: paper_id, spot, body, section.
#
#--------------------------------------------------------------------------------------------------------------------------------
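# Assumed target table layout (not part of the original script):
#   CREATE TABLE abstract (
#       paper_id VARCHAR(64), spot INT, body TEXT, section TEXT,
#       PRIMARY KEY (paper_id, spot),
#       FOREIGN KEY (paper_id) REFERENCES paper(paper_id)
#   );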
import os
import json

import mysql.connector

from settings import DB_CREDS
cnx = mysql.connector.connect(
host = DB_CREDS['host'],
user = DB_CREDS['user'],
passwd = DB_CREDS['pass'],
database = DB_CREDS['db']
)
cursor = cnx.cursor()
#get all json files from pdf_json folder
path_to_json = 'pdf_json/'
json_files = [pos_json for pos_json in os.listdir(path_to_json) if pos_json.endswith('.json')]
for js in json_files:
    with open(os.path.join(path_to_json, js)) as json_file:
        json_text = json.load(json_file)
        paper_id = json_text['paper_id']
        # insert each abstract paragraph as its own row
        for spot, paragraph in enumerate(json_text['abstract']):
            text = paragraph['text']
            section = paragraph['section']
            text_data = (paper_id, spot, text, section)
add_data = ("INSERT IGNORE INTO abstract "
"(paper_id,spot,body,section)"
"VALUES (%s,%s,%s,%s)")
            cursor.execute(add_data, text_data)
cnx.commit()
cursor.close()
cnx.close() | 35.938776 | 129 | 0.579216 |
ea4013cec98debdf437b500dfa77bb9daeea0c87 | 3,478 | py | Python | tests/common/helpers/drop_counters/fanout_drop_counter.py | xwjiang2021/sonic-mgmt | 82c446b9fb016eb070af765aa9d9999e55b27342 | [
"Apache-2.0"
] | 2 | 2021-11-24T09:33:41.000Z | 2021-12-03T09:08:29.000Z | tests/common/helpers/drop_counters/fanout_drop_counter.py | xwjiang2021/sonic-mgmt | 82c446b9fb016eb070af765aa9d9999e55b27342 | [
"Apache-2.0"
] | null | null | null | tests/common/helpers/drop_counters/fanout_drop_counter.py | xwjiang2021/sonic-mgmt | 82c446b9fb016eb070af765aa9d9999e55b27342 | [
"Apache-2.0"
] | null | null | null | import re
import logging
from abc import abstractmethod
logger = logging.getLogger(__name__)
MAX_OPENFLOW_RULE_ID = 65535
DEVICE_PORT_VLANS = 'device_port_vlans'
TRUNK = 'Trunk'
MODE = "mode"
FAILED = 'failed'
INVOCATION = 'invocation'
STDOUT = 'stdout'
class FanoutDropCounter:
def __init__(self):
self.fanout_graph_facts = None
@abstractmethod
def prepare_config(self, fanout_graph_facts, match_mac, set_mac, eth_field):
pass
@abstractmethod
def restore_drop_counter_config(self):
pass
class FanoutOnyxDropCounter(FanoutDropCounter):
def __init__(self, onyx_switch):
FanoutDropCounter.__init__(self)
self.onyx_switch = onyx_switch
def prepare_config(self, fanout_graph_facts, match_mac, set_mac, eth_field):
self.fanout_graph_facts = fanout_graph_facts
trunk_port = self._get_trunk_port_to_server()
openflow_port_id = self._get_openflow_port_id(trunk_port)
cmd = 'openflow add-flows {rule_id} table=0,priority=10,dl_src={match_mac},' \
'in_port={openflow_port_id},actions=set_field:{set_mac}->{eth_field}'
out = self.onyx_switch.host.onyx_config(lines=[cmd.format(
rule_id=MAX_OPENFLOW_RULE_ID, match_mac=match_mac,
openflow_port_id=openflow_port_id, set_mac=set_mac, eth_field=eth_field)])
if FAILED in out and out[FAILED]:
logger.error('Failed to set openflow rule - {}'.format(out['msg']))
return False
logger.debug('Setting openflow rule succeed from onyx: {}'.format(out))
return True
def _get_trunk_port_to_server(self):
fanout_trunk_port = None
for iface, iface_info in self.fanout_graph_facts[self.onyx_switch.hostname][DEVICE_PORT_VLANS].items():
if iface_info[MODE] == TRUNK:
fanout_trunk_port = iface.split('/')[-1]
break
return fanout_trunk_port
def _get_openflow_port_id(self, port):
out = self.onyx_switch.host.onyx_command(
commands=['show openflow'])[self.onyx_switch.hostname]
if FAILED in out and out[FAILED]:
logger.error('Failed to get openflow table- {}'.format(out['msg']))
show_openflow = out[STDOUT][0]
return self._get_openflow_port_id_from_show_openflow(show_openflow, port)
@staticmethod
def _get_openflow_port_id_from_show_openflow(show_openflow, port):
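        # Expected 'show openflow' line format (assumption): "Eth1/<port>   OF-<id>"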
        regexp = r'Eth1/{}\s*OF-(\d+)'.format(port)
match = re.search(regexp, show_openflow)
if match:
return match.group(1)
else:
raise Exception('Can not find openflow port id for port {}. Show openflow output: {}'.format(
port, show_openflow))
def restore_drop_counter_config(self):
"""Delete configuraion for drop_packets tests if fanout has onyx OS
Affected tests:test_equal_smac_dmac_drop, test_multicast_smac_drop
Returns:
boolean: True if success. Usually, the method return False only if the operation
is not supported or failed.
"""
cmd = 'openflow del-flows {}'.format(MAX_OPENFLOW_RULE_ID)
out = self.onyx_switch.host.onyx_config(lines=[cmd])
if FAILED in out and out[FAILED]:
logger.error('Failed to remove openflow rule - {}'.format(out['msg']))
return False
logger.debug('Removing openflow rule succeed from onyx: {}'.format(out))
return True
| 38.21978 | 111 | 0.672513 |
43ce43fd217de23d22b10038183b5ccf945d3bef | 1,185 | py | Python | datumaro/plugins/openvino_plugin/samples/mobilenet_v2_pytorch_interp.py | einstonlabs/datumaro | 9eb5246febb4b4ae10c321fae80413bb87fb1a7d | [
"MIT"
] | null | null | null | datumaro/plugins/openvino_plugin/samples/mobilenet_v2_pytorch_interp.py | einstonlabs/datumaro | 9eb5246febb4b4ae10c321fae80413bb87fb1a7d | [
"MIT"
] | null | null | null | datumaro/plugins/openvino_plugin/samples/mobilenet_v2_pytorch_interp.py | einstonlabs/datumaro | 9eb5246febb4b4ae10c321fae80413bb87fb1a7d | [
"MIT"
] | 9 | 2021-05-17T07:00:03.000Z | 2021-06-26T02:15:10.000Z | # Copyright (C) 2021 Intel Corporation
#
# SPDX-License-Identifier: MIT
from datumaro.components.extractor import Label, LabelCategories, AnnotationType
from datumaro.util.annotation_util import softmax
def process_outputs(inputs, outputs):
    # inputs = model input; array of images; shape = (B, H, W, C)
    # outputs = model output; one raw classification score vector per image
# results = conversion result; [[ Annotation, ... ], ... ]
results = []
    for _input, output in zip(inputs, outputs):
image_results = []
output = softmax(output).tolist()
label = output.index(max(output))
image_results.append(Label(label=label, attributes={"scores": output}))
results.append(image_results)
return results
def get_categories():
# output categories - label map etc.
label_categories = LabelCategories()
with open("samples/imagenet.class", "r") as file:
for line in file.readlines():
label = line.strip()
label_categories.add(label)
return {AnnotationType.label: label_categories}
| 30.384615 | 95 | 0.664979 |
462170ac035dad5d578550ac96d59d58b5058076 | 12,961 | py | Python | gbpservice/contrib/tests/unit/nfp/configurator/agents/test_generic_config.py | noironetworks/group-based-policy | 58a4be9a90ed4833551ae7ce7a029f2ec065bd29 | [
"Apache-2.0"
] | 2 | 2017-12-13T10:33:28.000Z | 2019-07-03T19:01:42.000Z | gbpservice/contrib/tests/unit/nfp/configurator/agents/test_generic_config.py | noironetworks/group-based-policy | 58a4be9a90ed4833551ae7ce7a029f2ec065bd29 | [
"Apache-2.0"
] | 4 | 2015-04-14T00:27:44.000Z | 2020-08-31T11:20:17.000Z | gbpservice/contrib/tests/unit/nfp/configurator/agents/test_generic_config.py | noironetworks/group-based-policy | 58a4be9a90ed4833551ae7ce7a029f2ec065bd29 | [
"Apache-2.0"
] | 4 | 2015-04-10T16:03:47.000Z | 2020-08-31T06:06:32.000Z | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import subprocess
from unittest import mock
import unittest2
from neutron.tests import base
from gbpservice.contrib.nfp.configurator.agents import generic_config as gc
from gbpservice.contrib.nfp.configurator.lib import (
generic_config_constants as const)
from gbpservice.contrib.nfp.configurator.lib import constants as common_const
from gbpservice.contrib.tests.unit.nfp.configurator.test_data import (
fw_test_data as fo)
class GenericConfigRpcManagerTestCase(base.BaseTestCase):
""" Implement test cases for RPC manager methods of generic config agent.
"""
def __init__(self, *args, **kwargs):
super(GenericConfigRpcManagerTestCase, self).__init__(
*args, **kwargs)
self.fo = fo.FakeObjects()
@mock.patch(__name__ + '.fo.FakeObjects.sc')
@mock.patch(__name__ + '.fo.FakeObjects.conf')
def _get_GenericConfigRpcManager_object(self, conf, sc):
""" Retrieves RPC manager object of generic config agent.
:param sc: mocked service controller object of process model framework
:param conf: mocked OSLO configuration file
Returns: object of generic config's RPC manager
and service controller.
"""
agent = gc.GenericConfigRpcManager(sc, conf)
return agent, sc
def _test_event_creation(self, method):
""" Tests event creation and enqueueing for create/delete
operation of generic config agent's RPC manager.
:param method: CONFIGURE_INTERFACES/CLEAR_INTERFACES/
CONFIGURE_ROUTES/CLEAR_ROUTES
Returns: none
"""
agent, sc = self._get_GenericConfigRpcManager_object()
arg_dict = {'context': self.fo.context,
'resource_data': self.fo._fake_resource_data()}
with mock.patch.object(
sc, 'new_event', return_value='foo') as mock_sc_event, (
mock.patch.object(sc, 'post_event')) as mock_sc_rpc_event:
call_method = getattr(agent, method.lower())
call_method(self.fo.context, self.fo._fake_resource_data())
if 'HEALTHMONITOR' in method:
mock_sc_event.assert_called_with(id=method,
data=arg_dict,
key=self.fo.vmid)
else:
mock_sc_event.assert_called_with(id=method,
data=arg_dict, key=None)
mock_sc_rpc_event.assert_called_with('foo')
def test_configure_interfaces_genericconfigrpcmanager(self):
""" Implements test case for configure interfaces method
of generic config agent RPCmanager.
Returns: none
"""
self._test_event_creation(const.EVENT_CONFIGURE_INTERFACES)
def test_clear_interfaces_genericconfigrpcmanager(self):
""" Implements test case for clear interfaces method
of generic config agent RPCmanager.
Returns: none
"""
self._test_event_creation(const.EVENT_CLEAR_INTERFACES)
def test_configure_routes_genericconfigrpcmanager(self):
""" Implements test case for configure routes method
of generic config agent RPCmanager.
Returns: none
"""
self._test_event_creation(const.EVENT_CONFIGURE_ROUTES)
def test_clear_routes_genericconfigrpcmanager(self):
""" Implements test case for clear routes method
of generic config agent RPCmanager.
Returns: none
"""
self._test_event_creation(const.EVENT_CLEAR_ROUTES)
def test_configure_hm_genericconfigrpcmanager(self):
""" Implements test case for configure healthmonitor method
of generic config agent RPCmanager.
Returns: none
"""
self._test_event_creation(const.EVENT_CONFIGURE_HEALTHMONITOR)
@unittest2.skip('not implemented yet')
def test_clear_hm_genericconfigrpcmanager(self):
""" Implements test case for clear healthmonitor method
of generic config agent RPCmanager.
Returns: none
"""
self._test_event_creation(const.EVENT_CLEAR_HEALTHMONITOR)
class GenericConfigEventHandlerTestCase(base.BaseTestCase):
""" Implements test cases for event handler methods
of generic config agent.
"""
def __init__(self, *args, **kwargs):
super(GenericConfigEventHandlerTestCase, self).__init__(
*args, **kwargs)
self.fo = fo.FakeObjects()
self.empty = self.fo.empty_dict
self.context = {'notification_data': {},
'resource': 'interfaces'}
@mock.patch(__name__ + '.fo.FakeObjects.rpcmgr')
@mock.patch(__name__ + '.fo.FakeObjects.drivers')
@mock.patch(__name__ + '.fo.FakeObjects.sc')
def _get_GenericConfigEventHandler_object(self, sc, drivers, rpcmgr):
""" Retrieves event handler object of generic config.
:param sc: mocked service controller object of process model framework
:param rpcmgr: object of configurator's RPC manager
        :param drivers: list of driver objects for generic config agent
Returns: object of generic config's event handler
"""
agent = gc.GenericConfigEventHandler(sc, drivers, rpcmgr)
return agent, sc
def _test_handle_event(self, ev):
""" Test handle event method of generic config agent for various
device configuration operations.
:param ev: fake event data which has to be actually sent by
process framework.
Returns: None
"""
agent, sc = self._get_GenericConfigEventHandler_object()
driver = mock.Mock()
with mock.patch.object(
driver, const.EVENT_CONFIGURE_INTERFACES.lower(),
return_value=common_const.SUCCESS) as (mock_config_inte), (
mock.patch.object(
driver, const.EVENT_CLEAR_INTERFACES.lower(),
return_value=common_const.SUCCESS)) as (mock_clear_inte), (
mock.patch.object(
driver, const.EVENT_CONFIGURE_ROUTES.lower(),
return_value=common_const.SUCCESS)) as (
mock_config_src_routes), (
mock.patch.object(
driver, const.EVENT_CLEAR_ROUTES.lower(),
return_value=common_const.SUCCESS)) as (
mock_delete_src_routes), (
mock.patch.object(
sc, 'poll_event')) as mock_hm_poll_event, (
mock.patch.object(
driver, const.EVENT_CONFIGURE_HEALTHMONITOR.lower(),
return_value=common_const.SUCCESS)), (
mock.patch.object(
driver, const.EVENT_CLEAR_HEALTHMONITOR.lower(),
return_value=common_const.SUCCESS)) as mock_clear_hm, (
mock.patch.object(
agent, '_get_driver', return_value=driver)):
if const.EVENT_CONFIGURE_HEALTHMONITOR in ev.id:
ev.id, periodicity = ev.id.split()
agent.handle_event(ev)
resource_data = self.fo._fake_resource_data()
if ev.id == const.EVENT_CONFIGURE_INTERFACES:
mock_config_inte.assert_called_with(
self.fo.context, resource_data)
elif ev.id == const.EVENT_CLEAR_INTERFACES:
mock_clear_inte.assert_called_with(
self.fo.context, resource_data)
elif ev.id == const.EVENT_CONFIGURE_ROUTES:
mock_config_src_routes.assert_called_with(
self.fo.context, resource_data)
elif ev.id == const.EVENT_CLEAR_ROUTES:
mock_delete_src_routes.assert_called_with(
self.fo.context, resource_data)
elif const.EVENT_CONFIGURE_HEALTHMONITOR in ev.id:
if periodicity == const.INITIAL:
mock_hm_poll_event.assert_called_with(
ev,
max_times=const.EVENT_CONFIGURE_HEALTHMONITOR_MAXRETRY)
elif periodicity == const.FOREVER:
mock_hm_poll_event.assert_called_with(ev)
elif ev.id == const.EVENT_CLEAR_HEALTHMONITOR:
mock_clear_hm.assert_called_with(
self.fo.context, resource_data)
def _test_handle_periodic_event(self, ev):
""" Test handle periodic event method of generic config agent
for healthmonitor configuration.
:param ev: fake event data which has to be actually sent by
                   process framework.
Returns: None
"""
agent, _ = self._get_GenericConfigEventHandler_object()
driver = mock.Mock()
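        # subprocess.check_output is patched because the health monitor check is
        # assumed to shell out (e.g. a ping) to probe the service VM.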
with mock.patch.object(
agent, '_get_driver', return_value=driver), (
mock.patch.object(
driver, const.EVENT_CONFIGURE_HEALTHMONITOR.lower(),
return_value=common_const.SUCCESS)) as mock_dvr, (
mock.patch.object(subprocess,
'check_output', return_value=True)):
agent.handle_configure_healthmonitor(ev)
self.assertEqual(mock_dvr.return_value, common_const.SUCCESS)
def test_configure_interfaces_genericconfigeventhandler(self):
""" Implements test case for configure interfaces method
of generic config event handler.
Returns: none
"""
ev = fo.FakeEventGenericConfig()
ev.id = const.EVENT_CONFIGURE_INTERFACES
self._test_handle_event(ev)
def test_clear_interfaces_genericconfigeventhandler(self):
""" Implements test case for clear interfaces method
of generic config event handler.
Returns: none
"""
ev = fo.FakeEventGenericConfig()
ev.id = const.EVENT_CLEAR_INTERFACES
self._test_handle_event(ev)
def test_configure_routes_genericconfigeventhandler(self):
""" Implements test case for configure routes method
of generic config event handler.
Returns: none
"""
ev = fo.FakeEventGenericConfig()
ev.id = const.EVENT_CONFIGURE_ROUTES
self._test_handle_event(ev)
def test_clear_routes_genericconfigeventhandler(self):
""" Implements test case for clear routes method
of generic config event handler.
Returns: none
"""
ev = fo.FakeEventGenericConfig()
ev.id = const.EVENT_CLEAR_ROUTES
self._test_handle_event(ev)
def test_configure_hm_initial_genericconfigeventhandler(self):
""" Implements test case for configure health monitor method
with specified polling in generic config event handler.
Returns: none
"""
ev = fo.FakeEventGenericConfig()
ev.id = 'CONFIGURE_HEALTHMONITOR initial'
self._test_handle_event(ev)
def test_configure_hm_forever_genericconfigeventhandler(self):
""" Implements test case for configure health monitor method
with forever polling in generic config event handler.
Returns: none
"""
ev = fo.FakeEventGenericConfig()
ev.data['resource_data']['nfds'][0].update(
{'periodicity': const.FOREVER})
ev.id = 'CONFIGURE_HEALTHMONITOR forever'
self._test_handle_event(ev)
def test_clear_hm_genericconfigeventhandler(self):
""" Implements test case for clear health monitor method
of generic config event handler.
Returns: none
"""
ev = fo.FakeEventGenericConfig()
ev.id = 'CLEAR_HEALTHMONITOR'
self._test_handle_event(ev)
def test_handle_configure_healthmonitor_genericconfigeventhandler(self):
""" Implements test case for handle configure health monitor
method of generic config event handler.
Returns: none
"""
ev = fo.FakeEventGenericConfig()
ev.id = const.EVENT_CONFIGURE_HEALTHMONITOR
self._test_handle_periodic_event(ev)
| 35.220109 | 79 | 0.628578 |
14be68cfa6bee5057581843cebad7a44b4ea641e | 4,661 | py | Python | src/bin/hub.py | NikkiBytes/mygene.info | 5ec9bd7b73890acdcb2a68c8d57368f44dcdeb42 | [
"Apache-2.0"
] | 78 | 2017-05-26T08:38:25.000Z | 2022-02-25T08:55:31.000Z | src/bin/hub.py | NikkiBytes/mygene.info | 5ec9bd7b73890acdcb2a68c8d57368f44dcdeb42 | [
"Apache-2.0"
] | 105 | 2017-05-18T21:57:13.000Z | 2022-03-18T21:41:47.000Z | src/bin/hub.py | NikkiBytes/mygene.info | 5ec9bd7b73890acdcb2a68c8d57368f44dcdeb42 | [
"Apache-2.0"
] | 19 | 2017-06-12T18:31:54.000Z | 2021-11-10T00:04:43.000Z | #!/usr/bin/env python
import os, logging
from functools import partial
# shut some mouths...
logging.getLogger("elasticsearch").setLevel(logging.ERROR)
logging.getLogger("urllib3").setLevel(logging.ERROR)
logging.getLogger("requests").setLevel(logging.ERROR)
logging.getLogger('botocore').setLevel(logging.ERROR)
logging.getLogger('boto3').setLevel(logging.ERROR)
import config, biothings
from biothings.utils.version import set_versions
app_folder,_src = os.path.split(os.path.split(os.path.split(os.path.abspath(__file__))[0])[0])
set_versions(config,app_folder)
biothings.config_for_app(config)
from biothings.hub import HubServer
import biothings.hub.databuild.builder as builder
import biothings.utils.mongo as mongo
from biothings.hub.databuild.syncer import SyncerManager, \
ThrottledESJsonDiffSyncer, ThrottledESJsonDiffSelfContainedSyncer
from hub.databuild.mapper import EntrezRetired2Current, Ensembl2Entrez
from hub.databuild.builder import MyGeneDataBuilder
class MyGeneHubServer(HubServer):
def configure_build_manager(self):
retired2current = EntrezRetired2Current(convert_func=int,db_provider=mongo.get_src_db)
ensembl2entrez = Ensembl2Entrez(db_provider=mongo.get_src_db,
retired2current=retired2current)
build_manager = builder.BuilderManager(
builder_class=partial(MyGeneDataBuilder,mappers=[ensembl2entrez]),
job_manager=self.managers["job_manager"])
build_manager.configure()
build_manager.poll()
self.managers["build_manager"] = build_manager
self.logger.info("Using custom builder %s" % MyGeneDataBuilder)
def configure_sync_manager(self):
# prod
sync_manager_prod = SyncerManager(job_manager=self.managers["job_manager"])
sync_manager_prod.configure(klasses=[partial(ThrottledESJsonDiffSyncer,config.MAX_SYNC_WORKERS),
partial(ThrottledESJsonDiffSelfContainedSyncer,config.MAX_SYNC_WORKERS)])
self.managers["sync_manager"] = sync_manager_prod
# test will access localhost ES, no need to throttle
sync_manager_test = SyncerManager(job_manager=self.managers["job_manager"])
sync_manager_test.configure()
self.managers["sync_manager_test"] = sync_manager_test
self.logger.info("Using custom syncer, prod(throttled): %s, test: %s" % (sync_manager_prod,sync_manager_test))
def configure_commands(self):
super().configure_commands() # keep all originals...
self.commands["es_sync_test"] = partial(self.managers["sync_manager_test"].sync,"es",
target_backend=(config.INDEX_CONFIG["env"]["local"]["host"],
config.INDEX_CONFIG["env"]["local"]["index"][0]["index"],
config.INDEX_CONFIG["env"]["local"]["index"][0]["doc_type"]))
self.commands["es_sync_prod"] = partial(self.managers["sync_manager"].sync,"es",
target_backend=(config.INDEX_CONFIG["env"]["prod"]["host"],
config.INDEX_CONFIG["env"]["prod"]["index"][0]["index"],
config.INDEX_CONFIG["env"]["prod"]["index"][0]["doc_type"]))
#self.commands["publish_diff_demo"] = partial(self.managers["diff_manager"].publish_diff,config.S3_APP_FOLDER + "-demo",
# s3_bucket=config.S3_DIFF_BUCKET + "-demo")
#self.commands["snapshot_demo"] = partial(self.managers["index_manager"].snapshot,repository=config.SNAPSHOT_REPOSITORY + "-demo")
#self.commands["publish_snapshot_demo"] = partial(self.managers["index_manager"].publish_snapshot,s3_folder=config.S3_APP_FOLDER + "-demo",
# repository=config.READONLY_SNAPSHOT_REPOSITORY)
## replace default
#self.commands["publish_diff"] = partial(self.managers["diff_manager"].publish_diff,config.S3_APP_FOLDER)
#self.commands["publish_snapshot"] = partial(self.managers["index_manager"].publish_snapshot,s3_folder=config.S3_APP_FOLDER)
import hub.dataload
from hub.datatransform.keylookup import MyGeneKeyLookup
# pass explicit list of datasources (no auto-discovery)
server = MyGeneHubServer(config.ACTIVE_DATASOURCES,name="MyGene.info",
managers_custom_args={"dataplugin" : {"keylookup" : MyGeneKeyLookup}},
api_config=False)
if __name__ == "__main__":
server.start()
| 55.488095 | 147 | 0.670886 |
149cd9f371733eca11c9a6db19afc6edba6319e8 | 1,507 | py | Python | PyPoll/main.py | Trumane12311/python-challenge | 1a58b84b5e4b30047dd3ca3f367b92c43587a3f9 | [
"RSA-MD"
] | null | null | null | PyPoll/main.py | Trumane12311/python-challenge | 1a58b84b5e4b30047dd3ca3f367b92c43587a3f9 | [
"RSA-MD"
] | null | null | null | PyPoll/main.py | Trumane12311/python-challenge | 1a58b84b5e4b30047dd3ca3f367b92c43587a3f9 | [
"RSA-MD"
] | null | null | null | import os
import csv
candidate_votes = {}
candidates = []
total_votes = 0
polldata = os.path.join("Resources", "election_data.csv")
with open(polldata, 'r') as csvfile:
csvreader = csv.reader(csvfile, delimiter=",")
headers = next(csvreader)
for row in csvreader:
candidates = row[2]
if candidates in candidate_votes.keys():
candidate_votes[candidates] = candidate_votes[candidates] + 1
else:
candidate_votes[candidates] = 1
total_votes = sum(candidate_votes.values())
print("Election Results")
print("-------------------------")
print(f'Total Votes: {total_votes}')
print('-------------------------')
for candidate, votes in candidate_votes.items():
    percentage = round((votes / total_votes) * 100, 0)
    print(f'{candidate}: {percentage}% ({votes})')
for key in candidate_votes.keys():
if candidate_votes[key] == max(candidate_votes.values()):
winner = key
print(f'Winner: {winner}')
output_path = os.path.join("analysis", "electiondata_summary.csv")
with open(output_path, 'w', newline='') as electiondata_summary:
csvwriter = csv.writer(electiondata_summary, delimiter=',')
csvwriter.writerow(["Election Data Summary"])
csvwriter.writerow([f'Total Votes, {total_votes}'])
csvwriter.writerow(["Khan, 63.0%, (2218231)"])
csvwriter.writerow(["Correy, 20.0%, (704200)"])
csvwriter.writerow(["Li, 14.0%, (492940)"])
csvwriter.writerow(["O'Tooley, 3.0%, (105630)"])
csvwriter.writerow(['Winner, Khan']) | 32.06383 | 73 | 0.660252 |
7026e00759a47bd47b3a9d4d120eabae3e61347a | 6,653 | py | Python | tools/validator.py | b00kwrm/artifacts | af4c2e545d3c0d277496c357c51a0e3f82216ecb | [
"Apache-2.0"
] | 1 | 2020-04-02T21:24:01.000Z | 2020-04-02T21:24:01.000Z | tools/validator.py | b00kwrm/artifacts | af4c2e545d3c0d277496c357c51a0e3f82216ecb | [
"Apache-2.0"
] | null | null | null | tools/validator.py | b00kwrm/artifacts | af4c2e545d3c0d277496c357c51a0e3f82216ecb | [
"Apache-2.0"
] | 1 | 2020-04-24T12:39:22.000Z | 2020-04-24T12:39:22.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tool to validate artifact definitions."""
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import logging
import os
import sys
from artifacts import definitions
from artifacts import errors
from artifacts import reader
from artifacts import registry
class ArtifactDefinitionsValidator(object):
"""Artifact definitions validator."""
LEGACY_PATH = os.path.join('data', 'legacy.yaml')
def __init__(self):
"""Initializes an artifact definitions validator."""
super(ArtifactDefinitionsValidator, self).__init__()
self._artifact_registry = registry.ArtifactDefinitionsRegistry()
self._artifact_registry_key_paths = set()
def _CheckRegistryKeyPath(self, filename, artifact_definition, key_path):
"""Checks a Windows Registry key path.
Args:
filename (str): name of the artifacts definition file.
artifact_definition (ArtifactDefinition): artifact definition.
key_path (str): key path.
Returns:
bool: True if the Registry key path is valid.
"""
result = True
key_path = key_path.upper()
if key_path.startswith('%%CURRENT_CONTROL_SET%%'):
result = False
logging.warning((
'Artifact definition: {0:s} in file: {1:s} contains Windows '
'Registry key path that starts with '
'%%CURRENT_CONTROL_SET%%. Replace %%CURRENT_CONTROL_SET%% with '
'HKEY_LOCAL_MACHINE\\System\\CurrentControlSet').format(
artifact_definition.name, filename))
return result
def _HasDuplicateRegistryKeyPaths(
self, filename, artifact_definition, source):
"""Checks if Registry key paths are not already defined by other artifacts.
Note that at the moment this function will only find exact duplicate
Registry key paths.
Args:
filename (str): name of the artifacts definition file.
artifact_definition (ArtifactDefinition): artifact definition.
source (SourceType): source definition.
Returns:
bool: True if the Registry key paths defined by the source type
are used in other artifacts.
"""
result = False
intersection = self._artifact_registry_key_paths.intersection(
set(source.keys))
if intersection:
duplicate_key_paths = '\n'.join(intersection)
logging.warning((
'Artifact definition: {0:s} in file: {1:s} has duplicate '
'Registry key paths:\n{2:s}').format(
artifact_definition.name, filename, duplicate_key_paths))
result = True
self._artifact_registry_key_paths.update(source.keys)
return result
def CheckFile(self, filename):
"""Validates the artifacts definition in a specific file.
Args:
filename (str): name of the artifacts definition file.
Returns:
bool: True if the file contains valid artifacts definitions.
"""
result = True
artifact_reader = reader.YamlArtifactsReader()
try:
for artifact_definition in artifact_reader.ReadFile(filename):
try:
self._artifact_registry.RegisterDefinition(artifact_definition)
except KeyError:
logging.warning(
'Duplicate artifact definition: {0:s} in file: {1:s}'.format(
artifact_definition.name, filename))
result = False
for source in artifact_definition.sources:
if source.type_indicator in (
definitions.TYPE_INDICATOR_FILE, definitions.TYPE_INDICATOR_PATH):
if definitions.SUPPORTED_OS_WINDOWS in source.supported_os:
for path in source.paths:
number_of_forward_slashes = path.count('/')
number_of_backslashes = path.count('\\')
if (number_of_forward_slashes < number_of_backslashes and
source.separator != '\\'):
logging.warning((
'Incorrect path separator: {0:s} in path: {1:s} defined '
'by artifact definition: {2:s} in file: {3:s}').format(
source.separator, path, artifact_definition.name,
filename))
result = False
elif source.type_indicator == (
definitions.TYPE_INDICATOR_WINDOWS_REGISTRY_KEY):
# Exempt the legacy file from duplicate checking because it has
# duplicates intentionally.
if (filename != self.LEGACY_PATH and
self._HasDuplicateRegistryKeyPaths(
filename, artifact_definition, source)):
result = False
for key_path in source.keys:
if not self._CheckRegistryKeyPath(
filename, artifact_definition, key_path):
result = False
elif source.type_indicator == (
definitions.TYPE_INDICATOR_WINDOWS_REGISTRY_VALUE):
for key_value_pair in source.key_value_pairs:
if not self._CheckRegistryKeyPath(
filename, artifact_definition, key_value_pair['key']):
result = False
except errors.FormatError as exception:
logging.warning(
'Unable to validate file: {0:s} with error: {1!s}'.format(
filename, exception))
result = False
return result
def GetUndefinedArtifacts(self):
"""Retrieves the names of undefined artifacts used by artifact groups.
Returns:
set[str]: undefined artifacts names.
"""
return self._artifact_registry.GetUndefinedArtifacts()
def Main():
"""The main program function.
Returns:
bool: True if successful or False if not.
"""
args_parser = argparse.ArgumentParser(
description='Validates an artifact definitions file.')
args_parser.add_argument(
'filename',
nargs='?',
action='store',
metavar='artifacts.yaml',
default=None,
help=('path of the file that contains the artifact '
'definitions.'))
options = args_parser.parse_args()
if not options.filename:
print('Source value is missing.')
print('')
args_parser.print_help()
print('')
return False
if not os.path.isfile(options.filename):
print('No such file: {0:s}'.format(options.filename))
print('')
return False
print('Validating: {0:s}'.format(options.filename))
validator = ArtifactDefinitionsValidator()
if not validator.CheckFile(options.filename):
print('FAILURE')
return False
print('SUCCESS')
return True
if __name__ == '__main__':
if not Main():
sys.exit(1)
else:
sys.exit(0)
| 31.680952 | 80 | 0.651586 |
a57af4e5f745bceb01449a0cc4ac3195d2eaea32 | 1,116 | py | Python | tramatego/src/tramatego/transforms/domain_to_hash.py | kvsaurav/QRadio | 53299f5bd57b60f76596ed05ba7f1f65b255114d | [
"Apache-2.0"
] | 95 | 2016-03-04T18:34:51.000Z | 2021-08-30T03:43:17.000Z | tramatego/src/tramatego/transforms/domain_to_hash.py | netwrkspider/QRadio | 53299f5bd57b60f76596ed05ba7f1f65b255114d | [
"Apache-2.0"
] | null | null | null | tramatego/src/tramatego/transforms/domain_to_hash.py | netwrkspider/QRadio | 53299f5bd57b60f76596ed05ba7f1f65b255114d | [
"Apache-2.0"
] | 21 | 2016-03-10T12:19:59.000Z | 2020-05-09T18:54:00.000Z | #!/usr/bin/env python
from canari.maltego.utils import debug, progress
from canari.framework import configure #, superuser
from canari.maltego.entities import Domain, Phrase
from common.launchers import get_qradio_data
__author__ = 'Zappus'
__copyright__ = 'Copyright 2016, TramaTego Project'
__credits__ = []
__license__ = 'GPL'
__version__ = '0.1'
__maintainer__ = 'Zappus'
__email__ = 'zappus@protonmail.com'
__status__ = 'Development'
__all__ = [
'dotransform',
#'onterminate' # comment out this line if you don't need this function.
]
#@superuser
@configure(
label='Domain to Hash',
description='',
uuids=[ 'TramaTego.v1.DomainToHash' ],
inputs=[ ( 'TramaTego', Domain ) ],
debug=True
)
def dotransform(request, response, config):
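    # Build the QRadio CLI arguments for the input domain, run the lookup,
    # and emit each returned hash as a Maltego Phrase entity.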
command = "--domain_to_hash " + request.value
qradio_output = get_qradio_data(command, 2)
for entry in qradio_output:
response += Phrase(entry)
return response
def onterminate():
"""
TODO: Write your cleanup logic below or delete the onterminate function and remove it from the __all__ variable
"""
pass | 24.8 | 115 | 0.71147 |
956ee94706d4bbf8ef51ca4b6020077ac36ca41a | 851 | py | Python | website/models.py | ishika4242/portfolio | dff74d13d91e9d7dccec9677d3804aa5b7cad7e7 | [
"Apache-2.0"
] | null | null | null | website/models.py | ishika4242/portfolio | dff74d13d91e9d7dccec9677d3804aa5b7cad7e7 | [
"Apache-2.0"
] | null | null | null | website/models.py | ishika4242/portfolio | dff74d13d91e9d7dccec9677d3804aa5b7cad7e7 | [
"Apache-2.0"
] | null | null | null | from django.db import models
# Create your models here.
class Working(models.Model):
title = models.CharField(max_length=100)
descreption = models.CharField(max_length=100)
image = models.ImageField(upload_to='pics')
class Servicing(models.Model):
title = models.CharField(max_length=100)
descreption = models.CharField(max_length=100)
image = models.ImageField(upload_to='pics')
class Contacting(models.Model):
name = models.CharField(max_length=100)
email = models.EmailField(max_length=100)
subject = models.CharField(max_length=100)
message = models.CharField(max_length=1000)
class Workings(models.Model):
title = models.CharField(max_length=100)
descreption = models.CharField(max_length=100)
image = models.ImageField(upload_to='pics')
link = models.CharField(max_length=1000) | 30.392857 | 50 | 0.740306 |
33244fd58141b00dc8ed163b19774ccab4e35c2d | 6,218 | py | Python | main.py | NFhbar/tenant-base | e501bc136c478574fd16f3e38bd781b671245d29 | [
"MIT"
] | null | null | null | main.py | NFhbar/tenant-base | e501bc136c478574fd16f3e38bd781b671245d29 | [
"MIT"
] | 4 | 2019-01-09T21:40:37.000Z | 2021-06-01T23:17:10.000Z | main.py | NFhbar/tenant-base | e501bc136c478574fd16f3e38bd781b671245d29 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
TenantBase Backend
Key-value server implemented in SQlite and memmcached.
To run the interface:
$ python3 main.py -s database.sqlite
Interface options are:
Interface options:
- set key value exptime
- get key
- delete key
- exit
To return all key-value pairs in db:
$ python3 main.py -sh database.sqlite
"""
from os import system
import sys
import logging
import argparse
from pymemcache.client import Client
import helpers
# globals
LOG = logging.getLogger()
LOG.setLevel(logging.INFO)
KeyValueHelper = helpers.KeyValueHelper
Color = helpers.Color_Helper
HOST = 'localhost'
PORT = 11211
START_MEMCACHED = 'memcached -d'
# Menu options
DESCRIPTION = '{b}TenantBase -- sqlite3 + memcached interface.{e}'.format(b=Color.BOLD, e=Color.END)
SERVE_HELP = 'starts interface:\n $ python3 main.py -s filename'
SHOW_HELP = 'displays all key-value pairs:\n $ python3 main.py -sh filename'
MEMCACHED_INTERFACE_HELP = '{b}Interface options:{e}\n \
- set key value exptime\n \
- get key\n \
- delete key\n \
- exit\n'.format(b=Color.BOLD, e=Color.END)
DATABASE = 'database.sqlite'
# functions
def run_command(command=None):
"""Wrapper around os.system, will raise exception if command did not exit cleanly"""
exit_code = system(command)
if exit_code != 0:
LOG.error('Cannot execute command: %s. Did you install it?', command)
raise Exception('Cannot execute command: %s. Did you install it?' % command)
def parse_args(args=None):
"""parses command line arguments"""
parser = argparse.ArgumentParser(description=DESCRIPTION)
parser.add_argument('-s', '--serve', type=str, dest='serve',
help=SERVE_HELP, metavar=DATABASE)
parser.add_argument('-sh', '--show', type=str, dest='show',
help=SHOW_HELP, metavar=DATABASE)
return parser.parse_args(args)
def set_value(user_input=None, client=Client((HOST, PORT)), db_name=DATABASE):
"""sets key-value pair in memcached and sqlite"""
if user_input is None:
return []
client.set(user_input[1], user_input[2])
db_conn = KeyValueHelper(db_name)
db_conn[user_input[1]] = user_input[2]
db_conn.close()
return [user_input[1], user_input[2]]
def get_value(user_input=None, client=Client((HOST, PORT)), db_name=DATABASE):
"""gets the value from key from memcached or sqlite"""
if user_input is None:
return None
response = client.get(user_input[1])
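    # Read-through cache: check memcached first, then fall back to sqlite
    # and repopulate memcached when the key is found there.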
if response is None:
LOG.info('%s not found in memcached, searching sqlite', user_input[1])
db_conn = KeyValueHelper(db_name)
try:
response = db_conn[user_input[1]]
            LOG.info('%s found in sqlite. Adding to memcached', user_input[1])
client.set(user_input[1], response)
except KeyError as error:
LOG.info('%s not found in sqlite', user_input[1])
LOG.error(error)
db_conn.close()
else:
response = response.decode('utf-8')
return response
def delete_value(user_input=None, client=Client((HOST, PORT)), db_name=DATABASE):
"""deletes the value from memcached and sqlite"""
if user_input is None:
return False
response = client.delete(user_input[1])
try:
db_conn = KeyValueHelper(db_name)
del db_conn[user_input[1]]
db_conn.close()
except KeyError as error:
LOG.info('%s not found', user_input[1])
LOG.error(error)
response = False
return response
# main
def main(argv=None):
"""main function"""
parser = parse_args(argv)
if parser.serve:
try:
run_command(START_MEMCACHED)
print('{g}Memcached server started on {h}:{p}{e}\n'
.format(g=Color.GREEN, h=HOST, p=PORT, e=Color.END))
client = Client((HOST, PORT))
except ConnectionRefusedError as error:
LOG.error('Error: %s', error)
sys.exit(1)
exit = False
while exit is False:
            user_input = input(MEMCACHED_INTERFACE_HELP).split()
            if not user_input:
                continue
if user_input[0] == 'set':
if len(user_input) != 4:
print('{y}Please include key, value, and exptime ex:\n \
set key value exptime.\n{e}'
.format(y=Color.YELLOW, e=Color.END))
else:
response = set_value(user_input, client, parser.serve)
print('{g}{r} set in memcached and sqlite.\n{e}'
.format(g=Color.GREEN, r=response, e=Color.END))
elif user_input[0] == 'get':
if len(user_input) != 2:
print('{y}Please include key, ex:\n get key{e}'
.format(y=Color.YELLOW, e=Color.END))
else:
response = get_value(user_input, client, parser.serve)
print('{g}{r}{e}\n'.format(g=Color.GREEN, r=response, e=Color.END))
elif user_input[0] == 'delete':
if len(user_input) != 2:
print('{y}Please include key, ex:\n delete key{e}'
.format(y=Color.YELLOW, e=Color.END))
else:
response = delete_value(user_input, client, parser.serve)
print('{g}{r}{e}\n'.format(g=Color.GREEN, r=response, e=Color.END))
elif user_input[0] == 'exit':
exit = True
                print('{g}Shutting down...{e}'.format(g=Color.GREEN, e=Color.END))
else:
print('{y}Incorrect option.{e}\n'.format(y=Color.YELLOW, e=Color.END))
elif parser.show:
print('{b}Current key value pairs in sqlite\n{e}'.format(b=Color.BOLD, e=Color.END))
db_conn = KeyValueHelper(parser.show)
for key, value in db_conn:
print('{k} {v}'.format(k=key, v=value))
print('\n{b}Closing sqlite3 connection...{e}\n'.format(b=Color.BOLD, e=Color.END))
db_conn.close()
        print('{g}Shutting down...{e}'.format(g=Color.GREEN, e=Color.END))
else:
print('{y}Run python main.py -h for help{e}'.format(y=Color.YELLOW, e=Color.END))
if __name__ == '__main__':
main()
| 32.051546 | 100 | 0.601319 |
6178bba65a848c8ab9d82c96c25c06a699270946 | 1,113 | py | Python | output_trajectory.py | UcefMountacer/Direct_RGB-D_SLAM | 3a3841ae9483b35420db8f82dd700fe1c143cac3 | [
"MIT"
] | null | null | null | output_trajectory.py | UcefMountacer/Direct_RGB-D_SLAM | 3a3841ae9483b35420db8f82dd700fe1c143cac3 | [
"MIT"
] | null | null | null | output_trajectory.py | UcefMountacer/Direct_RGB-D_SLAM | 3a3841ae9483b35420db8f82dd700fe1c143cac3 | [
"MIT"
] | null | null | null | import numpy as np
import utils.conversions as conv
'''
traj = np.load('traj_0.npy')
num = len(traj)
absolute_pose_path = 'kf_index_level4.txt'
f = open(absolute_pose_path)
line = f.readlines()
for i in range(num):
trans = traj[i]
quater = conv.trans_to_quater(trans)
timestamp = line[i].split()[0]
with open('pose_op_level4_with_auto_lc.txt', 'a') as file_handle:
file_handle.write(timestamp)
for ii in range(len(quater)):
file_handle.write(' ' + str(quater[ii]))
file_handle.write('\n')
'''
def return_trajectory():
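    # Write one line per optimized keyframe pose: the timestamp from the
    # keyframe index file followed by the pose components produced by
    # conv.trans_to_quater (translation + quaternion).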
traj = np.load('traj_19.npy')
num = len(traj)
absolute_pose_path = 'kf_index_level4.txt'
f = open(absolute_pose_path)
line = f.readlines()
for i in range(num):
trans = traj[i]
quater = conv.trans_to_quater(trans)
timestamp = line[i].split()[0]
with open('readar_traj_optimized.txt', 'a') as file_handle:
file_handle.write(timestamp)
for ii in range(len(quater)):
file_handle.write(' ' + str(quater[ii]))
file_handle.write('\n')
| 27.146341 | 69 | 0.62354 |
7a932bf8ef4b40c24a7c6f19afd4ef8d7d12563e | 25,931 | py | Python | sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2021_07_01/aio/operations/_gallery_applications_operations.py | vincenttran-msft/azure-sdk-for-python | 348b56f9f03eeb3f7b502eed51daf494ffff874d | [
"MIT"
] | 1 | 2021-09-07T18:39:05.000Z | 2021-09-07T18:39:05.000Z | sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2021_07_01/aio/operations/_gallery_applications_operations.py | vincenttran-msft/azure-sdk-for-python | 348b56f9f03eeb3f7b502eed51daf494ffff874d | [
"MIT"
] | null | null | null | sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2021_07_01/aio/operations/_gallery_applications_operations.py | vincenttran-msft/azure-sdk-for-python | 348b56f9f03eeb3f7b502eed51daf494ffff874d | [
"MIT"
] | 1 | 2022-03-04T06:21:56.000Z | 2022-03-04T06:21:56.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._gallery_applications_operations import build_create_or_update_request_initial, build_delete_request_initial, build_get_request, build_list_by_gallery_request, build_update_request_initial
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class GalleryApplicationsOperations:
"""GalleryApplicationsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.compute.v2021_07_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _create_or_update_initial(
self,
resource_group_name: str,
gallery_name: str,
gallery_application_name: str,
gallery_application: "_models.GalleryApplication",
**kwargs: Any
) -> "_models.GalleryApplication":
cls = kwargs.pop('cls', None) # type: ClsType["_models.GalleryApplication"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(gallery_application, 'GalleryApplication')
request = build_create_or_update_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
gallery_name=gallery_name,
gallery_application_name=gallery_application_name,
content_type=content_type,
json=_json,
template_url=self._create_or_update_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('GalleryApplication', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('GalleryApplication', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('GalleryApplication', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/applications/{galleryApplicationName}'} # type: ignore
@distributed_trace_async
async def begin_create_or_update(
self,
resource_group_name: str,
gallery_name: str,
gallery_application_name: str,
gallery_application: "_models.GalleryApplication",
**kwargs: Any
) -> AsyncLROPoller["_models.GalleryApplication"]:
"""Create or update a gallery Application Definition.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param gallery_name: The name of the Shared Application Gallery in which the Application
Definition is to be created.
:type gallery_name: str
:param gallery_application_name: The name of the gallery Application Definition to be created
or updated. The allowed characters are alphabets and numbers with dots, dashes, and periods
allowed in the middle. The maximum length is 80 characters.
:type gallery_application_name: str
:param gallery_application: Parameters supplied to the create or update gallery Application
operation.
:type gallery_application: ~azure.mgmt.compute.v2021_07_01.models.GalleryApplication
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either GalleryApplication or the result of
cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2021_07_01.models.GalleryApplication]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.GalleryApplication"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
gallery_name=gallery_name,
gallery_application_name=gallery_application_name,
gallery_application=gallery_application,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('GalleryApplication', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/applications/{galleryApplicationName}'} # type: ignore
async def _update_initial(
self,
resource_group_name: str,
gallery_name: str,
gallery_application_name: str,
gallery_application: "_models.GalleryApplicationUpdate",
**kwargs: Any
) -> "_models.GalleryApplication":
cls = kwargs.pop('cls', None) # type: ClsType["_models.GalleryApplication"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(gallery_application, 'GalleryApplicationUpdate')
request = build_update_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
gallery_name=gallery_name,
gallery_application_name=gallery_application_name,
content_type=content_type,
json=_json,
template_url=self._update_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('GalleryApplication', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/applications/{galleryApplicationName}'} # type: ignore
@distributed_trace_async
async def begin_update(
self,
resource_group_name: str,
gallery_name: str,
gallery_application_name: str,
gallery_application: "_models.GalleryApplicationUpdate",
**kwargs: Any
) -> AsyncLROPoller["_models.GalleryApplication"]:
"""Update a gallery Application Definition.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param gallery_name: The name of the Shared Application Gallery in which the Application
Definition is to be updated.
:type gallery_name: str
:param gallery_application_name: The name of the gallery Application Definition to be updated.
The allowed characters are alphabets and numbers with dots, dashes, and periods allowed in the
middle. The maximum length is 80 characters.
:type gallery_application_name: str
:param gallery_application: Parameters supplied to the update gallery Application operation.
:type gallery_application: ~azure.mgmt.compute.v2021_07_01.models.GalleryApplicationUpdate
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either GalleryApplication or the result of
cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2021_07_01.models.GalleryApplication]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.GalleryApplication"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._update_initial(
resource_group_name=resource_group_name,
gallery_name=gallery_name,
gallery_application_name=gallery_application_name,
gallery_application=gallery_application,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('GalleryApplication', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/applications/{galleryApplicationName}'} # type: ignore
@distributed_trace_async
async def get(
self,
resource_group_name: str,
gallery_name: str,
gallery_application_name: str,
**kwargs: Any
) -> "_models.GalleryApplication":
"""Retrieves information about a gallery Application Definition.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param gallery_name: The name of the Shared Application Gallery from which the Application
Definitions are to be retrieved.
:type gallery_name: str
:param gallery_application_name: The name of the gallery Application Definition to be
retrieved.
:type gallery_application_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: GalleryApplication, or the result of cls(response)
:rtype: ~azure.mgmt.compute.v2021_07_01.models.GalleryApplication
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.GalleryApplication"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
gallery_name=gallery_name,
gallery_application_name=gallery_application_name,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('GalleryApplication', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/applications/{galleryApplicationName}'} # type: ignore
async def _delete_initial(
self,
resource_group_name: str,
gallery_name: str,
gallery_application_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_delete_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
gallery_name=gallery_name,
gallery_application_name=gallery_application_name,
template_url=self._delete_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/applications/{galleryApplicationName}'} # type: ignore
@distributed_trace_async
async def begin_delete(
self,
resource_group_name: str,
gallery_name: str,
gallery_application_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Delete a gallery Application.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param gallery_name: The name of the Shared Application Gallery in which the Application
Definition is to be deleted.
:type gallery_name: str
:param gallery_application_name: The name of the gallery Application Definition to be deleted.
:type gallery_application_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
gallery_name=gallery_name,
gallery_application_name=gallery_application_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/applications/{galleryApplicationName}'} # type: ignore
@distributed_trace
def list_by_gallery(
self,
resource_group_name: str,
gallery_name: str,
**kwargs: Any
) -> AsyncIterable["_models.GalleryApplicationList"]:
"""List gallery Application Definitions in a gallery.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param gallery_name: The name of the Shared Application Gallery from which Application
Definitions are to be listed.
:type gallery_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either GalleryApplicationList or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.compute.v2021_07_01.models.GalleryApplicationList]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.GalleryApplicationList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_gallery_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
gallery_name=gallery_name,
template_url=self.list_by_gallery.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_by_gallery_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
gallery_name=gallery_name,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("GalleryApplicationList", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_gallery.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/applications'} # type: ignore
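# Hedged usage sketch (an addition, not generator output): one way the LRO
# methods above are typically driven from async application code. The
# resource names and "eastus" location below are placeholders.
async def _example_create_gallery_application(client, resource_group, gallery, app_name):
    # Build a minimal application definition and drive the poller to completion.
    app = _models.GalleryApplication(location="eastus", supported_os_type="Linux")
    poller = await client.gallery_applications.begin_create_or_update(
        resource_group, gallery, app_name, app)
    return await poller.result()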
| 47.492674 | 224 | 0.682272 |
798f23e2390854df2d542633b076813efe8a140e | 165 | py | Python | Dynamic Programming/11727.py | kjh9267/BOJ_Python | b4d2ae09c252cc9280df93ccecbd07880947827e | [
"Apache-2.0"
] | null | null | null | Dynamic Programming/11727.py | kjh9267/BOJ_Python | b4d2ae09c252cc9280df93ccecbd07880947827e | [
"Apache-2.0"
] | null | null | null | Dynamic Programming/11727.py | kjh9267/BOJ_Python | b4d2ae09c252cc9280df93ccecbd07880947827e | [
"Apache-2.0"
] | null | null | null | import sys
n = int(sys.stdin.readline())
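# dp[i] = number of ways to tile a 2x(i+1) board with 1x2, 2x1 and 2x2
# pieces: f(n) = f(n-1) + 2*f(n-2) (BOJ 11727), reported modulo 10007.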
dp = [0]*1000
dp[0] = 1
dp[1] = 3
dp[2] = 5
for i in range(3,n):
dp[i] = (dp[i-1] + dp[i-2] * 2)%10007
print(dp[n-1]) | 13.75 | 41 | 0.527273 |
0f3705a7360ba091cf5bc725a06e64d2eb7cb6c8 | 133 | py | Python | example/run.py | SeanMabli/aiinpy | bd332fce454c489e236878c9da91bb86ec6dda14 | [
"MIT"
] | null | null | null | example/run.py | SeanMabli/aiinpy | bd332fce454c489e236878c9da91bb86ec6dda14 | [
"MIT"
] | null | null | null | example/run.py | SeanMabli/aiinpy | bd332fce454c489e236878c9da91bb86ec6dda14 | [
"MIT"
] | null | null | null | import subprocess
while True:
print(subprocess.Popen("python3 gan-mnist.py", shell=True, stdout=subprocess.PIPE).communicate()[0]) | 33.25 | 102 | 0.774436 |
bbf2f223b919bb13a1823ba572899d45b0e1154c | 2,295 | py | Python | docs/source/parallel/brown2004_ray_library.py | sys-bio/roadrunner | f0a757771ef0e337ddf7409284910e1627c3ad71 | [
"Apache-2.0"
] | 29 | 2015-05-21T21:06:06.000Z | 2021-12-06T15:33:30.000Z | docs/source/parallel/brown2004_ray_library.py | sys-bio/roadrunner | f0a757771ef0e337ddf7409284910e1627c3ad71 | [
"Apache-2.0"
] | 689 | 2015-01-27T21:45:59.000Z | 2022-03-30T23:47:28.000Z | docs/source/parallel/brown2004_ray_library.py | sys-bio/roadrunner | f0a757771ef0e337ddf7409284910e1627c3ad71 | [
"Apache-2.0"
] | 23 | 2015-06-25T22:57:21.000Z | 2021-11-06T02:03:28.000Z | import numpy as np
from roadrunner import RoadRunner
from roadrunner.testing import TestModelFactory as tmf
from multiprocessing import cpu_count
import ray
import time
from platform import platform
import cpuinfo # pip install py-cpuinfo
NCORES = cpu_count()
NSIMS = 1000000
ray.init(ignore_reinit_error=True)
@ray.remote
class SimulatorActorPath(object):
"""Ray actor to execute simulations."""
def __init__(self, r: RoadRunner):
self.r: RoadRunner = r
def simulate(self, size=1):
num_points = 10000
# results = np.ndarray((size, num_points, 2)) # 2 for 1 model species and time
for k in range(size):
self.r.resetAll()
self.r.simulate(0, 100, num_points)
# return results
if __name__ == '__main__':
# setup timing
start = time.time()
# get sbml to work with from one of our test modules
sbml = tmf.Brown2004().str()
times = [1, 10, 100, 1000, 10000, 100000]
NSIMS = 100000
# create our roadrunner instance
r = RoadRunner(sbml)
    # set the seed for a reproducible example
gillespie_integrator = r.getIntegrator()
gillespie_integrator.seed = 1234
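    # One actor per core; each actor holds its own copy of the RoadRunner
    # model so the simulation batches run in parallel across the pool.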
simulators = [SimulatorActorPath.remote(r) for _ in range(NCORES)]
# run simulations
tc_ids = []
for k, simulator in enumerate(simulators):
tcs_id = simulator.simulate.remote(size=int(np.floor(NSIMS / NCORES)))
tc_ids.append(tcs_id)
results = ray.get(tc_ids)
print(results)
duration = time.time() - start
# the time it took in serial
serial_time = 64.92753291130066
# compute speedup
# speedup = serial_time / duration
print(f'Took {duration} seconds to run', NSIMS, 'stochastic simulations on', NCORES, 'cores')
# print(f'Speed up is {speedup}')
cpu_info = cpuinfo.get_cpu_info()
print(f'Platform: {platform()}')
print('python_version:', cpu_info['python_version'])
print('Processor:', cpu_info['brand_raw'])
'''
Output:
Took 99.32935857772827 seconds to run 1000000 stochastic simulations on 16 cores
Speed up is 0.6536590373780867
Platform: Windows-10-10.0.22000-SP0
python_version: 3.9.5.final.0 (64 bit)
Processor: 11th Gen Intel(R) Core(TM) i9-11980HK @ 2.60GHz
'''
| 27.321429 | 97 | 0.667102 |
ee0d972d5e7aba17145ba2dc65dc7df32eb41c59 | 1,430 | py | Python | app/server/calculator/Calculator/helper.py | Nidhikokande/601_final_project | 2dbd18577a876c73bb0fcf7d10695257de44cafe | [
"MIT"
] | null | null | null | app/server/calculator/Calculator/helper.py | Nidhikokande/601_final_project | 2dbd18577a876c73bb0fcf7d10695257de44cafe | [
"MIT"
] | null | null | null | app/server/calculator/Calculator/helper.py | Nidhikokande/601_final_project | 2dbd18577a876c73bb0fcf7d10695257de44cafe | [
"MIT"
] | null | null | null | import functools
class Helper():
@staticmethod
def validateNumberInput(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
            # Skip args[0] (self) and reject any non-numeric positional argument.
            if any(type(item) not in (float, int) for item in args[1:]):
                raise ValueError("Input is not int or float")
            return func(*args, **kwargs)
return wrapper
@staticmethod
def validateListInput(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
            containListInput = any(type(item) in (list, tuple) for item in args[1:])
            for item in args[1:]:
                if type(item) in (list, tuple):
                    if len(item) == 0:
                        raise ValueError("list cannot be empty")
                    if any(type(num) not in (int, float) for num in item):
                        raise ValueError("Number in the list not int or float")
            if not containListInput:
                raise ValueError("Parameter does not have list input")
            return func(*args, **kwargs)
return wrapper
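# Minimal usage sketch (an illustration added here; Demo is a hypothetical
# class, not part of the original module):
class Demo:
    @Helper.validateNumberInput
    def add(self, a, b):
        return a + b
    @Helper.validateListInput
    def total(self, values):
        return sum(values)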
| 37.631579 | 94 | 0.502098 |
1cf57c012c5e74bebb99d348a83a915ceff0a5c9 | 693 | py | Python | test/test_development.py | hyperonecom/h1-client-python | 4ce355852ba3120ec1b8f509ab5894a5c08da730 | [
"MIT"
] | null | null | null | test/test_development.py | hyperonecom/h1-client-python | 4ce355852ba3120ec1b8f509ab5894a5c08da730 | [
"MIT"
] | null | null | null | test/test_development.py | hyperonecom/h1-client-python | 4ce355852ba3120ec1b8f509ab5894a5c08da730 | [
"MIT"
] | null | null | null | """
HyperOne
HyperOne API # noqa: E501
The version of the OpenAPI document: 0.1.0
Generated by: https://openapi-generator.tech
"""
import sys
import unittest
import h1
from h1.model.tag import Tag
globals()['Tag'] = Tag
from h1.model.development import Development
class TestDevelopment(unittest.TestCase):
"""Development unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testDevelopment(self):
"""Test Development"""
# FIXME: construct object with mandatory attributes with example values
# model = Development() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 18.236842 | 79 | 0.652237 |
c57992e7e7758367c4ad9f742010856ca29f5ee2 | 5,114 | py | Python | sarsa.py | rmoehn/tf-cartpole | fff596518ffdf4614376958c28532efa6381e570 | [
"MIT"
] | null | null | null | sarsa.py | rmoehn/tf-cartpole | fff596518ffdf4614376958c28532efa6381e570 | [
"MIT"
] | null | null | null | sarsa.py | rmoehn/tf-cartpole | fff596518ffdf4614376958c28532efa6381e570 | [
"MIT"
] | null | null | null | import itertools
import sys
import time
sys.path.append("../cartpole")
import gym
import matplotlib
matplotlib.use('GTK3Agg')
# pylint: disable=unused-import
from matplotlib import pyplot as plt
import numpy as np
import tensorflow as tf
# pylint: disable=unused-import
from tensorflow.python import debug as tf_debug
import pyrsistent
#### Helper functions
def true_with_prob(p):
return np.random.rand(1)[0] < p
#### Parameters of the algorithm and training
alpha = tf.constant(0.001, dtype=tf.float64)
lmbda = tf.constant(0.9, dtype=tf.float64)
epsi = 0.1
fourier_order = 3
N_episodes = 400
N_max_steps = 500
#### Setup for the environment
env = gym.make('CartPole-v1')
high = np.array([2.5, 4.4, 0.28, 3.9])
o_ranges = np.array([-high, high])
#### Derived values
N_acts = env.action_space.n # Assumes discrete action space
N_dims = o_ranges.shape[1]
N_weights_per_a = (fourier_order + 1)**N_dims
#### Fourier function approximation
C = np.array(
list( itertools.product(range(fourier_order+1), repeat=N_dims) ),
dtype=np.int32 )
tC = tf.constant(C, dtype=tf.float64)
tlow = tf.constant(-high)
to_ranges_diff = tf.constant( np.diff(o_ranges, axis=0) )
tpi = tf.constant(np.pi, dtype=tf.float64)
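# phi maps an observation o to cos(pi * C @ normalized(o)): the Fourier
# cosine basis of order `fourier_order` over the box given by o_ranges.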
def phi(local_to, name=None):
tnc_o = tf.div( tf.subtract(local_to, tlow), to_ranges_diff)
    # normalized to [0, 1]
return tf.cos( tf.mul(tpi, tf.matmul(tC, tnc_o, transpose_b=True)),
name=name )
#### Set up variables for the algorithm
vtheta = tf.Variable(tf.zeros([N_acts, N_weights_per_a], dtype=tf.float64),
name="theta")
tf.summary.histogram("vtheta", vtheta)
telig_zeroes = tf.zeros([N_acts, N_weights_per_a], dtype=tf.float64)
velig = tf.Variable(telig_zeroes)
tf.summary.histogram("velig", velig)
#### Set up placeholders for the algorithm
to = tf.placeholder(tf.float64, shape=high.shape, name="to")
tpo = tf.placeholder(tf.float64, shape=high.shape, name="tpo")
tr = tf.placeholder(tf.float64, shape=[])
ta = tf.placeholder(tf.int32, shape=[])
tpa = tf.placeholder(tf.int32, shape=[])
#### Assemble the graph
tphio = phi(to, name="to")
tphipo = phi(tpo, name="tpo")
tQall = tf.squeeze(tf.transpose(tf.matmul(vtheta, tphio)))
tga = tf.argmax(tQall, axis=0)
vthetaa = tf.slice(vtheta, [tf.squeeze(ta), 0], [1, N_weights_per_a])
tpQoa = tf.squeeze( tf.matmul(vthetaa, tphio, name='tpQoa') )
vthetapa = tf.slice(vtheta, [tf.squeeze(tpa), 0], [1, N_weights_per_a])
tpQpopa = tf.squeeze( tf.matmul(vthetapa, tphipo, name='tpQpopa') )
velig_a = tf.slice(velig, [tf.squeeze(ta), 0], [1, N_weights_per_a])
add_to_elig = tf.scatter_add(velig, [tpa], tf.transpose(tphipo))
degrade_elig = velig.assign(lmbda * velig)
reset_elig = velig.assign(telig_zeroes)
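# SARSA(lambda) update with gamma = 1: assign_sub below applies
# theta <- theta + alpha * (r + Q(o,a) - Q(o_prev,a_prev)) * eligibility.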
update = alpha * (tpQpopa - (tr + tpQoa)) * velig
update_theta = tf.assign_sub(vtheta, update)
#### Core algorithm
Timestep = pyrsistent.immutable('o, a, phio')
def think(prev, o, r, done):
phio = sess.run(tphio, feed_dict={to: o})
if not done:
ga, pQall = sess.run([tga, tQall], feed_dict={tphio: phio})
if not true_with_prob(epsi):
a = ga
else:
a = env.action_space.sample()
pQoa = pQall[a]
else:
a = None
phio = None
pQoa = 0
if prev is not None:
sess.run(add_to_elig, {tpa: prev.a, tphipo: prev.phio})
sess.run(update_theta, feed_dict={tphipo: prev.phio,
tpa: prev.a,
tpQoa: pQoa,
tr: r})
sess.run(degrade_elig)
return a, Timestep(o, a, phio)
def wrapup(prev, o, r, done=False):
if done:
think(prev, o, r, done=True)
sess.run(reset_elig)
return None
with tf.Session() as sess:
#sess = tf_debug.LocalCLIDebugWrapperSession(sess)
#sess.add_tensor_filter("has_inf_or_nan", tf_debug.has_inf_or_nan)
#summary_writer = tf.summary.FileWriter("tf-logs", sess.graph)
#merged_summary = tf.summary.merge_all()
init = tf.global_variables_initializer()
sess.run(init)
t1 = time.time()
for n_episode in xrange(N_episodes):
previous = None
observation = env.reset()
reward = 0
is_done = False
n_step = 0
for n_step in xrange(N_max_steps):
action, previous = think(previous, observation, reward, is_done)
observation, reward, is_done, _ = env.step(action)
if is_done:
break
wrapup(previous, observation, reward,
done=(is_done and (n_step != N_max_steps - 1)))
previous = None
#if n_episode % 10 == 0:
#summary = sess.run(merged_summary)
#summary_writer.add_summary(summary, n_episode)
print n_step
t2 = time.time()
print "time %s" % (t2 - t1)
| 27.058201 | 76 | 0.612632 |
2e20e28efd19b1958e354788a973bb4d1642c71f | 11,965 | py | Python | Aula27/exercicios/exercicio3.py | diegocolombo1989/Trabalho-Python | 4603117bebfb6e801c3289e108b4e8f29442ab6f | [
"MIT"
] | null | null | null | Aula27/exercicios/exercicio3.py | diegocolombo1989/Trabalho-Python | 4603117bebfb6e801c3289e108b4e8f29442ab6f | [
"MIT"
] | null | null | null | Aula27/exercicios/exercicio3.py | diegocolombo1989/Trabalho-Python | 4603117bebfb6e801c3289e108b4e8f29442ab6f | [
"MIT"
] | null | null | null | # Aula 21 - 16-12-2019
# Metodos da lista
from geradorlista import lista_simples_int_str
from geradorlista import lista_simples_inpura_int_str
from geradorlista import lista_simples_int
from geradorlista import lista_simples_str
from geradorlista import lista_simples_impura
from geradorlista import embaralhar
from geradorlista import embaralhar_int_str_hard
from geradorlista import binario
# Exercises based on the book Think Python.
# 1) Write a function called nested_sum that takes the list of lists of
# integers (lista1), returns a single flat list and prints the sum of all elements.
# For example:
# >>> t = [[1, 2], [3], [4, 5, 6]]
# >>> lista = nested_sum(t)
# 21
# >>> lista
# [1, 2, 3, 4, 5, 6]
lista1 = [lista_simples_int(), lista_simples_int(), lista_simples_int()]
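# One possible solution sketch for exercise 1 (added illustration; the
# original exercise file leaves this unsolved):
def nested_sum(nested):
    flat = []
    for sublist in nested:
        flat.extend(sublist)  # flatten one level of nesting
    print(sum(flat))          # print the sum of all elements
    return flat
# e.g. nested_sum([[1, 2], [3], [4, 5, 6]]) prints 21 and returns [1, 2, 3, 4, 5, 6]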
# 2) Turn the following lists into strings so they can (later) be written to a file!
lista_cadastro = ['codigo', 'cpf', 'nome_completo', 'data_de_nascimento',
'estado', 'cidade', 'cep', 'bairro', 'rua', 'numero', 'complemento']
lista_cadastrados = ['1', '11111111111', 'João Carlos', '12/12/90',
'SC', 'Camboriú', '8833', 'Tabuleiro', 'Cerejeiras', '45', 'ap 101']
lista_cadastrados1 = ['2', '22222222222', 'Paulo Roberto', '23/01/89',
'SC', 'Blumenau', '99999', 'Velha', '7 de setembro', '55', '']
# 3) Turn the following list into a string so it can (later) be written to a file!
lista_cadastros = [['1', 'Arnaldo', '23', 'm', 'alexcabeludo2@hotmail.com', '014908648117'], ['2', 'Haroldo', '44', 'f', 'baratarebelde@gmail.com', '050923172729'], ['3', 'Pilar', '50', 'm', 'wanderson10sp@gmail.com', '018937341049'], ['4', 'Suzete Salvador', '45', 'f', 'eladiomp2@yahoo.com.br', '056928409823'], ['5', 'Riane', '37', 'f', 'orkutzimpower@terra.com.br', '018916004377'], ['6', 'Waldir', '34', 'f', 'nandah.s2@bol.com.br', '058903756441'], ['7', 'Lilian', '22', 'f', 'arydoido@gmail.com', '031958621596'], ['8', 'Matilde', '20', 'm', 'eu_kaka_@hotmail.com', '012941959390'], ['9', 'Samanta', '19', 'm', 'carro.tuning@yahoo.com.br', '028964480437'], ['10', 'Margarida', '30', 'm', 'paraaconta.08@hotmail.com', '047903547580'], ['11', 'Evelyn', '31', 'm', 'joaosilvaticudo@gmail.com', '053958638386'], ['12', 'Alessio', '29', 'm', 'w.nill02@gmail.com', '033961294774'], ['13', 'Yolanda', '25', 'm', 'patty_karen2005@hotmail.com', '027903312626'], ['14', 'Germana', '33', 'f', 'jarlinhatopdelinhagv@hotmail.com', '053964603415'], ['15', 'Helio', '33', 'f', 'juh.slim@gmail.com', '046997316461'], ['16', 'Liége', '21', 'f', 'gledsonlds@hotmail.com', '056992948431'], ['17', 'Yan', '42', 'm', 'lucapratto@yahoo.com.br', '016963562866'], ['18', 'Silvain', '50', 'f', 'hie.s2@hotmail.com', '021963399433'], ['19', 'Brian', '33', 'f', 'juliagabrielle06@hotmail.com', '027962676732'], ['20', 'Deoclides', '40', 'f', 'patriciamascena@gmail.com', '012961047979'], ['21', 'Jaqueline', '32', 'm', 'aninha183@hotmail.com', '014958997782'], ['22', 'Rosamaria', '45', 'f', 'j_leosao@hotmail.com', '026944672627'], ['23', 'Carla', '42', 'm', 'jhasdfjo@hotmail.com', '046976625208'], ['24', 'Aida Santos', '30', 'f', 'nayara.cristinap@hotmail.com', '034920819199'], ['25', 'Thomas', '19', 'm', 'jfdslinda@bol.com.br', '030974027667'], ['26', 'Naiara', '23', 'm', 'darknees_666@ig.com.br', '018976696717'], ['27', 'Karyne', '17', 'm', 'garotosonhador_1@hotmail.com', '054984689319'], ['28', 'Alenis Dias', '43', 'f', 'vi_vi_cristinaf@hotmail.com', '034980886309'], ['29', 'Grace', '38', 'm', 'amandakell@uol.com.br', '041932906720'], ['30', 'Zacarias', '31', 'm', 'loca.som@hotmail.com', '041926007066']]
# 4) Create a function that asks for 5 names and returns a list with all of them
# 5) Com a lista "Nomes", feita no exercicio 4 (anterior) faça uma cópia para 'Nomes2' e adicione
# o nome "Pedro Paulada" no "Nomes" e "Paulo Cacetada" no "Nomes2"
# 6) Make a copy of the list 'lista_aninhada' and name it 'lista_aninhada_2'. In lista_aninhada,
# add the number 10 next to the number 9. In lista_aninhada_2, add the phrase
# "Aqui não pode ter o número 10!" next to the number 8.
lista_aninhada = [1,2,3,[4,5,[7,[9],8],6]]
# 7) Continuing the exercise, add the Nomes list (exercise 4) to lista_aninhada between
# numbers 2 and 3. In lista_aninhada_2, add "Pedro Pedroca" between numbers 4 and 5.
# In lista_aninhada, between numbers 1 and 2, add the phrase 'um, dois' and in
# lista_aninhada_2, between numbers 1 and 2, the phrase 'Adiciono qualquer coisa em qualquer lugar nesta lista!'
# 8) With lista1, sort the numbers from largest to smallest!
lista1 = lista_simples_int(100)
# 9) With lista2, sort the numbers from smallest to largest!
lista2 = lista_simples_int(100)
# 10) Using the appropriate method, add lista1 and lista2 (already sorted) to lista0.
lista0 = []
# 11) Sort lista0 and report the largest value, the smallest value and which of the
# lists (lista1 or lista2) each one belongs to.
# 12) From lista_aninhada and lista_aninhada_2 of exercise 7, remove every change that was
# made to them. Save the removed items in a list and print each item on its own line
# using f-strings (use .pop())
# 13) Using .remove(), remove the following items from these lists:
# 13.1) cpf from lista_cadastro
# 13.2) camboriú from lista_cadastrados
# 13.3) Paulo Roberto from lista_cadastrados1
# 13.4) rua
# 13.5) 8833
# 13.6) Velha
# 13.7) João Carlos
# 13.8) 11111111111
# 13.9) cidade
# 13.10) data_de_nascimento
lista_cadastro = ['codigo', 'cpf', 'nome_completo', 'data_de_nascimento',
'estado', 'cidade', 'cep', 'bairro', 'rua', 'numero', 'complemento']
lista_cadastrados = ['1', '11111111111', 'João Carlos', '12/12/90',
'SC', 'Camboriú', '8833', 'Tabuleiro', 'Cerejeiras', '45', 'ap 101']
lista_cadastrados1 = ['2', '22222222222', 'Paulo Roberto', '23/01/89',
'SC', 'Blumenau', '99999', 'Velha', '7 de setembro', '55', '']
# 14) With lista_fusao, use f-strings and the .index() method to show the position of the following elements:
# 14.1) cidade
# 14.2) João Carlos
# 14.3) Camboriú
# 14.4) 12/12/90
# 14.5) 99999
# 14.6) nome_completo
# 14.7) 22222222222
# 14.8) Tabuleiro
# 14.9) numero
lista_fusao = ['codigo', 'cpf', 'nome_completo', 'data_de_nascimento',
'estado', 'cidade', 'cep', 'bairro', 'rua', 'numero', 'complemento',
'1', '11111111111', 'João Carlos', '12/12/90',
'SC', 'Camboriú', '8833', 'Tabuleiro', 'Cerejeiras', '45', 'ap 101',
'2', '22222222222', 'Paulo Roberto', '23/01/89',
'SC', 'Blumenau', '99999', 'Velha', '7 de setembro', '55', '']
# 15) Using the .index() method, create a function that locates the position of the following names:
# Germana, Deoclides, Zacarias, Karyne, Helio, Silvain, Aida Santos
# The function must take lista_cadastros and the name as parameters. It must return a list
# containing the address of the name within lista_cadastros.
# Example:
# >>> lista = localize(lista_cadastros,'Alenis Dias')
# >>> lista_cadastros[ lista[0] ][ lista[1] ]
# 'Alenis Dias'
# Tip: use exception handling to avoid errors when looking up an index that doesn't exist!
lista_cadastros = [['1', 'Arnaldo', '23', 'm', 'alexcabeludo2@hotmail.com', '014908648117'], ['2', 'Haroldo', '44', 'f', 'baratarebelde@gmail.com', '050923172729'], ['3', 'Pilar', '50', 'm', 'wanderson10sp@gmail.com', '018937341049'], ['4', 'Suzete Salvador', '45', 'f', 'eladiomp2@yahoo.com.br', '056928409823'], ['5', 'Riane', '37', 'f', 'orkutzimpower@terra.com.br', '018916004377'], ['6', 'Waldir', '34', 'f', 'nandah.s2@bol.com.br', '058903756441'], ['7', 'Lilian', '22', 'f', 'arydoido@gmail.com', '031958621596'], ['8', 'Matilde', '20', 'm', 'eu_kaka_@hotmail.com', '012941959390'], ['9', 'Samanta', '19', 'm', 'carro.tuning@yahoo.com.br', '028964480437'], ['10', 'Margarida', '30', 'm', 'paraaconta.08@hotmail.com', '047903547580'], ['11', 'Evelyn', '31', 'm', 'joaosilvaticudo@gmail.com', '053958638386'], ['12', 'Alessio', '29', 'm', 'w.nill02@gmail.com', '033961294774'], ['13', 'Yolanda', '25', 'm', 'patty_karen2005@hotmail.com', '027903312626'], ['14', 'Germana', '33', 'f', 'jarlinhatopdelinhagv@hotmail.com', '053964603415'], ['15', 'Helio', '33', 'f', 'juh.slim@gmail.com', '046997316461'], ['16', 'Liége', '21', 'f', 'gledsonlds@hotmail.com', '056992948431'], ['17', 'Yan', '42', 'm', 'lucapratto@yahoo.com.br', '016963562866'], ['18', 'Silvain', '50', 'f', 'hie.s2@hotmail.com', '021963399433'], ['19', 'Brian', '33', 'f', 'juliagabrielle06@hotmail.com', '027962676732'], ['20', 'Deoclides', '40', 'f', 'patriciamascena@gmail.com', '012961047979'], ['21', 'Jaqueline', '32', 'm', 'aninha183@hotmail.com', '014958997782'], ['22', 'Rosamaria', '45', 'f', 'j_leosao@hotmail.com', '026944672627'], ['23', 'Carla', '42', 'm', 'jhasdfjo@hotmail.com', '046976625208'], ['24', 'Aida Santos', '30', 'f', 'nayara.cristinap@hotmail.com', '034920819199'], ['25', 'Thomas', '19', 'm', 'jfdslinda@bol.com.br', '030974027667'], ['26', 'Naiara', '23', 'm', 'darknees_666@ig.com.br', '018976696717'], ['27', 'Karyne', '17', 'm', 'garotosonhador_1@hotmail.com', '054984689319'], ['28', 'Alenis Dias', '43', 'f', 'vi_vi_cristinaf@hotmail.com', '034980886309'], ['29', 'Grace', '38', 'm', 'amandakell@uol.com.br', '041932906720'], ['30', 'Zacarias', '31', 'm', 'loca.som@hotmail.com', '041926007066']]
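# One possible solution sketch for exercise 15 (added illustration):
def localize(cadastros, nome):
    for i, registro in enumerate(cadastros):
        try:
            return [i, registro.index(nome)]  # [row, column] of the name
        except ValueError:
            continue                          # name not in this record
    return None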
# 16) Count how many times the following values occur in lista1 (use f-strings):
# 16.1) 4529
# 16.2) 29
# 16.3) 1107
# 16.4) 7927
# 16.5) 6967
# 16.6) 5964
# 16.7) 8893
# 16.8) 3972
# 16.9) 10
# 16.10) 8548
# 16.11) 8214
# 16.12) 169
# 16.13) 6214
# 16.14) 15
# 16.15) 4937
# 16.16) 9909
# 16.17) 3412
# 16.18) 6306
# 16.19) 306
lista1 = lista_simples_int(10000)
###################### .reverse() ######################
# 17) A binary number, stored in the list 'listabin', needs to be converted to decimal.
# Write a function that converts the binary number and returns it as a decimal. Print
# the binary number and the result. (use .reverse())
listabin = binario()
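# One possible solution sketch for exercise 17 (added; assumes binario()
# returns a list of 0/1 integers, most significant bit first):
def bin_to_dec(bits):
    reversed_bits = list(bits)
    reversed_bits.reverse()                   # least significant bit first
    total = 0
    for i, bit in enumerate(reversed_bits):
        total += bit * (2 ** i)               # weight each bit by 2**i
    print(bits, '->', total)
    return total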
# 18) Print the following lists and .reverse() their positions. Add the positions together
# and return the list with the sums.
# Example:
# >>> lista1 = [42,3, 1, 4]
# >>> lista_reversa = [4, 1, 3, 42]
# >>> lista_soma = [46,4, 4, 46]
# 18.1) lista1
lista1 = lista_simples_int(8)
# 18.2) lista2
lista2 = lista_simples_int(8)
# 18.3) lista3
lista3 = lista_simples_int(8)
# 18.4) lista4
lista4 = lista_simples_int(8)
# 18.5) lista5
lista5 = lista_simples_int(8)
# 18.6) lista6
lista6 = lista_simples_int(8)
# 18.7) lista7
lista7 = lista_simples_int(8)
# 18.8) lista8
lista8 = lista_simples_int(8)
# 18.9) lista9
lista9 = lista_simples_int(8)
# 18.10) lista10
lista10 = lista_simples_int(8)
# 18.11) lista11
lista11 = lista_simples_int(8)
# 18.12) lista12
lista12 = lista_simples_int(8)
# 18.13) lista13
lista13 = lista_simples_int(8)
# 18.14) lista14
lista14 = lista_simples_int(8)
# 18.15) lista15
lista15 = lista_simples_int(8)
# 18.16) lista16
lista16 = lista_simples_int(8)
# 18.17) lista17
lista17 = lista_simples_int(8)
# 18.18) lista18
lista18 = lista_simples_int(8)
# 18.19) lista19
lista19 = lista_simples_int(8)
# 19) Using a .clear() command, erase the following information:
# 19.1) erase the whole list
lista_aninhada = [1, 2, 3, [4, 5, [1, 2, 3, [4, 5, [7, [9], 8], 6]], 6]]
# 19.2) erase only: [4, 5, [1, 2, 3, [4, 5, [7, [9], 8], 6]], 6]
lista_aninhada = [1, 2, 3, [4, 5, [1, 2, 3, [4, 5, [7, [9], 8], 6]], 6]]
# 19.3) [4, 5, [7, [9], 8], 6]
lista_aninhada = [1, 2, 3, [4, 5, [1, 2, 3, [4, 5, [7, [9], 8], 6]], 6]]
# 19.4) [7, [9], 8]
lista_aninhada = [1, 2, 3, [4, 5, [1, 2, 3, [4, 5, [7, [9], 8], 6]], 6]]
# 19.5) 5,6
lista_aninhada = [[1,2],[3,4],[5,6],[7,8],[9,10]]
# 19.6) 9,10
lista_aninhada = [[1,2],[3,4],[5,6],[7,8],[9,10]]
| 43.039568 | 2,200 | 0.637192 |
fef108c33c35c6a4dddc4c01a820a0d91391ac53 | 140 | py | Python | server/wiki/apps.py | Jesterboxboy/mahjong-portal | c09362d69a81e81ed30c9159f3a35f9e9def4ac3 | [
"MIT"
] | 10 | 2018-02-12T10:30:22.000Z | 2020-06-29T21:06:15.000Z | server/wiki/apps.py | Jesterboxboy/mahjong-portal | c09362d69a81e81ed30c9159f3a35f9e9def4ac3 | [
"MIT"
] | 62 | 2018-01-05T04:52:38.000Z | 2021-04-10T07:14:45.000Z | server/wiki/apps.py | MahjongRepository/mahjong-leaderboard | 77dfd26cb812c12fa7c2b11e862bb80a9135ccb0 | [
"MIT"
] | 8 | 2018-05-11T11:05:41.000Z | 2021-03-10T08:10:50.000Z | from django.apps import AppConfig
class WikiConfig(AppConfig):
default_auto_field = "django.db.models.BigAutoField"
name = "wiki"
| 20 | 56 | 0.75 |
66d728fb7cf116117eebfb4441123f043eeac707 | 1,436 | py | Python | api/apis/downloads.py | UQ-UQx/uqx_api | 54c132ab345fa698be090c3ab4f72c8bd7b42bc3 | [
"MIT"
] | 3 | 2015-04-13T14:23:39.000Z | 2018-02-13T15:09:30.000Z | api/apis/downloads.py | UQ-UQx/uqx_api | 54c132ab345fa698be090c3ab4f72c8bd7b42bc3 | [
"MIT"
] | 7 | 2015-04-20T07:00:09.000Z | 2021-12-13T19:45:12.000Z | api/apis/downloads.py | UQ-UQx/uqx_api | 54c132ab345fa698be090c3ab4f72c8bd7b42bc3 | [
"MIT"
] | 3 | 2015-03-26T19:29:18.000Z | 2016-01-19T23:17:00.000Z | import api.views
from rest_framework import status
from rest_framework.decorators import api_view
from api.models import Log
# Logging
import logging
logger = logging.getLogger(__name__)
@api_view(['GET'])
def download_os(request):
"""
Returns a count of operating systems which have downloaded videos from the file server (not including youtube)
"""
if api.views.is_cached(request):
return api.views.api_cacherender(request)
    data = Log.countfield('request_header_user_agent__os__family', False, "online_access_logs")
return api.views.api_render(request, data, status.HTTP_200_OK)
@api_view(['GET'])
def download_browsers(request):
"""
Returns a count of browsers which have downloaded videos from the file server (not including youtube)
"""
if api.views.is_cached(request):
return api.views.api_cacherender(request)
    data = Log.countfield('request_header_user_agent__browser__family', False, "online_access_logs")
return api.views.api_render(request, data, status.HTTP_200_OK)
@api_view(['GET'])
def download_countries(request):
"""
Returns a count of countries which have downloaded videos from the file server (not including youtube)
"""
if api.views.is_cached(request):
return api.views.api_cacherender(request)
    data = Log.countfield('country', False, "online_access_logs")
return api.views.api_render(request, data, status.HTTP_200_OK) | 35.02439 | 114 | 0.749304 |
1e780432237b4ac7d53dcd16947785232e113240 | 462 | py | Python | Snippets and Basic Functions/File Operations/CSV/csv-to-buffer.py | sckulkarni246/python-snippets-for-embedded-programmers | 9dfd0b193f86a6de54598917f3d7088a60ec4abc | [
"MIT"
] | null | null | null | Snippets and Basic Functions/File Operations/CSV/csv-to-buffer.py | sckulkarni246/python-snippets-for-embedded-programmers | 9dfd0b193f86a6de54598917f3d7088a60ec4abc | [
"MIT"
] | null | null | null | Snippets and Basic Functions/File Operations/CSV/csv-to-buffer.py | sckulkarni246/python-snippets-for-embedded-programmers | 9dfd0b193f86a6de54598917f3d7088a60ec4abc | [
"MIT"
] | null | null | null | import csv
def conv_strlist_to_numlist(strlist):
numlist = []
for i in range(0,len(strlist)):
numlist.append(int(strlist[i]))
return numlist
def get_data_from_csv(logfile):
datalist = []
with open(logfile,'r') as f:
reader = csv.reader(f)
datalist = list(reader)
for i in range(0,len(datalist)):
if(len(datalist[i]) > 0):
templist = conv_strlist_to_numlist(datalist[i])
return templist
mylist = get_data_from_csv('buffer.csv')
print(mylist) | 23.1 | 50 | 0.71645 |
f5f0ee58a6f8c0ccef2206d19884c8b2d98b4582 | 231 | py | Python | src/skyciv/__init__.py | skyciv/skyciv-pip | 38ec675e2e1c665689caae1283e2b45e9893be29 | [
"MIT"
] | 4 | 2021-04-01T16:33:24.000Z | 2021-10-05T17:11:35.000Z | src/skyciv/__init__.py | skyciv/skyciv-pip | 38ec675e2e1c665689caae1283e2b45e9893be29 | [
"MIT"
] | 1 | 2021-09-29T02:58:14.000Z | 2021-09-30T22:23:41.000Z | src/skyciv/__init__.py | skyciv/skyciv-pip | 38ec675e2e1c665689caae1283e2b45e9893be29 | [
"MIT"
] | 4 | 2021-02-18T17:51:00.000Z | 2021-09-29T04:58:29.000Z |
from skyciv.constants.skyciv_sections import SkyCivSections
from skyciv.classes.api_object.api_object import ApiObject
from skyciv.classes.model.model import Model
from skyciv.lib.request import request
sections = SkyCivSections
| 28.875 | 59 | 0.861472 |
59ba23f2f062cb2f92b052e66f039e0e8148e2bb | 6,106 | py | Python | faster_rcnn/lib/model/utils/net_utils.py | maddie157/BiDet | 3cf7df65a50483e6bdd313d9d945c3dd6393e528 | [
"MIT"
] | 161 | 2020-03-08T10:37:00.000Z | 2022-03-29T13:25:36.000Z | faster_rcnn/lib/model/utils/net_utils.py | maddie157/BiDet | 3cf7df65a50483e6bdd313d9d945c3dd6393e528 | [
"MIT"
] | 41 | 2020-03-10T10:13:06.000Z | 2022-02-16T22:28:29.000Z | faster_rcnn/lib/model/utils/net_utils.py | maddie157/BiDet | 3cf7df65a50483e6bdd313d9d945c3dd6393e528 | [
"MIT"
] | 39 | 2020-03-15T22:33:38.000Z | 2021-11-05T02:45:36.000Z | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import math
import numpy as np
from lib.model.utils.config import cfg
import cv2
import pdb
import random
def save_net(fname, net):
import h5py
h5f = h5py.File(fname, mode='w')
for k, v in net.state_dict().items():
h5f.create_dataset(k, data=v.cpu().numpy())
def load_net(fname, net):
import h5py
h5f = h5py.File(fname, mode='r')
for k, v in net.state_dict().items():
param = torch.from_numpy(np.asarray(h5f[k]))
v.copy_(param)
def weights_normal_init(model, dev=0.01):
if isinstance(model, list):
for m in model:
weights_normal_init(m, dev)
else:
for m in model.modules():
if isinstance(m, nn.Conv2d):
m.weight.data.normal_(0.0, dev)
elif isinstance(m, nn.Linear):
m.weight.data.normal_(0.0, dev)
def clip_gradient(model, clip_norm):
"""Computes a gradient clipping coefficient based on gradient norm."""
totalnorm = 0
for p in model.parameters():
if p.requires_grad and p.grad is not None:
modulenorm = p.grad.norm()
totalnorm += modulenorm ** 2
totalnorm = torch.sqrt(totalnorm).item()
norm = (clip_norm / max(totalnorm, clip_norm))
for p in model.parameters():
if p.requires_grad and p.grad is not None:
p.grad.mul_(norm)
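# Added note: the scaling factor norm = clip_norm / max(totalnorm, clip_norm)
# is at most 1, so gradients are left untouched while their global L2 norm
# stays within clip_norm and are rescaled down to exactly clip_norm otherwise.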
def vis_detections(im, class_name, dets, thresh=0.8):
"""Visual debugging of detections."""
for i in range(np.minimum(10, dets.shape[0])):
bbox = tuple(int(np.round(x)) for x in dets[i, :4])
score = dets[i, -1]
if score > thresh:
cv2.rectangle(im, bbox[0:2], bbox[2:4], (0, 204, 0), 2)
cv2.putText(im, '%s: %.3f' % (class_name, score), (bbox[0], bbox[1] + 15), cv2.FONT_HERSHEY_PLAIN,
1.0, (0, 0, 255), thickness=1)
return im
def adjust_learning_rate(optimizer, decay=0.1):
"""Sets the learning rate to the initial LR decayed by 0.5 every 20 epochs"""
for param_group in optimizer.param_groups:
param_group['lr'] = decay * param_group['lr']
def save_checkpoint(state, filename):
torch.save(state, filename)
def _smooth_l1_loss(bbox_pred, bbox_targets, bbox_inside_weights, bbox_outside_weights, sigma=1.0, dim=[1]):
sigma_2 = sigma ** 2
box_diff = bbox_pred - bbox_targets
in_box_diff = bbox_inside_weights * box_diff
abs_in_box_diff = torch.abs(in_box_diff)
smoothL1_sign = (abs_in_box_diff < 1. / sigma_2).detach().float()
in_loss_box = torch.pow(in_box_diff, 2) * (sigma_2 / 2.) * smoothL1_sign \
+ (abs_in_box_diff - (0.5 / sigma_2)) * (1. - smoothL1_sign)
out_loss_box = bbox_outside_weights * in_loss_box
loss_box = out_loss_box
for i in sorted(dim, reverse=True):
loss_box = loss_box.sum(i)
loss_box = loss_box.mean()
return loss_box
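# Added note: with sigma = 1 the elementwise loss above reduces to the
# standard smooth L1 form
#   l(x) = 0.5 * x**2    if |x| < 1
#   l(x) = |x| - 0.5     otherwise;
# in general the quadratic/linear switch happens at |x| = 1 / sigma**2.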
def _crop_pool_layer(bottom, rois, max_pool=True):
# code modified from
# https://github.com/ruotianluo/pytorch-faster-rcnn
# implement it using stn
# box to affine
# input (x1,y1,x2,y2)
"""
[ x2-x1 x1 + x2 - W + 1 ]
[ ----- 0 --------------- ]
[ W - 1 W - 1 ]
[ ]
[ y2-y1 y1 + y2 - H + 1 ]
[ 0 ----- --------------- ]
[ H - 1 H - 1 ]
"""
rois = rois.detach()
batch_size = bottom.size(0)
D = bottom.size(1)
H = bottom.size(2)
W = bottom.size(3)
    roi_per_batch = rois.size(0) // batch_size
x1 = rois[:, 1::4] / 16.0
y1 = rois[:, 2::4] / 16.0
x2 = rois[:, 3::4] / 16.0
y2 = rois[:, 4::4] / 16.0
height = bottom.size(2)
width = bottom.size(3)
# affine theta
zero = Variable(rois.data.new(rois.size(0), 1).zero_())
theta = torch.cat([ \
(x2 - x1) / (width - 1),
zero,
(x1 + x2 - width + 1) / (width - 1),
zero,
(y2 - y1) / (height - 1),
(y1 + y2 - height + 1) / (height - 1)], 1).view(-1, 2, 3)
if max_pool:
pre_pool_size = cfg.POOLING_SIZE * 2
grid = F.affine_grid(theta, torch.Size((rois.size(0), 1, pre_pool_size, pre_pool_size)))
bottom = bottom.view(1, batch_size, D, H, W).contiguous().expand(roi_per_batch, batch_size, D, H, W) \
.contiguous().view(-1, D, H, W)
crops = F.grid_sample(bottom, grid)
crops = F.max_pool2d(crops, 2, 2)
else:
grid = F.affine_grid(theta, torch.Size((rois.size(0), 1, cfg.POOLING_SIZE, cfg.POOLING_SIZE)))
bottom = bottom.view(1, batch_size, D, H, W).contiguous().expand(roi_per_batch, batch_size, D, H, W) \
.contiguous().view(-1, D, H, W)
crops = F.grid_sample(bottom, grid)
return crops, grid
def _affine_grid_gen(rois, input_size, grid_size):
rois = rois.detach()
x1 = rois[:, 1::4] / 16.0
y1 = rois[:, 2::4] / 16.0
x2 = rois[:, 3::4] / 16.0
y2 = rois[:, 4::4] / 16.0
height = input_size[0]
width = input_size[1]
zero = Variable(rois.data.new(rois.size(0), 1).zero_())
theta = torch.cat([ \
(x2 - x1) / (width - 1),
zero,
(x1 + x2 - width + 1) / (width - 1),
zero,
(y2 - y1) / (height - 1),
(y1 + y2 - height + 1) / (height - 1)], 1).view(-1, 2, 3)
grid = F.affine_grid(theta, torch.Size((rois.size(0), 1, grid_size, grid_size)))
return grid
def _affine_theta(rois, input_size):
rois = rois.detach()
x1 = rois[:, 1::4] / 16.0
y1 = rois[:, 2::4] / 16.0
x2 = rois[:, 3::4] / 16.0
y2 = rois[:, 4::4] / 16.0
height = input_size[0]
width = input_size[1]
zero = Variable(rois.data.new(rois.size(0), 1).zero_())
theta = torch.cat([
(y2 - y1) / (height - 1),
zero,
(y1 + y2 - height + 1) / (height - 1),
zero,
(x2 - x1) / (width - 1),
(x1 + x2 - width + 1) / (width - 1)
], 1).view(-1, 2, 3)
return theta
| 31.637306 | 110 | 0.557648 |
7133d8bd12b62b8405edae4117cd91646e2f1272 | 679 | py | Python | var/spack/repos/builtin/packages/perl-class-load-xs/package.py | adrianjhpc/spack | 0a9e4fcee57911f2db586aa50c8873d9cca8de92 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 2 | 2020-10-15T01:08:42.000Z | 2021-10-18T01:28:18.000Z | var/spack/repos/builtin/packages/perl-class-load-xs/package.py | adrianjhpc/spack | 0a9e4fcee57911f2db586aa50c8873d9cca8de92 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 2 | 2019-07-30T10:12:28.000Z | 2019-12-17T09:02:27.000Z | var/spack/repos/builtin/packages/perl-class-load-xs/package.py | adrianjhpc/spack | 0a9e4fcee57911f2db586aa50c8873d9cca8de92 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 5 | 2019-07-30T09:42:14.000Z | 2021-01-25T05:39:20.000Z | # Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PerlClassLoadXs(PerlPackage):
"""This module provides an XS implementation for portions of
Class::Load."""
homepage = "http://search.cpan.org/~ether/Class-Load-XS-0.10/lib/Class/Load/XS.pm"
url = "http://search.cpan.org/CPAN/authors/id/E/ET/ETHER/Class-Load-XS-0.10.tar.gz"
version('0.10', sha256='5bc22cf536ebfd2564c5bdaf42f0d8a4cee3d1930fc8b44b7d4a42038622add1')
depends_on('perl-class-load', type=('build', 'run'))
| 35.736842 | 94 | 0.726068 |
b1a2d01417e636930ac2116804f7c79e02a92e07 | 337 | py | Python | chapter08_lists/2019/list_problems.py | motazsaad/WDMM1405 | 9363c9557b3fe5dff66064bb4042bbe0f884757b | [
"Apache-2.0"
] | 4 | 2019-03-01T09:27:43.000Z | 2020-10-20T05:19:08.000Z | chapter08_lists/2019/list_problems.py | zainab8585/WDMM1405 | 9363c9557b3fe5dff66064bb4042bbe0f884757b | [
"Apache-2.0"
] | null | null | null | chapter08_lists/2019/list_problems.py | zainab8585/WDMM1405 | 9363c9557b3fe5dff66064bb4042bbe0f884757b | [
"Apache-2.0"
] | 4 | 2019-02-19T18:43:34.000Z | 2022-03-13T19:09:26.000Z | def is_even(num):
if (num % 2) == 0:
return True
else:
return False
def is_odd(num):
if (num % 2) == 0:
return False
else:
return True
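# Added note: both helpers just test num % 2; equivalent one-liners would be
#   def is_even(num): return num % 2 == 0
#   def is_odd(num): return num % 2 != 0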
print('even test')
for i in range(11):
print(i, 'is even?', is_even(i))
print('odd test')
for i in range(11):
print(i, 'is odd?', is_odd(i))
| 15.318182 | 36 | 0.52819 |
a7ec651f464fb20bd003307b09c8f8399cf45535 | 25,304 | py | Python | batchflow/research/research.py | abrikoseg/batchflow | f1060f452b9407477ac61cea2a658792deca29a6 | [
"Apache-2.0"
] | 87 | 2018-11-16T08:04:12.000Z | 2022-03-24T20:08:44.000Z | batchflow/research/research.py | abrikoseg/batchflow | f1060f452b9407477ac61cea2a658792deca29a6 | [
"Apache-2.0"
] | 243 | 2018-11-29T02:03:55.000Z | 2022-02-21T08:28:29.000Z | batchflow/research/research.py | abrikoseg/batchflow | f1060f452b9407477ac61cea2a658792deca29a6 | [
"Apache-2.0"
] | 35 | 2019-01-29T14:26:14.000Z | 2021-12-30T01:39:02.000Z | #pylint:disable=logging-fstring-interpolation, too-many-arguments
""" Research class for muliple parallel experiments. """
import os
import datetime
import csv
import itertools
import subprocess
import re
import glob
import warnings
import shutil
import psutil
import dill
import multiprocess as mp
import tqdm
from .domain import Domain
from .distributor import Distributor, DynamicQueue
from .experiment import Experiment, Executor
from .results import ResearchResults
from .utils import create_logger, to_list
from .profiler import ResearchProfiler
from ..utils_random import make_seed_sequence
class Research:
""" Research is an instrument to run multiple parallel experiments with different combinations of
parameters called experiment configs. Configs are produced by :class:`domain.Domain` (some kind of
parameters grid.)
Parameters
----------
name : str, optional
name (relative path) of the research and corresponding folder to store results, by default 'research'.
domain : Domain, optional
grid of parameters (see :class:`domain.Domain`) to produce experiment configs, by default None.
experiment : Experiment, optional
description of the experiment (see :class:`experiment.Experiment`), by default None. Experiment can be
defined explicitly as a parameter or constructed by Research methods (`:meth:.add_callable`,
`:meth:.add_generator`, etc.).
n_configs : int, optional
the number of configs to get from domain (see `n_items` of :meth:`domain.Domain.set_iter_params`),
by default None.
n_reps : int, optional
the number of repetitions for each config (see `n_reps` of :meth:`domain.Domain.set_iter_params`), by default 1.
repeat_each : int, optional
see `repeat_each` of :meth:`domain.Domain.set_iter_params`, by default 100.
"""
def __init__(self, name='research', domain=None, experiment=None, n_configs=None, n_reps=1, repeat_each=None):
self.name = name
self.domain = Domain(domain)
self.experiment = experiment or Experiment()
self.n_configs = n_configs
self.n_reps = n_reps
self.repeat_each = repeat_each
self._env = dict() # current state of git repo and other environment information.
self.workers = 1
self.branches = 1
self.n_iters = None
self.devices = None
self.executor_class = Executor
self.dump_results = True
self.parallel = True
self.executor_target = 'threads'
self.loglevel = 'info'
self.bar = True
self.detach = False
self.tasks_queue = None
self.distributor = None
self.monitor = None
self.results = None
self.logger = None
self.process = None
self.debug = False
self.finalize = True
self.random_seed = None
self.profile = False
self.profiler = None
self.memory_ratio = None
self.n_gpu_checks = 3
self.gpu_check_delay = 5
def __getattr__(self, key):
if self.monitor is not None and key in self.monitor.SHARED_VARIABLES:
return getattr(self.monitor, key)
def _method(*args, **kwargs):
getattr(self.experiment, key)(*args, **kwargs)
return self
_method.__doc__ = getattr(self.experiment, key).__doc__
return _method
def __getstate__(self):
return self.__dict__
def __setstate__(self, d):
self.__dict__.update(d)
def update_domain(self, function, when, **kwargs):
""" Add domain update functions or update parameters.
Parameters
----------
function : callable or None
function to update domain, returns new domain or None (means not to update).
when : int, str or list, optional
iterations to update (see `when` of `:class:ExecutableUnit`), by default 1.
kwargs : dict
update function parameters.
"""
self.domain.set_update(function, when, **kwargs)
return self
def attach_env_meta(self, **kwargs):
""" Save the information about the current state of project repository: commit, diff, status and others.
Parameters
----------
kwargs : dict
dict where values are bash commands and keys are names of files to save output of the command.
Results will be stored in `env` subfolder of the research.
"""
commands = {
'commit': "git log --name-status HEAD^..HEAD",
'diff': 'git diff',
'status': 'git status -uno',
**kwargs
}
for filename, command in commands.items():
process = subprocess.Popen(command.split(), stdout=subprocess.PIPE)
output, _ = process.communicate()
result = re.sub('"image/png": ".*?"', '"image/png": "..."', output.decode('utf'))
if self.dump_results:
if not os.path.exists(os.path.join(self.name, 'env')):
os.makedirs(os.path.join(self.name, 'env'))
with open(os.path.join(self.name, 'env', filename + '.txt'), 'w') as file:
print(result, file=file)
else:
self._env[filename] = result
@property
def env(self):
""" Environment state. """
env = dict()
if self.dump_results:
filenames = glob.glob(os.path.join(self.name, 'env', '*'))
for filename in filenames:
name = os.path.splitext(os.path.basename(filename))[0]
with open(filename, 'r') as file:
env[name] = file.read().strip()
return env
return self._env
def get_devices(self, devices):
""" Return list if lists. Each sublist consists of devices for each branch.
Parameters
----------
devices : int, str, None or list of them
devices to split between workers and branches. (see Example below)
Returns
-------
list of lists of lists
The first nesting level corresponds to workers.
The second to branches.
The third is a list of devices for current branch.
For example, worker with index 2 and its branch with index 3 will get list of devices `devices[2][3]`.
Examples
--------
For 3 workers and 2 branches::
None -> [[[None], [None]], [[None], [None]], [[None], [None]]]
1 -> [[['1'], ['1']], [['1'], ['1']], [['1'], ['1']]]
[1, 2] -> [[['1'], ['1']], [['1'], ['2']], [['2'], ['2']]]
[1, 2, 3, 4, 5] -> [[['1'], ['2']], [['3'], ['4']], [['5'], ['1']]]
[0, 1, ..., 12] -> [[['0', '1'], ['2', '3']],
[['4', '5'], ['6', '7']],
[['8', '9'], ['10', '11']]]
"""
n_branches = self.branches if isinstance(self.branches, int) else len(self.branches)
n_workers = self.workers if isinstance(self.workers, int) else len(self.workers)
total_n_branches = n_workers * n_branches
if devices is None:
devices = [[[None]] * n_branches] * n_workers
if isinstance(devices, (int, str)):
devices = [devices]
if isinstance(devices[0], (int, str)):
if total_n_branches > len(devices):
_devices = list(itertools.chain.from_iterable(
zip(*itertools.repeat(devices, total_n_branches // len(devices)))
))
devices = _devices + devices[:total_n_branches % len(devices)]
else:
devices = devices + devices[:-len(devices) % (total_n_branches)]
if total_n_branches % len(devices) == 0:
branches_per_device = total_n_branches // len(devices)
devices = list(itertools.chain.from_iterable(itertools.repeat(x, branches_per_device) for x in devices))
if len(devices) % total_n_branches == 0:
devices_per_branch = len(devices) // total_n_branches
devices = [
[
[
devices[n_branches * devices_per_branch * i + devices_per_branch * j + k]
for k in range(devices_per_branch)
] for j in range(n_branches)
] for i in range(n_workers)
]
if isinstance(devices[0], list):
def _transform_item(x):
x = to_list(x)
values = [str(item) if isinstance(item, int) else item for item in x]
return values if x is not None else []
devices = [[_transform_item(branch_config) for branch_config in worker_config] for worker_config in devices]
return devices
def create_research_folder(self):
""" Create folder for the research results. """
os.makedirs(self.name)
for subfolder in ['env', 'experiments']:
config_path = os.path.join(self.name, subfolder)
if not os.path.exists(config_path):
os.makedirs(config_path)
def run(self, name=None, workers=1, branches=1, n_iters=None, devices=None, executor_class=Executor,
dump_results=True, parallel=True, executor_target='threads', loglevel=None, bar=True, detach=False,
debug=False, finalize=True, env_meta=None, seed=None, profile=False,
memory_ratio=None, n_gpu_checks=3, gpu_check_delay=5):
""" Run research.
Parameters
----------
name : str, optional
redefine name of the research (if needed), by default None.
workers : int or list of Config instances, optional
number of parallel workers, by default 1. If int, number of parallel workers to execute experiments.
If list of Configs, list of configs for each worker which will be appended to configs from domain. Each
element corresponds to one worker.
branches : int or list of Config instances, optional
number of different branches with different configs with the same root, by default 1.
If list of Configs, list of configs for each branch which will be appended to configs from domain. Each
element corresponds to one branch.
n_iters : int, optional
number of experiment iterations, by default None, None means that experiment will be executed until
StopIteration exception.
devices : str or list, optional
devices to split between workers and branches, by default None.
executor_class : Executor-inherited class, optional
executor for experiments, by default None (means that Executor will be used).
dump_results : bool, optional
dump results or not, by default True.
parallel : bool, optional
execute experiments in parallel in separate processes or not, by default True.
executor_target : 'for' or 'threads', optional
how to execute branches, by default 'threads'.
loglevel : str, optional
logging level, by default 'debug'.
bar : bool or class
use or not progress bar.
detach : bool, optional
run research in separate process or not, by default False.
debug : bool, optional
If False, continue research after exceptions. If True, raise Exception. Can be used only with
`parallel=False` and `executor_target='for'`, by default False.
finalize : bool, optional
continue experiment iteration after exception in some unit or not, by default True.
env_meta : dict or None
kwargs for :meth:`.Research.attach_env_meta`.
seed : bool or int or object with a seed sequence attribute
see :meth:`~batchflow.utils_random.make_seed_sequence`.
profile : bool, optional
perform Research profiling or not, be default False.
memory_ratio : float or None, optional
the ratio of free memory for all devices in worker to start experiment. If None, check will be skipped.
n_gpu_checks : int, optional
the number of such checks
gpu_check_delay : float, optional
time in seconds between checks.
Returns
-------
Research instance
**How does it work**
At each iteration all units of the experiment will be executed in the order in which were added.
If `update_domain` callable is defined, domain will be updated with the corresponding function
accordingly to `when` parameter of :meth:`~.Research.update_domain`.
"""
self.name = name or self.name
self.workers = workers
self.branches = branches
self.devices = self.get_devices(devices)
self.executor_class = executor_class
self.dump_results = dump_results
self.parallel = parallel
self.executor_target = executor_target
self.loglevel = loglevel
self.bar = bar
self.detach = detach
self.profile = profile
self.memory_ratio = memory_ratio
self.n_gpu_checks = n_gpu_checks
self.gpu_check_delay = gpu_check_delay
if debug and (parallel or executor_target not in ['f', 'for']):
raise ValueError("`debug` can be True only with `parallel=False` and `executor_target='for'`")
self.debug = (debug and not parallel)
self.finalize = finalize
self.random_seed = make_seed_sequence(seed)
if n_iters is None and self.experiment.only_callables:
self.n_iters = 1
else:
self.n_iters = n_iters
if dump_results and os.path.exists(self.name):
raise ValueError(f"Research with name '{self.name}' already exists")
self.domain.set_iter_params(n_items=self.n_configs, n_reps=self.n_reps, repeat_each=self.repeat_each)
if self.domain.size is None and (self.domain.update_func is None or self.domain.update_each == 'last'):
warnings.warn("Research will be infinite because has infinite domain and hasn't domain updating",
stacklevel=2)
if self.dump_results:
self.create_research_folder()
self.experiment = self.experiment.dump() # add final dump of experiment results
self.dump_research()
self.loglevel = loglevel or 'info'
else:
self.loglevel = loglevel or 'error'
self.attach_env_meta(**(env_meta or {}))
self.create_logger()
self.logger.info("Research is starting")
n_branches = self.branches if isinstance(self.branches, int) else len(self.branches)
self.tasks_queue = DynamicQueue(self.domain, self, n_branches)
self.distributor = Distributor(self.tasks_queue, self)
self.monitor = ResearchMonitor(self, self.name, bar=self.bar) # process execution signals
self.results = ResearchResults(self.name, self.dump_results)
self.profiler = ResearchProfiler(self.name, self.profile)
def _run():
self.monitor.start(self.dump_results)
self.distributor.run()
self.monitor.stop()
if detach:
self.process = mp.Process(target=_run)
self.process.start()
self.logger.info(f"Detach research[pid:{self.process.pid}]")
else:
_run()
return self
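    # Minimal usage sketch (added illustration, based only on the signatures
    # in this file):
    #   research = Research(name='my_research', domain=some_domain,
    #                       n_configs=10, n_reps=2)
    #   research.run(workers=2, n_iters=100, devices=[0, 1])
    #   research.results  # ResearchResults collected during the run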
def terminate(self):
""" Kill detached process. """
if self.process is not None:
self.logger.info(f"Terminate research process[pid:{self.process.pid}]")
parent = psutil.Process(self.process.pid)
for child in parent.children(recursive=True):
child.kill()
parent.kill()
def create_logger(self):
""" Create research logger. """
name = f"{self.name}"
path = os.path.join(self.name, 'research.log') if self.dump_results else None
self.logger = create_logger(name, path, self.loglevel)
def dump_research(self):
""" Dump research object. """
with open(os.path.join(self.name, 'research.dill'), 'wb') as f:
dill.dump(self, f)
with open(os.path.join(self.name, 'research.txt'), 'w') as f:
f.write(str(self))
@classmethod
def load(cls, name):
""" Load research object. """
if not cls.folder_is_research(name):
raise TypeError(f'Folder "{name}" is not research folder.')
return cls._load(name)
    @staticmethod
    def _load(name):
with open(os.path.join(name, 'research.dill'), 'rb') as f:
research = dill.load(f)
if research.dump_results:
research.results = ResearchResults(research.name, research.dump_results)
research.profiler = ResearchProfiler(research.name, research.profile)
research.results.load()
research.profiler.load()
return research
@classmethod
def remove(cls, name, ask=True, force=False):
""" Remove research folder.
Parameters
----------
name : str
research path to remove.
ask : bool, optional
display a dialogue with a question about removing or not, by default True.
force : bool
Remove folder even if it is not research folder.
"""
if not os.path.exists(name):
warnings.warn(f"Folder {name} doesn't exist.")
else:
if not force:
if not cls.folder_is_research(name):
raise ValueError(f'{name} is not a research folder.')
answer = True
if ask:
answer = input(f'Remove {name}? [y/n]').lower()
answer = len(answer) > 0 and 'yes'.startswith(answer)
if answer:
shutil.rmtree(name)
@classmethod
def folder_is_research(cls, name):
""" Check if folder contains research."""
if not os.path.exists(name):
raise FileNotFoundError(f"Folder {name} doesn't exist.")
return os.path.isfile(os.path.join(name, 'research.dill'))
def __str__(self):
spacing = ' ' * 4
repr = ''
params = ['name', 'workers', 'branches', 'n_iters', 'devices', 'dump_results',
'parallel', 'loglevel', 'executor_target', 'executor_class']
params_repr = []
for param in params:
params_repr += [f"{param}: {getattr(self, param, None)}"]
params_repr = '\n'.join(params_repr)
items = {'params': params_repr, 'experiment': str(self.experiment), 'domain': str(self.domain)}
for name in items:
repr += f"{name}:\n"
repr += '\n'.join([spacing + item for item in str(items[name]).split('\n')])
repr += 2 * '\n'
return repr
def __del__(self):
self.terminate()
class ResearchMonitor:
#pylint:disable=attribute-defined-outside-init
""" Class to get signals from experiment and other objects and store all states.
Parameters
----------
research : Research
Research object
path : str, optional
path to save signals, by default None
bar : bool or class
use progress bar or not.
"""
COLUMNS = ['time', 'task_idx', 'id', 'it', 'name', 'status', 'exception', 'worker', 'pid', 'worker_pid',
'finished', 'withdrawn', 'remains']
SHARED_VARIABLES = ['finished_experiments', 'finished_iterations', 'remained_experiments',
'generated_experiments']
def __init__(self, research, path=None, bar=True):
self.queue = mp.JoinableQueue()
self.research = research
self.path = path
self.exceptions = mp.Manager().list()
        self.bar = tqdm.tqdm(disable=(not bar), position=0, leave=True) if isinstance(bar, bool) else bar
self.shared_values = mp.Manager().dict()
for key in self.SHARED_VARIABLES:
self.shared_values[key] = 0
self.current_iterations = mp.Manager().dict()
self.n_iters = self.research.n_iters
self.stop_signal = mp.JoinableQueue()
self.dump = False
def __getattr__(self, key):
if key in self.SHARED_VARIABLES:
return self.shared_values[key]
raise AttributeError(f'Unknown attribute: {key}')
def __setattr__(self, key, value):
if key in self.SHARED_VARIABLES:
self.shared_values[key] = value
else:
super().__setattr__(key, value)
@property
def total(self):
""" Total number of iterations or experiments in the current moment. It changes after domain updates. """
if self.n_iters:
return self.finished_iterations + self.n_iters * (self.in_queue + self.remained_experiments)
return self.finished_experiments + self.in_queue + self.remained_experiments
@property
def in_progress(self):
""" The number of experiments in progress. """
return len(self.current_iterations)
@property
def in_queue(self):
""" The number of experimenys in queue of tasks. """
return self.generated_experiments - self.finished_experiments
def send(self, status, experiment=None, worker=None, **kwargs):
""" Send signal to monitor. """
signal = {
'time': str(datetime.datetime.now()),
'status': status,
**kwargs
}
if experiment is not None:
signal = {**signal, **{
'id': experiment.id,
'pid': experiment.executor.pid,
}}
if worker is not None:
signal = {**signal, **{
'worker': worker.index,
'worker_pid': worker.pid,
}}
self.queue.put(signal)
if 'exception' in signal:
self.exceptions.append(signal)
def start_experiment(self, experiment):
"""" Signal when experiment starts. """
self.send('START_EXP', experiment, experiment.executor.worker)
def stop_experiment(self, experiment):
"""" Signal when experiment stops. """
self.send('FINISH_EXP', experiment, experiment.executor.worker, it=experiment.iteration)
def execute_iteration(self, name, experiment):
"""" Signal for iteration execution. """
self.send('EXECUTE_IT', experiment, experiment.executor.worker, name=name, it=experiment.iteration)
def fail_item_execution(self, name, experiment, msg):
"""" Signal for iteration execution fail. """
self.send('FAIL_IT', experiment, experiment.executor.worker, name=name, it=experiment.iteration, exception=msg)
def stop_iteration(self, name, experiment):
"""" Signal for StopIteration exception. """
self.send('STOP_IT', experiment, experiment.executor.worker, name=name, it=experiment.iteration)
def handler(self):
""" Signals handler. """
signal = self.queue.get()
filename = os.path.join(self.path, 'monitor.csv')
with self.bar as progress:
while signal is not None:
status = signal.get('status')
if status == 'TASKS':
self.remained_experiments = signal['remains']
self.generated_experiments = signal['generated']
elif status == 'START_EXP':
self.current_iterations[signal['id']] = 0
elif status == 'EXECUTE_IT':
self.current_iterations[signal['id']] = signal['it']
elif status == 'FINISH_EXP':
self.current_iterations.pop(signal['id'])
self.finished_iterations += signal['it'] + 1
self.finished_experiments += 1
if status in ['START_EXP', 'EXECUTE_IT', 'FINISH_EXP']:
if self.n_iters:
progress.n = self.finished_iterations + sum(self.current_iterations.values())
else:
progress.n = self.finished_experiments + len(self.current_iterations)
progress.total = self.total
progress.refresh()
if self.dump:
with open(filename, 'a') as f:
writer = csv.writer(f)
writer.writerow([str(signal.get(column, '')) for column in self.COLUMNS])
signal = self.queue.get()
self.stop_signal.put(None)
def start(self, dump):
""" Start handler. """
self.dump = dump
if self.dump:
filename = os.path.join(self.path, 'monitor.csv')
if not os.path.exists(filename):
with open(filename, 'w') as f:
writer = csv.writer(f)
writer.writerow(self.COLUMNS)
mp.Process(target=self.handler).start()
def stop(self):
""" Stop handler. """
self.queue.put(None)
self.stop_signal.get()
tqdm.tqdm._instances.clear() #pylint:disable=protected-access
| 40.812903 | 120 | 0.594926 |
e6042cf3e4d0670b43aaa0c37d21b6f2313011da | 13,243 | py | Python | trkRhinoPy.py | tkahng/trkRhinoPython | a7d11ed1007355393c141d1584528a49951cbab4 | [
"MIT"
] | null | null | null | trkRhinoPy.py | tkahng/trkRhinoPython | a7d11ed1007355393c141d1584528a49951cbab4 | [
"MIT"
] | null | null | null | trkRhinoPy.py | tkahng/trkRhinoPython | a7d11ed1007355393c141d1584528a49951cbab4 | [
"MIT"
] | 2 | 2020-11-05T22:24:10.000Z | 2021-06-20T17:02:38.000Z | # -*- coding: utf-8 -*-
import Rhino
import rhinoscriptsyntax as rs
import scriptcontext as sc
import json
import ast
def intFlipBool(tf):
return abs(tf-1)
'''QuickTag'''
def setQuickTag(obj, tagVal):
rs.SetUserText(obj, 'tag', tagVal)
def getQuickTag(tagVal):
rs.Command('_SelKeyValue tag {}'.format(tagVal))
def objsSetQuickTag():
objs = rs.GetObjects('select objects to tag', preselect=True)
tagVal = rs.GetString('Tag Value')
map(lambda x: setQuickTag(x, tagVal), objs)
def objsGetQuickTag():
tagVal = rs.GetString('Tag Value')
rs.Command('_SelKeyValue tag {}'.format(tagVal))
'''object user text utils'''
def setSourceLayer(obj, source):
sourceLayer = rs.ObjectLayer(source)
rs.SetUserText(obj, 'source Layer', sourceLayer)
def copySourceLayer(obj, source):
sourceLayer = rs.ObjectLayer(source)
rs.ObjectLayer(obj, sourceLayer)
def getSourceKeys(source, bakename=False):
if rs.IsUserText(source) == 0:
print 'no keys'
return
if bakename == False:
return [ x for x in rs.GetUserText(source) if "BakeName" not in x ]
else:
return [ x for x in rs.GetUserText(source)]
def sourceKeyValue(source):
keys = getSourceKeys(source)
values = map(lambda x: rs.GetUserText(source, x), keys)
return keys, values
def copySourceData(obj, source):
if rs.IsUserText(source) == 0:
return
keyValue = sourceKeyValue(source)
# print keyValue
map(lambda x, y: rs.SetUserText(obj, x, y), keyValue[0], keyValue[1])
def valuesFromLayer(obj):
"""get values from layer name
Arguments:
obj {obj} -- any object with a attribute descriptive layer name
Returns:
list -- list of values
"""
layer = rs.ObjectLayer(obj)
if "::" in layer:
layer = layer.split("::")
layer = layer[-1]
if " " in layer:
values = layer.split(" ")
return values
else:
return [layer]
def swapParentLayer(obj, newparent):
layer = rs.ObjectLayer(obj)
if "::" in layer:
splitlayer = layer.split("::")
# currentParent = splitlayer[0]
splitlayer[0] = newparent
newlayer = "::".join(splitlayer)
# newlayer = layer.replace(currentParent, newparent)
rs.ObjectLayer(obj, newlayer)
def hatchFromSrf(srf):
border = rs.DuplicateSurfaceBorder(srf, type=0)
hatch = rs.AddHatches(border, "SOLID")
rs.DeleteObjects(border)
return hatch
def setValueByLayer(obj, keys):
keys = keys.split()
values = valuesFromLayer(obj)
# values[1], values[-1] = values[-1], values[1]
kv = zip(keys, values)
map(lambda x: rs.SetUserText(obj, x[0], x[1]), kv)
def setSrfAreaValue(obj):
area = calcArea(obj)
rs.SetUserText(obj, "area", str(area[0]))
rs.SetUserText(obj, "areapy", str(area[1]))
return area[0]
def setBrepFA(obj):
faces = getBottomFace(obj)
area = calcAreas(faces)
rs.SetUserText(obj, "area", str(area[0]))
rs.SetUserText(obj, "areapy", str(area[1]))
rs.DeleteObjects(faces)
def setObjArea(obj):
area = calcArea(obj)
rs.SetUserText(obj, "area", str(area[0]))
rs.SetUserText(obj, "areapy", str(area[1]))
return area[0]
def setObjAreaValue(obj):
if rs.IsSurface(obj):
setSrfAreaValue(obj)
elif rs.IsPolysurface(obj) and rs.IsPolysurfaceClosed(obj):
setBrepFA(obj)
else:
rs.SetUserText(obj, "area", 'na')
def setBrepHeight(obj):
if rs.IsPolysurface(obj) and rs.IsPolysurfaceClosed(obj):
height = brepGetZ(obj)
height = height[2]
rs.SetUserText(obj, "height", str(height))
def boolToggle(input):
if len(input) == 0:
return False
else:
return True
"""Geo Utils """
def castToM(islen, value):
factor = 0.001
if not islen:
factor = factor*factor
docUnits = rs.UnitSystem()
if docUnits == 2:
return value * factor
else:
return value
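# Added note: for a document in millimetres (rs.UnitSystem() == 2),
# castToM(True, 2500) -> 2.5 (mm to m) and castToM(False, 2.5e6) -> 2.5
# (mm^2 to m^2); any other unit system returns the value unchanged.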
# def castMToUnit(value):
# factor = 0.001
# docUnits = rs.UnitSystem()
# if docUnits == 2:
# return value * factor
# else:
# return value
# def calcCrvArea(crv):
# crv = rs.coercegeometry(crv)
# area = Rhino.Geometry.AreaMassProperties.Compute(crv)
# totalArea = round(castToM(False, area), 2)
# totalAreaPy = round(totalArea/3.3058, 2)
# return [totalArea, totalAreaPy]
def calcArea(srf):
area = Rhino.Geometry.AreaMassProperties.Compute(rs.coercegeometry(srf)).Area
# area = rs.SurfaceArea(srf)[0]
totalArea = round(castToM(False, area), 2)
totalAreaPy = round(totalArea/3.3058, 2)
return [totalArea, totalAreaPy]
# txt = rs.ClipboardText(totalArea)
def calcAreas(srfs):
areas = []
for srf in srfs:
# areas.append(rs.SurfaceArea(srf)[0])
areas.append(Rhino.Geometry.AreaMassProperties.Compute(rs.coercegeometry(srf)).Area)
# totalArea = round(castToM(False, sum(areas)), 2)
# totalAreaPy = round(totalArea/3.3058, 2)
totalArea = castToM(False, sum(areas))
totalAreaPy = totalArea/3.3058
return [round(totalArea, 2), round(totalAreaPy, 2)]
# txt = rs.ClipboardText(totalArea)
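# Added note: 3.3058 m^2 is one pyeong (the Korean floor-area unit), so the
# second value returned by calcArea/calcAreas is the same area in pyeong.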
def rebuildSrfCrv(obj):
crv = rs.DuplicateSurfaceBorder(obj, type=0)
map(lambda x: rs.SimplifyCurve(x), crv)
return crv
def rebuildBrep(obj):
srfs = rs.ExplodePolysurfaces(obj)
crvs = map(rebuildSrfCrv, srfs)
rs.DeleteObjects(srfs)
newSrfs = map(rs.AddPlanarSrf, crvs)
# newSrfs = rs.AddPlanarSrf(crvs)
rs.DeleteObjects(crvs)
newbrep = rs.JoinSurfaces(newSrfs, delete_input=True)
try:
copySourceLayer(newbrep, obj)
copySourceData(newbrep, obj)
except:
pass
rs.DeleteObject(obj)
# return newbrep
def getBottomFace(obj):
faces = rs.ExplodePolysurfaces(obj)
output = []
[output.append(face) if getSrfNormal(face).Z == -1 else rs.DeleteObject(face) for face in faces]
return output
def getSrfNormal(srf):
domainU = rs.SurfaceDomain(srf, 0)
domainV = rs.SurfaceDomain(srf, 1)
u = domainU[1]/2.0
v = domainV[1]/2.0
point = rs.EvaluateSurface(srf, u, v)
param = rs.SurfaceClosestPoint(srf, point)
return rs.SurfaceNormal(srf, param)
def getSrfFrame(srf):
domainU = rs.SurfaceDomain(srf, 0)
domainV = rs.SurfaceDomain(srf, 1)
u = domainU[1]/2.0
v = domainV[1]/2.0
point = rs.EvaluateSurface(srf, u, v)
param = rs.SurfaceClosestPoint(srf, point)
return rs.SurfaceFrame(srf, param)
def offsetInside(crv, dist):
rs.SimplifyCurve(crv)
centroid = rs.CurveAreaCentroid(crv)
return rs.OffsetCurve(crv, centroid[0], dist)
def brepGetZ(obj):
box = rs.BoundingBox(obj)
minZ = box[0].Z
maxZ = box[-1].Z
height = maxZ - minZ
return minZ, maxZ, round(height, 3)
def objBBPts(obj):
box = rs.BoundingBox(obj)
minZ = box[0]
maxZ = box[-2]
mid = (box[0] + box[-2])/2
return minZ, maxZ, mid
# rs.addpoint
def moveSrftoZ(srf):
domainU = rs.SurfaceDomain(srf, 0)
domainV = rs.SurfaceDomain(srf, 1)
u = domainU[1]/2.0
v = domainV[1]/2.0
point = rs.EvaluateSurface(srf, u, v)
# vec = [0, 0, point.Z]
# vec = rs.VectorReverse(vec)
# vec = [0,0,vec.Z]
rs.MoveObjects(srf, rs.VectorReverse([0, 0, point.Z]))
def BrepFootPrintRegion(breps):
edgecrvs = []
for brep in breps:
edgecrvs.extend([e.DuplicateCurve() for e in brep.Edges])
crvregion = Rhino.Geometry.Curve.CreateBooleanRegions(edgecrvs, Rhino.Geometry.Plane.WorldXY, True, Rhino.RhinoDoc.ActiveDoc.ModelAbsoluteTolerance)
outcrvs = []
for i in range(crvregion.RegionCount):
outcrvs.extend(crvregion.RegionCurves(i))
return outcrvs
def BrepFootPrintUnion(breps):
breploops = []
for brep in breps:
breploops.extend([face.OuterLoop for face in brep.Faces])
crvs = []
for loop in breploops:
crv = Rhino.Geometry.Curve.ProjectToPlane(loop.To3dCurve(), Rhino.Geometry.Plane.WorldXY)
areaprop = Rhino.Geometry.AreaMassProperties.Compute(crv)
if areaprop != None:
crvs.append(crv)
crvunion = Rhino.Geometry.Curve.CreateBooleanUnion(crvs, Rhino.RhinoDoc.ActiveDoc.ModelAbsoluteTolerance)
return crvunion
"""Level Tools"""
def brepPtZPair(brep):
el = round(brepGetZ(brep)[0], 3)
return [brep, el]
def srfPtZPair(srf):
domainU = rs.SurfaceDomain(srf, 0)
domainV = rs.SurfaceDomain(srf, 1)
u = domainU[1]/2.0
v = domainV[1]/2.0
point = rs.EvaluateSurface(srf, u, v)
el = round(point.Z, 3)
return [srf, el]
def crvPtZpair(crv):
el = round(brepGetZ(crv)[0], 3)
return [crv, el]
# def crvPtZpair(crv):
# domain = rs.CurveDomain(crv)
# t = domain[1]/2.0
# point = rs.EvaluateCurve(crv, t)
# el = round(point.Z, 3)
# return [crv, el]
def setObjZPair(obj):
if rs.IsBlockInstance(obj):
# pt = rs.CreatePoint(obj)
return [obj, round(objBBPts(obj)[0].Z, 3)]
elif rs.IsCurve(obj):
return crvPtZpair(obj)
elif rs.IsPolysurfaceClosed(obj):
return brepPtZPair(obj)
elif rs.IsSurface(obj):
return srfPtZPair(obj)
elif rs.IsPoint(obj):
pt = rs.CreatePoint(obj)
return [obj, round(pt.Z, 3)]
# elif rs.IsBlockInstance(obj):
# # pt = rs.CreatePoint(obj)
# return [obj, round(objBBPts(obj)[0].Z, 3)]
else:
pass
def groupByElevation(objs, isUG):
pairs = map(setObjZPair, objs)
values = set(map(lambda x:x[1], pairs))
newpairs = [[y for y in pairs if y[1]==x] for x in values]
return sorted(newpairs, key=lambda x:x[0][1], reverse=isUG)
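# Illustrative example (added): given objects whose base elevations are
# 0.0, 3.0, 0.0 and 6.0, groupByElevation yields one group of [obj, z]
# pairs per distinct elevation, e.g. [[[a, 0.0], [c, 0.0]], [[b, 3.0]],
# [[d, 6.0]]], sorted ascending; isUG=True reverses the sort for
# below-grade levels.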
def setLevel(sortedpairs, isUG, func):
for idx, pairs in enumerate(sortedpairs, start=1):
grade = 'ag'
if isUG:
idx = -idx
grade = 'ug'
map(lambda x: func(x, idx, grade), pairs)
def setDictforDatum(x, idx, grade):
keys = 'level grade elevation'
keys = keys.split()
vals = [idx, grade, str(x[1])]
lvldict = dict(zip(keys, vals))
rs.SetUserText(x[0], 'lvldict', lvldict)
def setLevelforObj(x, idx, grade):
rs.SetUserText(x[0], "level", str(idx))
rs.SetUserText(x[0], "grade", grade)
rs.SetUserText(x[0], "elevation", str(x[1]))
def setLevelforDatum(x, idx, grade):
rs.SetUserText(x[0], "level", str(idx))
rs.SetUserText(x[0], "grade", grade)
rs.SetUserText(x[0], "elevation", str(x[1]))
# rs.SetUserText(x[0], 'pt', )
setBrepHeight(x[0])
def cPlaneLvl():
userstr = rs.GetDocumentUserText("levels")
objdict = ast.literal_eval(userstr)
for i in objdict:
lvlname = i["level"]
elevation = float(i["elevation"])
newplane = rs.CreatePlane((0,0,elevation))
rs.ViewCPlane(None, newplane)
rs.AddNamedCPlane(lvlname)
def createSectionBox(obj):
box = rs.BoundingBox(obj)
bb = rs.AddBox(box)
faces = rs.ExplodePolysurfaces(bb)
faces = [rs.FlipSurface(x) for x in faces]
planes = [getSrfFrame(x) for x in faces]
clips = [rs.AddClippingPlane(x, 1000, 1000) for x in planes]
group = rs.AddGroup()
rs.AddObjectsToGroup(clips, group)
return clips
"""Dictionary Json"""
def createObjDict(obj):
# objkeys = [ x for x in rs.GetUserText(obj) if "BakeName" not in x ]
objkeys = [ x for x in rs.GetUserText(obj)]
objvals = map(lambda x: rs.GetUserText(obj, x), objkeys)
return dict(zip(objkeys, objvals))
"""Block Tools"""
def redefineBlockScale(block):
block_name = rs.BlockInstanceName(block)
# rs.RenameBlock (block_name, "{}-old".format(block_name))
blockXform = rs.BlockInstanceXform(block)
plane = rs.PlaneTransform(rs.WorldXYPlane(), blockXform)
cob = rs.XformChangeBasis(plane, rs.WorldXYPlane())
cob_inverse = rs.XformChangeBasis(rs.WorldXYPlane(), plane)
refBlock = rs.TransformObjects(block, cob_inverse, True )
exploded = rs.ExplodeBlockInstance(refBlock)
rs.AddBlock(exploded, rs.WorldXYPlane().Origin, block_name, True)
newBlock = rs.InsertBlock2(block_name, cob)
copySourceLayer(newBlock, block)
try:
copySourceData(newBlock, block)
    except Exception:
pass
rs.DeleteObjects(block)
def resetBlockScale(block):
block_name = rs.BlockInstanceName(block)
blockXform = rs.BlockInstanceXform(block)
plane = rs.PlaneTransform(rs.WorldXYPlane(), blockXform)
# newplane = rs.CreatePlane(plane.Origin, plane.XAxis, plane.YAxis)
# cob = rs.XformChangeBasis(newplane, rs.WorldXYPlane())
cob = rs.XformChangeBasis(plane, rs.WorldXYPlane())
newBlock = rs.InsertBlock2(block_name, cob)
copySourceLayer(newBlock, block)
try:
copySourceData(newBlock, block)
    except Exception:
pass
rs.DeleteObjects(block)
return newBlock
def blkFace(obj):
cameraPos = rs.ViewCamera()
cameraPos.Z = 0
xform = rs.BlockInstanceXform(obj)
plane = rs.PlaneTransform(rs.WorldXYPlane(), xform)
    viewdir = rs.VectorUnitize(cameraPos - plane.Origin)
    # rotate the block's Y axis toward the camera (the old angle calc was unused)
    newXform = rs.XformRotation3(plane.YAxis, viewdir, plane.Origin)
rs.TransformObject(obj, newXform) | 28.47957 | 152 | 0.645775 |
ca8a7a4627fb503739858eaaf686964767e7cbb4 | 5,376 | py | Python | gbpservice/contrib/tests/unit/nfp/configurator/lib/test_filter.py | ansao-aci/group-based-policy | d80a94dcb51bfce6994cd18339d3c79a7cb54bfe | [
"Apache-2.0"
] | 2 | 2017-12-13T10:33:28.000Z | 2019-07-03T19:01:42.000Z | gbpservice/contrib/tests/unit/nfp/configurator/lib/test_filter.py | ansao-aci/group-based-policy | d80a94dcb51bfce6994cd18339d3c79a7cb54bfe | [
"Apache-2.0"
] | 4 | 2015-04-14T00:27:44.000Z | 2020-08-31T11:20:17.000Z | gbpservice/contrib/tests/unit/nfp/configurator/lib/test_filter.py | ansao-aci/group-based-policy | d80a94dcb51bfce6994cd18339d3c79a7cb54bfe | [
"Apache-2.0"
] | 4 | 2015-04-10T16:03:47.000Z | 2020-08-31T06:06:32.000Z | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import filter_base
from gbpservice.contrib.nfp.configurator.lib import data_filter
class FilterTest(filter_base.BaseTestCase):
"""Test class to test data_filter.py using unittest framework """
def __init__(self, *args, **kwargs):
super(FilterTest, self).__init__(*args, **kwargs)
def setUp(self):
"""Prepare setup for every test case.
"""
super(FilterTest, self).setUp()
self.context = {}
self.filter_obj = data_filter.Filter(None, None)
def tearDown(self):
""" Reset values after test case execution.
"""
super(FilterTest, self).tearDown()
self.context = {}
    def _make_test(self, context, method, **filters):
        """ To reduce the boilerplate. Note: `context` is accepted only for
        its side effect -- the helpers that build it also populate
        self.context, which is what the call below actually uses.
        """
        retval = self.filter_obj.call(self.context,
                                      self.filter_obj.make_msg(method,
                                                               **filters))
        return retval
def _make_vpn_service_context(self):
"""Make the context for the vpn service
Returns: vpn service context
"""
service_info = self._test_get_vpn_info()
self.context['service_info'] = service_info
return self.context
def _make_fw_service_context(self):
"""Make the context for the fw service
Returns: fw service context
"""
service_info = self._test_get_fw_info()
self.context['service_info'] = service_info
return self.context
def test_get_vpn_service_with_tenantid(self):
"""Test get_vpn_services() of data_filter.py by passing
only tenant_id in filters
"""
retval = self._make_test(self._make_vpn_service_context(),
'get_vpn_services',
filters=(
{'tenant_id': [self.vpnservices[0]['tenant_id']]}))
self.assertEqual(retval, [self.vpnservices[0], self.vpnservices[1]])
def test_get_vpn_service_with_ids(self):
"""Test get_vpn_services() of data_filter.py by passing
vpn service ids in filters
"""
retval = self._make_test(self._make_vpn_service_context(),
'get_vpn_services',
ids=[self.vpnservices[0]['id'],
self.vpnservices[1]['id']])
self.assertEqual(retval, [self.vpnservices[0], self.vpnservices[1]])
def test_get_ipsec_conns(self):
"""Test get_ipsec_conns() of data_filter.py
"""
retval = self._make_test(
self._make_vpn_service_context(),
'get_ipsec_conns',
tenant_id=[self.ipsec_site_connections[0]['tenant_id']],
peer_address=[self.ipsec_site_connections[0]['peer_address']])
self.assertEqual(retval, self.ipsec_site_connections)
def test_get_vpn_servicecontext_ipsec_service_type(self):
"""Test get_vpn_servicecontext() of data_filter.py
based on ipsec service type
"""
service_info = self._test_get_vpn_info()
self.context['service_info'] = service_info
retval = self.filter_obj._get_vpn_servicecontext(
self.context,
{'tenant_id': self.vpnservices[0]['tenant_id'],
'vpnservice_id': self.vpnservices[0]['id'],
'ipsec_site_connections':
self.ipsec_site_connections[0]['id']})
expected = {'service': self.vpnservices[0],
'siteconns': [{'connection':
self.ipsec_site_connections[0],
'ikepolicy': self.ikepolicies[0],
'ipsecpolicy': self.ipsecpolicies[0]
}]}
self.assertEqual(retval, [expected])
def test_get_vpn_servicecontext_ipsec_service_type_with_tenantid(self):
"""Test get_vpn_servicecontext() of data_filter.py
based on ipsec service type and tenant_id
"""
service_info = self._test_get_vpn_info()
self.context['service_info'] = service_info
retval = self.filter_obj._get_vpn_servicecontext(
self.context,
{'tenant_id': self.vpnservices[0]['tenant_id'],
})
expected = {'service': self.vpnservices[0],
'siteconns': [{'connection':
self.ipsec_site_connections[0],
'ikepolicy': self.ikepolicies[0],
'ipsecpolicy': self.ipsecpolicies[0]
}]}
self.assertEqual(retval, [expected])
| 39.822222 | 79 | 0.574591 |
1d0258f198c24a747b1c0af6e75d0d20c9e9299f | 47,030 | py | Python | alphafold/model/all_atom.py | milot-mirdita/alphafold | 8a24cc8d22feb2b7ae9bf78f910ce97e9d4403e8 | [
"Apache-2.0"
] | 45 | 2021-07-18T05:26:21.000Z | 2022-03-15T18:22:48.000Z | alphafold/model/all_atom.py | milot-mirdita/alphafold | 8a24cc8d22feb2b7ae9bf78f910ce97e9d4403e8 | [
"Apache-2.0"
] | 33 | 2021-08-12T14:06:16.000Z | 2022-03-31T18:47:49.000Z | alphafold/model/all_atom.py | milot-mirdita/alphafold | 8a24cc8d22feb2b7ae9bf78f910ce97e9d4403e8 | [
"Apache-2.0"
] | 20 | 2021-07-18T03:34:47.000Z | 2022-02-19T02:51:09.000Z | # Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Ops for all atom representations.
Generally we employ two different representations for all atom coordinates,
one is atom37 where each heavy atom corresponds to a given position in a 37
dimensional array, This mapping is non amino acid specific, but each slot
corresponds to an atom of a given name, for example slot 12 always corresponds
to 'C delta 1', positions that are not present for a given amino acid are
zeroed out and denoted by a mask.
The other representation we employ is called atom14, this is a more dense way
of representing atoms with 14 slots. Here a given slot will correspond to a
different kind of atom depending on amino acid type, for example slot 5
corresponds to 'N delta 2' for Aspargine, but to 'C delta 1' for Isoleucine.
14 is chosen because it is the maximum number of heavy atoms for any standard
amino acid.
The order of slots can be found in 'residue_constants.residue_atoms'.
Internally the model uses the atom14 representation because it is
computationally more efficient.
The internal atom14 representation is turned into the atom37 at the output of
the network to facilitate easier conversion to existing protein datastructures.
"""
from typing import Dict, Optional
import jax
import jax.numpy as jnp
import numpy as np
from alphafold.common import residue_constants
from alphafold.model import r3
from alphafold.model import utils
def squared_difference(x, y):
return jnp.square(x - y)
def get_chi_atom_indices():
"""Returns atom indices needed to compute chi angles for all residue types.
Returns:
A tensor of shape [residue_types=21, chis=4, atoms=4]. The residue types are
in the order specified in residue_constants.restypes + unknown residue type
at the end. For chi angles which are not defined on the residue, the
positions indices are by default set to 0.
"""
chi_atom_indices = []
for residue_name in residue_constants.restypes:
residue_name = residue_constants.restype_1to3[residue_name]
residue_chi_angles = residue_constants.chi_angles_atoms[residue_name]
atom_indices = []
for chi_angle in residue_chi_angles:
atom_indices.append(
[residue_constants.atom_order[atom] for atom in chi_angle])
for _ in range(4 - len(atom_indices)):
atom_indices.append([0, 0, 0, 0]) # For chi angles not defined on the AA.
chi_atom_indices.append(atom_indices)
chi_atom_indices.append([[0, 0, 0, 0]] * 4) # For UNKNOWN residue.
return jnp.asarray(chi_atom_indices)
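# Shape sketch: the returned table is (restypes=21, chis=4, atoms=4); e.g.
# row 0 (alanine) is all zeros because alanine defines no chi angles.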
def atom14_to_atom37(atom14_data: jnp.ndarray, # (N, 14, ...)
batch: Dict[str, jnp.ndarray]
) -> jnp.ndarray: # (N, 37, ...)
"""Convert atom14 to atom37 representation."""
assert len(atom14_data.shape) in [2, 3]
assert 'residx_atom37_to_atom14' in batch
assert 'atom37_atom_exists' in batch
atom37_data = utils.batched_gather(atom14_data,
batch['residx_atom37_to_atom14'],
batch_dims=1)
if len(atom14_data.shape) == 2:
atom37_data *= batch['atom37_atom_exists']
elif len(atom14_data.shape) == 3:
atom37_data *= batch['atom37_atom_exists'][:, :,
None].astype(atom37_data.dtype)
return atom37_data
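# Minimal shape sketch (dummy data; a real `batch` comes from the feature
# pipeline, so the index/mask values below are placeholders):
def _example_atom14_to_atom37():
  num_res = 8
  atom14 = jnp.zeros([num_res, 14, 3])
  batch = {
      'residx_atom37_to_atom14': jnp.zeros([num_res, 37], dtype=jnp.int32),
      'atom37_atom_exists': jnp.ones([num_res, 37]),
  }
  return atom14_to_atom37(atom14, batch)  # shape (num_res, 37, 3)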
def atom37_to_atom14(
atom37_data: jnp.ndarray, # (N, 37, ...)
batch: Dict[str, jnp.ndarray]) -> jnp.ndarray: # (N, 14, ...)
"""Convert atom14 to atom37 representation."""
assert len(atom37_data.shape) in [2, 3]
assert 'residx_atom14_to_atom37' in batch
assert 'atom14_atom_exists' in batch
atom14_data = utils.batched_gather(atom37_data,
batch['residx_atom14_to_atom37'],
batch_dims=1)
if len(atom37_data.shape) == 2:
atom14_data *= batch['atom14_atom_exists'].astype(atom14_data.dtype)
elif len(atom37_data.shape) == 3:
atom14_data *= batch['atom14_atom_exists'][:, :,
None].astype(atom14_data.dtype)
return atom14_data
def atom37_to_frames(
aatype: jnp.ndarray, # (...)
all_atom_positions: jnp.ndarray, # (..., 37, 3)
all_atom_mask: jnp.ndarray, # (..., 37)
) -> Dict[str, jnp.ndarray]:
"""Computes the frames for the up to 8 rigid groups for each residue.
The rigid groups are defined by the possible torsions in a given amino acid.
We group the atoms according to their dependence on the torsion angles into
"rigid groups". E.g., the position of atoms in the chi2-group depend on
chi1 and chi2, but do not depend on chi3 or chi4.
Jumper et al. (2021) Suppl. Table 2 and corresponding text.
Args:
aatype: Amino acid type, given as array with integers.
all_atom_positions: atom37 representation of all atom coordinates.
all_atom_mask: atom37 representation of mask on all atom coordinates.
Returns:
Dictionary containing:
* 'rigidgroups_gt_frames': 8 Frames corresponding to 'all_atom_positions'
represented as flat 12 dimensional array.
* 'rigidgroups_gt_exists': Mask denoting whether the atom positions for
the given frame are available in the ground truth, e.g. if they were
resolved in the experiment.
* 'rigidgroups_group_exists': Mask denoting whether given group is in
principle present for given amino acid type.
* 'rigidgroups_group_is_ambiguous': Mask denoting whether frame is
affected by naming ambiguity.
* 'rigidgroups_alt_gt_frames': 8 Frames with alternative atom renaming
corresponding to 'all_atom_positions' represented as flat
12 dimensional array.
"""
# 0: 'backbone group',
# 1: 'pre-omega-group', (empty)
# 2: 'phi-group', (currently empty, because it defines only hydrogens)
# 3: 'psi-group',
# 4,5,6,7: 'chi1,2,3,4-group'
aatype_in_shape = aatype.shape
# If there is a batch axis, just flatten it away, and reshape everything
# back at the end of the function.
aatype = jnp.reshape(aatype, [-1])
all_atom_positions = jnp.reshape(all_atom_positions, [-1, 37, 3])
all_atom_mask = jnp.reshape(all_atom_mask, [-1, 37])
# Create an array with the atom names.
# shape (num_restypes, num_rigidgroups, 3_atoms): (21, 8, 3)
restype_rigidgroup_base_atom_names = np.full([21, 8, 3], '', dtype=object)
# 0: backbone frame
restype_rigidgroup_base_atom_names[:, 0, :] = ['C', 'CA', 'N']
# 3: 'psi-group'
restype_rigidgroup_base_atom_names[:, 3, :] = ['CA', 'C', 'O']
# 4,5,6,7: 'chi1,2,3,4-group'
for restype, restype_letter in enumerate(residue_constants.restypes):
resname = residue_constants.restype_1to3[restype_letter]
for chi_idx in range(4):
if residue_constants.chi_angles_mask[restype][chi_idx]:
atom_names = residue_constants.chi_angles_atoms[resname][chi_idx]
restype_rigidgroup_base_atom_names[
restype, chi_idx + 4, :] = atom_names[1:]
# Create mask for existing rigid groups.
restype_rigidgroup_mask = np.zeros([21, 8], dtype=np.float32)
restype_rigidgroup_mask[:, 0] = 1
restype_rigidgroup_mask[:, 3] = 1
restype_rigidgroup_mask[:20, 4:] = residue_constants.chi_angles_mask
# Translate atom names into atom37 indices.
lookuptable = residue_constants.atom_order.copy()
lookuptable[''] = 0
restype_rigidgroup_base_atom37_idx = np.vectorize(lambda x: lookuptable[x])(
restype_rigidgroup_base_atom_names)
# Compute the gather indices for all residues in the chain.
# shape (N, 8, 3)
residx_rigidgroup_base_atom37_idx = utils.batched_gather(
restype_rigidgroup_base_atom37_idx, aatype)
# Gather the base atom positions for each rigid group.
base_atom_pos = utils.batched_gather(
all_atom_positions,
residx_rigidgroup_base_atom37_idx,
batch_dims=1)
# Compute the Rigids.
gt_frames = r3.rigids_from_3_points(
point_on_neg_x_axis=r3.vecs_from_tensor(base_atom_pos[:, :, 0, :]),
origin=r3.vecs_from_tensor(base_atom_pos[:, :, 1, :]),
point_on_xy_plane=r3.vecs_from_tensor(base_atom_pos[:, :, 2, :])
)
# Compute a mask whether the group exists.
# (N, 8)
group_exists = utils.batched_gather(restype_rigidgroup_mask, aatype)
# Compute a mask whether ground truth exists for the group
gt_atoms_exist = utils.batched_gather( # shape (N, 8, 3)
all_atom_mask.astype(jnp.float32),
residx_rigidgroup_base_atom37_idx,
batch_dims=1)
gt_exists = jnp.min(gt_atoms_exist, axis=-1) * group_exists # (N, 8)
# Adapt backbone frame to old convention (mirror x-axis and z-axis).
rots = np.tile(np.eye(3, dtype=np.float32), [8, 1, 1])
rots[0, 0, 0] = -1
rots[0, 2, 2] = -1
gt_frames = r3.rigids_mul_rots(gt_frames, r3.rots_from_tensor3x3(rots))
# The frames for ambiguous rigid groups are just rotated by 180 degree around
# the x-axis. The ambiguous group is always the last chi-group.
restype_rigidgroup_is_ambiguous = np.zeros([21, 8], dtype=np.float32)
restype_rigidgroup_rots = np.tile(np.eye(3, dtype=np.float32), [21, 8, 1, 1])
for resname, _ in residue_constants.residue_atom_renaming_swaps.items():
restype = residue_constants.restype_order[
residue_constants.restype_3to1[resname]]
chi_idx = int(sum(residue_constants.chi_angles_mask[restype]) - 1)
restype_rigidgroup_is_ambiguous[restype, chi_idx + 4] = 1
restype_rigidgroup_rots[restype, chi_idx + 4, 1, 1] = -1
restype_rigidgroup_rots[restype, chi_idx + 4, 2, 2] = -1
# Gather the ambiguity information for each residue.
residx_rigidgroup_is_ambiguous = utils.batched_gather(
restype_rigidgroup_is_ambiguous, aatype)
residx_rigidgroup_ambiguity_rot = utils.batched_gather(
restype_rigidgroup_rots, aatype)
# Create the alternative ground truth frames.
alt_gt_frames = r3.rigids_mul_rots(
gt_frames, r3.rots_from_tensor3x3(residx_rigidgroup_ambiguity_rot))
gt_frames_flat12 = r3.rigids_to_tensor_flat12(gt_frames)
alt_gt_frames_flat12 = r3.rigids_to_tensor_flat12(alt_gt_frames)
# reshape back to original residue layout
gt_frames_flat12 = jnp.reshape(gt_frames_flat12, aatype_in_shape + (8, 12))
gt_exists = jnp.reshape(gt_exists, aatype_in_shape + (8,))
group_exists = jnp.reshape(group_exists, aatype_in_shape + (8,))
residx_rigidgroup_is_ambiguous = jnp.reshape(residx_rigidgroup_is_ambiguous,
aatype_in_shape + (8,))
alt_gt_frames_flat12 = jnp.reshape(alt_gt_frames_flat12,
aatype_in_shape + (8, 12,))
return {
'rigidgroups_gt_frames': gt_frames_flat12, # (..., 8, 12)
'rigidgroups_gt_exists': gt_exists, # (..., 8)
'rigidgroups_group_exists': group_exists, # (..., 8)
'rigidgroups_group_is_ambiguous':
residx_rigidgroup_is_ambiguous, # (..., 8)
'rigidgroups_alt_gt_frames': alt_gt_frames_flat12, # (..., 8, 12)
}
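# Shape sketch (dummy all-zero inputs; the resulting frames are degenerate,
# but the output shapes are representative):
def _example_atom37_to_frames():
  num_res = 4
  aatype = jnp.zeros([num_res], dtype=jnp.int32)  # all 'A' (alanine)
  out = atom37_to_frames(aatype,
                         jnp.zeros([num_res, 37, 3]),
                         jnp.zeros([num_res, 37]))
  return out['rigidgroups_gt_frames'].shape  # (4, 8, 12)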
def atom37_to_torsion_angles(
aatype: jnp.ndarray, # (B, N)
all_atom_pos: jnp.ndarray, # (B, N, 37, 3)
all_atom_mask: jnp.ndarray, # (B, N, 37)
placeholder_for_undefined=False,
) -> Dict[str, jnp.ndarray]:
"""Computes the 7 torsion angles (in sin, cos encoding) for each residue.
The 7 torsion angles are in the order
'[pre_omega, phi, psi, chi_1, chi_2, chi_3, chi_4]',
here pre_omega denotes the omega torsion angle between the given amino acid
and the previous amino acid.
Args:
aatype: Amino acid type, given as array with integers.
all_atom_pos: atom37 representation of all atom coordinates.
all_atom_mask: atom37 representation of mask on all atom coordinates.
placeholder_for_undefined: flag denoting whether to set masked torsion
angles to zero.
Returns:
Dict containing:
* 'torsion_angles_sin_cos': Array with shape (B, N, 7, 2) where the final
2 dimensions denote sin and cos respectively
* 'alt_torsion_angles_sin_cos': same as 'torsion_angles_sin_cos', but
with the angle shifted by pi for all chi angles affected by the naming
ambiguities.
* 'torsion_angles_mask': Mask for which chi angles are present.
"""
# Map aatype > 20 to 'Unknown' (20).
aatype = jnp.minimum(aatype, 20)
# Compute the backbone angles.
num_batch, num_res = aatype.shape
pad = jnp.zeros([num_batch, 1, 37, 3], jnp.float32)
prev_all_atom_pos = jnp.concatenate([pad, all_atom_pos[:, :-1, :, :]], axis=1)
pad = jnp.zeros([num_batch, 1, 37], jnp.float32)
prev_all_atom_mask = jnp.concatenate([pad, all_atom_mask[:, :-1, :]], axis=1)
# For each torsion angle collect the 4 atom positions that define this angle.
# shape (B, N, atoms=4, xyz=3)
pre_omega_atom_pos = jnp.concatenate(
[prev_all_atom_pos[:, :, 1:3, :], # prev CA, C
all_atom_pos[:, :, 0:2, :] # this N, CA
], axis=-2)
phi_atom_pos = jnp.concatenate(
[prev_all_atom_pos[:, :, 2:3, :], # prev C
all_atom_pos[:, :, 0:3, :] # this N, CA, C
], axis=-2)
psi_atom_pos = jnp.concatenate(
[all_atom_pos[:, :, 0:3, :], # this N, CA, C
all_atom_pos[:, :, 4:5, :] # this O
], axis=-2)
# Collect the masks from these atoms.
# Shape [batch, num_res]
pre_omega_mask = (
jnp.prod(prev_all_atom_mask[:, :, 1:3], axis=-1) # prev CA, C
* jnp.prod(all_atom_mask[:, :, 0:2], axis=-1)) # this N, CA
phi_mask = (
prev_all_atom_mask[:, :, 2] # prev C
* jnp.prod(all_atom_mask[:, :, 0:3], axis=-1)) # this N, CA, C
psi_mask = (
jnp.prod(all_atom_mask[:, :, 0:3], axis=-1) * # this N, CA, C
all_atom_mask[:, :, 4]) # this O
# Collect the atoms for the chi-angles.
# Compute the table of chi angle indices. Shape: [restypes, chis=4, atoms=4].
chi_atom_indices = get_chi_atom_indices()
# Select atoms to compute chis. Shape: [batch, num_res, chis=4, atoms=4].
atom_indices = utils.batched_gather(
params=chi_atom_indices, indices=aatype, axis=0, batch_dims=0)
# Gather atom positions. Shape: [batch, num_res, chis=4, atoms=4, xyz=3].
chis_atom_pos = utils.batched_gather(
params=all_atom_pos, indices=atom_indices, axis=-2,
batch_dims=2)
# Copy the chi angle mask, add the UNKNOWN residue. Shape: [restypes, 4].
chi_angles_mask = list(residue_constants.chi_angles_mask)
chi_angles_mask.append([0.0, 0.0, 0.0, 0.0])
chi_angles_mask = jnp.asarray(chi_angles_mask)
# Compute the chi angle mask. I.e. which chis angles exist according to the
# aatype. Shape [batch, num_res, chis=4].
chis_mask = utils.batched_gather(params=chi_angles_mask, indices=aatype,
axis=0, batch_dims=0)
# Constrain the chis_mask to those chis, where the ground truth coordinates of
# all defining four atoms are available.
# Gather the chi angle atoms mask. Shape: [batch, num_res, chis=4, atoms=4].
chi_angle_atoms_mask = utils.batched_gather(
params=all_atom_mask, indices=atom_indices, axis=-1,
batch_dims=2)
# Check if all 4 chi angle atoms were set. Shape: [batch, num_res, chis=4].
chi_angle_atoms_mask = jnp.prod(chi_angle_atoms_mask, axis=[-1])
chis_mask = chis_mask * (chi_angle_atoms_mask).astype(jnp.float32)
# Stack all torsion angle atom positions.
# Shape (B, N, torsions=7, atoms=4, xyz=3)
torsions_atom_pos = jnp.concatenate(
[pre_omega_atom_pos[:, :, None, :, :],
phi_atom_pos[:, :, None, :, :],
psi_atom_pos[:, :, None, :, :],
chis_atom_pos
], axis=2)
# Stack up masks for all torsion angles.
# shape (B, N, torsions=7)
torsion_angles_mask = jnp.concatenate(
[pre_omega_mask[:, :, None],
phi_mask[:, :, None],
psi_mask[:, :, None],
chis_mask
], axis=2)
# Create a frame from the first three atoms:
# First atom: point on x-y-plane
# Second atom: point on negative x-axis
# Third atom: origin
# r3.Rigids (B, N, torsions=7)
torsion_frames = r3.rigids_from_3_points(
point_on_neg_x_axis=r3.vecs_from_tensor(torsions_atom_pos[:, :, :, 1, :]),
origin=r3.vecs_from_tensor(torsions_atom_pos[:, :, :, 2, :]),
point_on_xy_plane=r3.vecs_from_tensor(torsions_atom_pos[:, :, :, 0, :]))
  # Compute the position of the fourth atom in this frame (its y and z
  # coordinates define the chi angle)
# r3.Vecs (B, N, torsions=7)
forth_atom_rel_pos = r3.rigids_mul_vecs(
r3.invert_rigids(torsion_frames),
r3.vecs_from_tensor(torsions_atom_pos[:, :, :, 3, :]))
# Normalize to have the sin and cos of the torsion angle.
# jnp.ndarray (B, N, torsions=7, sincos=2)
torsion_angles_sin_cos = jnp.stack(
[forth_atom_rel_pos.z, forth_atom_rel_pos.y], axis=-1)
torsion_angles_sin_cos /= jnp.sqrt(
jnp.sum(jnp.square(torsion_angles_sin_cos), axis=-1, keepdims=True)
+ 1e-8)
# Mirror psi, because we computed it from the Oxygen-atom.
torsion_angles_sin_cos *= jnp.asarray(
[1., 1., -1., 1., 1., 1., 1.])[None, None, :, None]
# Create alternative angles for ambiguous atom names.
chi_is_ambiguous = utils.batched_gather(
jnp.asarray(residue_constants.chi_pi_periodic), aatype)
mirror_torsion_angles = jnp.concatenate(
[jnp.ones([num_batch, num_res, 3]),
1.0 - 2.0 * chi_is_ambiguous], axis=-1)
alt_torsion_angles_sin_cos = (
torsion_angles_sin_cos * mirror_torsion_angles[:, :, :, None])
if placeholder_for_undefined:
# Add placeholder torsions in place of undefined torsion angles
# (e.g. N-terminus pre-omega)
placeholder_torsions = jnp.stack([
jnp.ones(torsion_angles_sin_cos.shape[:-1]),
jnp.zeros(torsion_angles_sin_cos.shape[:-1])
], axis=-1)
torsion_angles_sin_cos = torsion_angles_sin_cos * torsion_angles_mask[
..., None] + placeholder_torsions * (1 - torsion_angles_mask[..., None])
alt_torsion_angles_sin_cos = alt_torsion_angles_sin_cos * torsion_angles_mask[
..., None] + placeholder_torsions * (1 - torsion_angles_mask[..., None])
return {
'torsion_angles_sin_cos': torsion_angles_sin_cos, # (B, N, 7, 2)
'alt_torsion_angles_sin_cos': alt_torsion_angles_sin_cos, # (B, N, 7, 2)
'torsion_angles_mask': torsion_angles_mask # (B, N, 7)
}
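# Shape sketch (dummy batch of one all-masked chain; placeholders fill the
# undefined torsions):
def _example_atom37_to_torsion_angles():
  aatype = jnp.zeros([1, 4], dtype=jnp.int32)
  out = atom37_to_torsion_angles(aatype,
                                 jnp.zeros([1, 4, 37, 3]),
                                 jnp.zeros([1, 4, 37]),
                                 placeholder_for_undefined=True)
  return out['torsion_angles_sin_cos'].shape  # (1, 4, 7, 2)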
def torsion_angles_to_frames(
aatype: jnp.ndarray, # (N)
backb_to_global: r3.Rigids, # (N)
torsion_angles_sin_cos: jnp.ndarray # (N, 7, 2)
) -> r3.Rigids: # (N, 8)
"""Compute rigid group frames from torsion angles.
Jumper et al. (2021) Suppl. Alg. 24 "computeAllAtomCoordinates" lines 2-10
Jumper et al. (2021) Suppl. Alg. 25 "makeRotX"
Args:
aatype: aatype for each residue
backb_to_global: Rigid transformations describing transformation from
backbone frame to global frame.
torsion_angles_sin_cos: sin and cosine of the 7 torsion angles
Returns:
Frames corresponding to all the Sidechain Rigid Transforms
"""
assert len(aatype.shape) == 1
assert len(backb_to_global.rot.xx.shape) == 1
assert len(torsion_angles_sin_cos.shape) == 3
assert torsion_angles_sin_cos.shape[1] == 7
assert torsion_angles_sin_cos.shape[2] == 2
# Gather the default frames for all rigid groups.
# r3.Rigids with shape (N, 8)
m = utils.batched_gather(residue_constants.restype_rigid_group_default_frame,
aatype)
default_frames = r3.rigids_from_tensor4x4(m)
# Create the rotation matrices according to the given angles (each frame is
# defined such that its rotation is around the x-axis).
sin_angles = torsion_angles_sin_cos[..., 0]
cos_angles = torsion_angles_sin_cos[..., 1]
# insert zero rotation for backbone group.
num_residues, = aatype.shape
sin_angles = jnp.concatenate([jnp.zeros([num_residues, 1]), sin_angles],
axis=-1)
cos_angles = jnp.concatenate([jnp.ones([num_residues, 1]), cos_angles],
axis=-1)
zeros = jnp.zeros_like(sin_angles)
ones = jnp.ones_like(sin_angles)
# all_rots are r3.Rots with shape (N, 8)
all_rots = r3.Rots(ones, zeros, zeros,
zeros, cos_angles, -sin_angles,
zeros, sin_angles, cos_angles)
# Apply rotations to the frames.
all_frames = r3.rigids_mul_rots(default_frames, all_rots)
# chi2, chi3, and chi4 frames do not transform to the backbone frame but to
# the previous frame. So chain them up accordingly.
chi2_frame_to_frame = jax.tree_map(lambda x: x[:, 5], all_frames)
chi3_frame_to_frame = jax.tree_map(lambda x: x[:, 6], all_frames)
chi4_frame_to_frame = jax.tree_map(lambda x: x[:, 7], all_frames)
chi1_frame_to_backb = jax.tree_map(lambda x: x[:, 4], all_frames)
chi2_frame_to_backb = r3.rigids_mul_rigids(chi1_frame_to_backb,
chi2_frame_to_frame)
chi3_frame_to_backb = r3.rigids_mul_rigids(chi2_frame_to_backb,
chi3_frame_to_frame)
chi4_frame_to_backb = r3.rigids_mul_rigids(chi3_frame_to_backb,
chi4_frame_to_frame)
# Recombine them to a r3.Rigids with shape (N, 8).
def _concat_frames(xall, x5, x6, x7):
return jnp.concatenate(
[xall[:, 0:5], x5[:, None], x6[:, None], x7[:, None]], axis=-1)
all_frames_to_backb = jax.tree_map(
_concat_frames,
all_frames,
chi2_frame_to_backb,
chi3_frame_to_backb,
chi4_frame_to_backb)
# Create the global frames.
# shape (N, 8)
all_frames_to_global = r3.rigids_mul_rigids(
jax.tree_map(lambda x: x[:, None], backb_to_global),
all_frames_to_backb)
return all_frames_to_global
def frames_and_literature_positions_to_atom14_pos(
aatype: jnp.ndarray, # (N)
all_frames_to_global: r3.Rigids # (N, 8)
) -> r3.Vecs: # (N, 14)
"""Put atom literature positions (atom14 encoding) in each rigid group.
Jumper et al. (2021) Suppl. Alg. 24 "computeAllAtomCoordinates" line 11
Args:
aatype: aatype for each residue.
all_frames_to_global: All per residue coordinate frames.
Returns:
Positions of all atom coordinates in global frame.
"""
# Pick the appropriate transform for every atom.
residx_to_group_idx = utils.batched_gather(
residue_constants.restype_atom14_to_rigid_group, aatype)
group_mask = jax.nn.one_hot(
residx_to_group_idx, num_classes=8) # shape (N, 14, 8)
# r3.Rigids with shape (N, 14)
map_atoms_to_global = jax.tree_map(
lambda x: jnp.sum(x[:, None, :] * group_mask, axis=-1),
all_frames_to_global)
# Gather the literature atom positions for each residue.
# r3.Vecs with shape (N, 14)
lit_positions = r3.vecs_from_tensor(
utils.batched_gather(
residue_constants.restype_atom14_rigid_group_positions, aatype))
# Transform each atom from its local frame to the global frame.
# r3.Vecs with shape (N, 14)
pred_positions = r3.rigids_mul_vecs(map_atoms_to_global, lit_positions)
# Mask out non-existing atoms.
mask = utils.batched_gather(residue_constants.restype_atom14_mask, aatype)
pred_positions = jax.tree_map(lambda x: x * mask, pred_positions)
return pred_positions
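# End-to-end sketch: identity backbone frames plus zero torsion angles
# (sin=0, cos=1) realise the literature atom14 coordinates. The identity
# Rigids construction below assumes the r3 namedtuple layout.
def _example_torsions_to_atom14():
  num_res = 4
  aatype = jnp.zeros([num_res], dtype=jnp.int32)
  ones, zeros = jnp.ones([num_res]), jnp.zeros([num_res])
  backb_to_global = r3.Rigids(
      r3.Rots(ones, zeros, zeros, zeros, ones, zeros, zeros, zeros, ones),
      r3.Vecs(zeros, zeros, zeros))
  torsions = jnp.stack(
      [jnp.zeros([num_res, 7]), jnp.ones([num_res, 7])], axis=-1)
  frames = torsion_angles_to_frames(aatype, backb_to_global, torsions)
  return frames_and_literature_positions_to_atom14_pos(aatype, frames)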
def extreme_ca_ca_distance_violations(
pred_atom_positions: jnp.ndarray, # (N, 37(14), 3)
pred_atom_mask: jnp.ndarray, # (N, 37(14))
residue_index: jnp.ndarray, # (N)
max_angstrom_tolerance=1.5
) -> jnp.ndarray:
"""Counts residues whose Ca is a large distance from its neighbor.
  Measures the fraction of CA-CA pairs between consecutive amino acids that
are more than 'max_angstrom_tolerance' apart.
Args:
pred_atom_positions: Atom positions in atom37/14 representation
pred_atom_mask: Atom mask in atom37/14 representation
residue_index: Residue index for given amino acid, this is assumed to be
monotonically increasing.
max_angstrom_tolerance: Maximum distance allowed to not count as violation.
Returns:
Fraction of consecutive CA-CA pairs with violation.
"""
this_ca_pos = pred_atom_positions[:-1, 1, :] # (N - 1, 3)
this_ca_mask = pred_atom_mask[:-1, 1] # (N - 1)
next_ca_pos = pred_atom_positions[1:, 1, :] # (N - 1, 3)
next_ca_mask = pred_atom_mask[1:, 1] # (N - 1)
has_no_gap_mask = ((residue_index[1:] - residue_index[:-1]) == 1.0).astype(
jnp.float32)
ca_ca_distance = jnp.sqrt(
1e-6 + jnp.sum(squared_difference(this_ca_pos, next_ca_pos), axis=-1))
violations = (ca_ca_distance -
residue_constants.ca_ca) > max_angstrom_tolerance
mask = this_ca_mask * next_ca_mask * has_no_gap_mask
return utils.mask_mean(mask=mask, value=violations)
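# Sketch: two residues whose CA atoms sit 10 A apart trip the default 1.5 A
# tolerance (ideal CA-CA is ~3.8 A), so the returned fraction is 1.0.
def _example_ca_ca_violation():
  pos = jnp.zeros([2, 37, 3]).at[1, 1, 0].set(10.0)
  mask = jnp.zeros([2, 37]).at[:, 1].set(1.0)
  return extreme_ca_ca_distance_violations(pos, mask, jnp.arange(2))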
def between_residue_bond_loss(
pred_atom_positions: jnp.ndarray, # (N, 37(14), 3)
pred_atom_mask: jnp.ndarray, # (N, 37(14))
residue_index: jnp.ndarray, # (N)
aatype: jnp.ndarray, # (N)
tolerance_factor_soft=12.0,
tolerance_factor_hard=12.0
) -> Dict[str, jnp.ndarray]:
"""Flat-bottom loss to penalize structural violations between residues.
This is a loss penalizing any violation of the geometry around the peptide
bond between consecutive amino acids. This loss corresponds to
Jumper et al. (2021) Suppl. Sec. 1.9.11, eq 44, 45.
Args:
pred_atom_positions: Atom positions in atom37/14 representation
pred_atom_mask: Atom mask in atom37/14 representation
residue_index: Residue index for given amino acid, this is assumed to be
monotonically increasing.
aatype: Amino acid type of given residue
tolerance_factor_soft: soft tolerance factor measured in standard deviations
of pdb distributions
tolerance_factor_hard: hard tolerance factor measured in standard deviations
of pdb distributions
Returns:
Dict containing:
* 'c_n_loss_mean': Loss for peptide bond length violations
* 'ca_c_n_loss_mean': Loss for violations of bond angle around C spanned
by CA, C, N
* 'c_n_ca_loss_mean': Loss for violations of bond angle around N spanned
by C, N, CA
* 'per_residue_loss_sum': sum of all losses for each residue
* 'per_residue_violation_mask': mask denoting all residues with violation
present.
"""
assert len(pred_atom_positions.shape) == 3
assert len(pred_atom_mask.shape) == 2
assert len(residue_index.shape) == 1
assert len(aatype.shape) == 1
# Get the positions of the relevant backbone atoms.
this_ca_pos = pred_atom_positions[:-1, 1, :] # (N - 1, 3)
this_ca_mask = pred_atom_mask[:-1, 1] # (N - 1)
this_c_pos = pred_atom_positions[:-1, 2, :] # (N - 1, 3)
this_c_mask = pred_atom_mask[:-1, 2] # (N - 1)
next_n_pos = pred_atom_positions[1:, 0, :] # (N - 1, 3)
next_n_mask = pred_atom_mask[1:, 0] # (N - 1)
next_ca_pos = pred_atom_positions[1:, 1, :] # (N - 1, 3)
next_ca_mask = pred_atom_mask[1:, 1] # (N - 1)
has_no_gap_mask = ((residue_index[1:] - residue_index[:-1]) == 1.0).astype(
jnp.float32)
# Compute loss for the C--N bond.
c_n_bond_length = jnp.sqrt(
1e-6 + jnp.sum(squared_difference(this_c_pos, next_n_pos), axis=-1))
# The C-N bond to proline has slightly different length because of the ring.
next_is_proline = (
aatype[1:] == residue_constants.resname_to_idx['PRO']).astype(jnp.float32)
gt_length = (
(1. - next_is_proline) * residue_constants.between_res_bond_length_c_n[0]
+ next_is_proline * residue_constants.between_res_bond_length_c_n[1])
gt_stddev = (
(1. - next_is_proline) *
residue_constants.between_res_bond_length_stddev_c_n[0] +
next_is_proline * residue_constants.between_res_bond_length_stddev_c_n[1])
c_n_bond_length_error = jnp.sqrt(1e-6 +
jnp.square(c_n_bond_length - gt_length))
c_n_loss_per_residue = jax.nn.relu(
c_n_bond_length_error - tolerance_factor_soft * gt_stddev)
mask = this_c_mask * next_n_mask * has_no_gap_mask
c_n_loss = jnp.sum(mask * c_n_loss_per_residue) / (jnp.sum(mask) + 1e-6)
c_n_violation_mask = mask * (
c_n_bond_length_error > (tolerance_factor_hard * gt_stddev))
# Compute loss for the angles.
ca_c_bond_length = jnp.sqrt(1e-6 + jnp.sum(
squared_difference(this_ca_pos, this_c_pos), axis=-1))
n_ca_bond_length = jnp.sqrt(1e-6 + jnp.sum(
squared_difference(next_n_pos, next_ca_pos), axis=-1))
c_ca_unit_vec = (this_ca_pos - this_c_pos) / ca_c_bond_length[:, None]
c_n_unit_vec = (next_n_pos - this_c_pos) / c_n_bond_length[:, None]
n_ca_unit_vec = (next_ca_pos - next_n_pos) / n_ca_bond_length[:, None]
ca_c_n_cos_angle = jnp.sum(c_ca_unit_vec * c_n_unit_vec, axis=-1)
gt_angle = residue_constants.between_res_cos_angles_ca_c_n[0]
gt_stddev = residue_constants.between_res_bond_length_stddev_c_n[0]
ca_c_n_cos_angle_error = jnp.sqrt(
1e-6 + jnp.square(ca_c_n_cos_angle - gt_angle))
ca_c_n_loss_per_residue = jax.nn.relu(
ca_c_n_cos_angle_error - tolerance_factor_soft * gt_stddev)
mask = this_ca_mask * this_c_mask * next_n_mask * has_no_gap_mask
ca_c_n_loss = jnp.sum(mask * ca_c_n_loss_per_residue) / (jnp.sum(mask) + 1e-6)
ca_c_n_violation_mask = mask * (ca_c_n_cos_angle_error >
(tolerance_factor_hard * gt_stddev))
c_n_ca_cos_angle = jnp.sum((-c_n_unit_vec) * n_ca_unit_vec, axis=-1)
gt_angle = residue_constants.between_res_cos_angles_c_n_ca[0]
gt_stddev = residue_constants.between_res_cos_angles_c_n_ca[1]
c_n_ca_cos_angle_error = jnp.sqrt(
1e-6 + jnp.square(c_n_ca_cos_angle - gt_angle))
c_n_ca_loss_per_residue = jax.nn.relu(
c_n_ca_cos_angle_error - tolerance_factor_soft * gt_stddev)
mask = this_c_mask * next_n_mask * next_ca_mask * has_no_gap_mask
c_n_ca_loss = jnp.sum(mask * c_n_ca_loss_per_residue) / (jnp.sum(mask) + 1e-6)
c_n_ca_violation_mask = mask * (
c_n_ca_cos_angle_error > (tolerance_factor_hard * gt_stddev))
# Compute a per residue loss (equally distribute the loss to both
# neighbouring residues).
per_residue_loss_sum = (c_n_loss_per_residue +
ca_c_n_loss_per_residue +
c_n_ca_loss_per_residue)
per_residue_loss_sum = 0.5 * (jnp.pad(per_residue_loss_sum, [[0, 1]]) +
jnp.pad(per_residue_loss_sum, [[1, 0]]))
# Compute hard violations.
violation_mask = jnp.max(
jnp.stack([c_n_violation_mask,
ca_c_n_violation_mask,
c_n_ca_violation_mask]), axis=0)
violation_mask = jnp.maximum(
jnp.pad(violation_mask, [[0, 1]]),
jnp.pad(violation_mask, [[1, 0]]))
return {'c_n_loss_mean': c_n_loss, # shape ()
'ca_c_n_loss_mean': ca_c_n_loss, # shape ()
'c_n_ca_loss_mean': c_n_ca_loss, # shape ()
'per_residue_loss_sum': per_residue_loss_sum, # shape (N)
'per_residue_violation_mask': violation_mask # shape (N)
}
def between_residue_clash_loss(
atom14_pred_positions: jnp.ndarray, # (N, 14, 3)
atom14_atom_exists: jnp.ndarray, # (N, 14)
atom14_atom_radius: jnp.ndarray, # (N, 14)
residue_index: jnp.ndarray, # (N)
overlap_tolerance_soft=1.5,
overlap_tolerance_hard=1.5
) -> Dict[str, jnp.ndarray]:
"""Loss to penalize steric clashes between residues.
This is a loss penalizing any steric clashes due to non bonded atoms in
different peptides coming too close. This loss corresponds to the part with
different residues of
Jumper et al. (2021) Suppl. Sec. 1.9.11, eq 46.
Args:
atom14_pred_positions: Predicted positions of atoms in
global prediction frame
atom14_atom_exists: Mask denoting whether atom at positions exists for given
amino acid type
atom14_atom_radius: Van der Waals radius for each atom.
residue_index: Residue index for given amino acid.
overlap_tolerance_soft: Soft tolerance factor.
overlap_tolerance_hard: Hard tolerance factor.
Returns:
Dict containing:
* 'mean_loss': average clash loss
* 'per_atom_loss_sum': sum of all clash losses per atom, shape (N, 14)
* 'per_atom_clash_mask': mask whether atom clashes with any other atom
shape (N, 14)
"""
assert len(atom14_pred_positions.shape) == 3
assert len(atom14_atom_exists.shape) == 2
assert len(atom14_atom_radius.shape) == 2
assert len(residue_index.shape) == 1
# Create the distance matrix.
# (N, N, 14, 14)
dists = jnp.sqrt(1e-10 + jnp.sum(
squared_difference(
atom14_pred_positions[:, None, :, None, :],
atom14_pred_positions[None, :, None, :, :]),
axis=-1))
# Create the mask for valid distances.
# shape (N, N, 14, 14)
dists_mask = (atom14_atom_exists[:, None, :, None] *
atom14_atom_exists[None, :, None, :])
# Mask out all the duplicate entries in the lower triangular matrix.
# Also mask out the diagonal (atom-pairs from the same residue) -- these atoms
# are handled separately.
dists_mask *= (
residue_index[:, None, None, None] < residue_index[None, :, None, None])
# Backbone C--N bond between subsequent residues is no clash.
c_one_hot = jax.nn.one_hot(2, num_classes=14)
n_one_hot = jax.nn.one_hot(0, num_classes=14)
neighbour_mask = ((residue_index[:, None, None, None] +
1) == residue_index[None, :, None, None])
c_n_bonds = neighbour_mask * c_one_hot[None, None, :,
None] * n_one_hot[None, None, None, :]
dists_mask *= (1. - c_n_bonds)
# Disulfide bridge between two cysteines is no clash.
cys_sg_idx = residue_constants.restype_name_to_atom14_names['CYS'].index('SG')
cys_sg_one_hot = jax.nn.one_hot(cys_sg_idx, num_classes=14)
disulfide_bonds = (cys_sg_one_hot[None, None, :, None] *
cys_sg_one_hot[None, None, None, :])
dists_mask *= (1. - disulfide_bonds)
# Compute the lower bound for the allowed distances.
# shape (N, N, 14, 14)
dists_lower_bound = dists_mask * (atom14_atom_radius[:, None, :, None] +
atom14_atom_radius[None, :, None, :])
# Compute the error.
# shape (N, N, 14, 14)
dists_to_low_error = dists_mask * jax.nn.relu(
dists_lower_bound - overlap_tolerance_soft - dists)
# Compute the mean loss.
# shape ()
mean_loss = (jnp.sum(dists_to_low_error)
/ (1e-6 + jnp.sum(dists_mask)))
# Compute the per atom loss sum.
# shape (N, 14)
per_atom_loss_sum = (jnp.sum(dists_to_low_error, axis=[0, 2]) +
jnp.sum(dists_to_low_error, axis=[1, 3]))
# Compute the hard clash mask.
# shape (N, N, 14, 14)
clash_mask = dists_mask * (
dists < (dists_lower_bound - overlap_tolerance_hard))
# Compute the per atom clash.
# shape (N, 14)
per_atom_clash_mask = jnp.maximum(
jnp.max(clash_mask, axis=[0, 2]),
jnp.max(clash_mask, axis=[1, 3]))
return {'mean_loss': mean_loss, # shape ()
'per_atom_loss_sum': per_atom_loss_sum, # shape (N, 14)
'per_atom_clash_mask': per_atom_clash_mask # shape (N, 14)
}
def within_residue_violations(
atom14_pred_positions: jnp.ndarray, # (N, 14, 3)
atom14_atom_exists: jnp.ndarray, # (N, 14)
atom14_dists_lower_bound: jnp.ndarray, # (N, 14, 14)
atom14_dists_upper_bound: jnp.ndarray, # (N, 14, 14)
tighten_bounds_for_loss=0.0,
) -> Dict[str, jnp.ndarray]:
"""Loss to penalize steric clashes within residues.
This is a loss penalizing any steric violations or clashes of non-bonded atoms
in a given peptide. This loss corresponds to the part with
the same residues of
Jumper et al. (2021) Suppl. Sec. 1.9.11, eq 46.
Args:
atom14_pred_positions: Predicted positions of atoms in
global prediction frame
atom14_atom_exists: Mask denoting whether atom at positions exists for given
amino acid type
atom14_dists_lower_bound: Lower bound on allowed distances.
atom14_dists_upper_bound: Upper bound on allowed distances
tighten_bounds_for_loss: Extra factor to tighten loss
Returns:
Dict containing:
* 'per_atom_loss_sum': sum of all clash losses per atom, shape (N, 14)
* 'per_atom_clash_mask': mask whether atom clashes with any other atom
shape (N, 14)
"""
assert len(atom14_pred_positions.shape) == 3
assert len(atom14_atom_exists.shape) == 2
assert len(atom14_dists_lower_bound.shape) == 3
assert len(atom14_dists_upper_bound.shape) == 3
# Compute the mask for each residue.
# shape (N, 14, 14)
dists_masks = (1. - jnp.eye(14, 14)[None])
dists_masks *= (atom14_atom_exists[:, :, None] *
atom14_atom_exists[:, None, :])
# Distance matrix
# shape (N, 14, 14)
dists = jnp.sqrt(1e-10 + jnp.sum(
squared_difference(
atom14_pred_positions[:, :, None, :],
atom14_pred_positions[:, None, :, :]),
axis=-1))
# Compute the loss.
# shape (N, 14, 14)
dists_to_low_error = jax.nn.relu(
atom14_dists_lower_bound + tighten_bounds_for_loss - dists)
dists_to_high_error = jax.nn.relu(
dists - (atom14_dists_upper_bound - tighten_bounds_for_loss))
loss = dists_masks * (dists_to_low_error + dists_to_high_error)
# Compute the per atom loss sum.
# shape (N, 14)
per_atom_loss_sum = (jnp.sum(loss, axis=1) +
jnp.sum(loss, axis=2))
# Compute the violations mask.
# shape (N, 14, 14)
violations = dists_masks * ((dists < atom14_dists_lower_bound) |
(dists > atom14_dists_upper_bound))
# Compute the per atom violations.
# shape (N, 14)
per_atom_violations = jnp.maximum(
jnp.max(violations, axis=1), jnp.max(violations, axis=2))
return {'per_atom_loss_sum': per_atom_loss_sum, # shape (N, 14)
'per_atom_violations': per_atom_violations # shape (N, 14)
}
def find_optimal_renaming(
atom14_gt_positions: jnp.ndarray, # (N, 14, 3)
atom14_alt_gt_positions: jnp.ndarray, # (N, 14, 3)
atom14_atom_is_ambiguous: jnp.ndarray, # (N, 14)
atom14_gt_exists: jnp.ndarray, # (N, 14)
atom14_pred_positions: jnp.ndarray, # (N, 14, 3)
atom14_atom_exists: jnp.ndarray, # (N, 14)
) -> jnp.ndarray: # (N):
"""Find optimal renaming for ground truth that maximizes LDDT.
Jumper et al. (2021) Suppl. Alg. 26
"renameSymmetricGroundTruthAtoms" lines 1-5
Args:
atom14_gt_positions: Ground truth positions in global frame of ground truth.
atom14_alt_gt_positions: Alternate ground truth positions in global frame of
ground truth with coordinates of ambiguous atoms swapped relative to
'atom14_gt_positions'.
atom14_atom_is_ambiguous: Mask denoting whether atom is among ambiguous
atoms, see Jumper et al. (2021) Suppl. Table 3
atom14_gt_exists: Mask denoting whether atom at positions exists in ground
truth.
atom14_pred_positions: Predicted positions of atoms in
global prediction frame
atom14_atom_exists: Mask denoting whether atom at positions exists for given
amino acid type
Returns:
Float array of shape [N] with 1. where atom14_alt_gt_positions is closer to
prediction and 0. otherwise
"""
assert len(atom14_gt_positions.shape) == 3
assert len(atom14_alt_gt_positions.shape) == 3
assert len(atom14_atom_is_ambiguous.shape) == 2
assert len(atom14_gt_exists.shape) == 2
assert len(atom14_pred_positions.shape) == 3
assert len(atom14_atom_exists.shape) == 2
# Create the pred distance matrix.
# shape (N, N, 14, 14)
pred_dists = jnp.sqrt(1e-10 + jnp.sum(
squared_difference(
atom14_pred_positions[:, None, :, None, :],
atom14_pred_positions[None, :, None, :, :]),
axis=-1))
# Compute distances for ground truth with original and alternative names.
# shape (N, N, 14, 14)
gt_dists = jnp.sqrt(1e-10 + jnp.sum(
squared_difference(
atom14_gt_positions[:, None, :, None, :],
atom14_gt_positions[None, :, None, :, :]),
axis=-1))
alt_gt_dists = jnp.sqrt(1e-10 + jnp.sum(
squared_difference(
atom14_alt_gt_positions[:, None, :, None, :],
atom14_alt_gt_positions[None, :, None, :, :]),
axis=-1))
# Compute LDDT's.
# shape (N, N, 14, 14)
lddt = jnp.sqrt(1e-10 + squared_difference(pred_dists, gt_dists))
alt_lddt = jnp.sqrt(1e-10 + squared_difference(pred_dists, alt_gt_dists))
# Create a mask for ambiguous atoms in rows vs. non-ambiguous atoms
# in cols.
# shape (N ,N, 14, 14)
mask = (atom14_gt_exists[:, None, :, None] * # rows
atom14_atom_is_ambiguous[:, None, :, None] * # rows
atom14_gt_exists[None, :, None, :] * # cols
(1. - atom14_atom_is_ambiguous[None, :, None, :])) # cols
  # Aggregate distances for each residue to the non-ambiguous atoms.
# shape (N)
per_res_lddt = jnp.sum(mask * lddt, axis=[1, 2, 3])
alt_per_res_lddt = jnp.sum(mask * alt_lddt, axis=[1, 2, 3])
# Decide for each residue, whether alternative naming is better.
# shape (N)
alt_naming_is_better = (alt_per_res_lddt < per_res_lddt).astype(jnp.float32)
return alt_naming_is_better # shape (N)
def frame_aligned_point_error(
pred_frames: r3.Rigids, # shape (num_frames)
target_frames: r3.Rigids, # shape (num_frames)
frames_mask: jnp.ndarray, # shape (num_frames)
pred_positions: r3.Vecs, # shape (num_positions)
target_positions: r3.Vecs, # shape (num_positions)
positions_mask: jnp.ndarray, # shape (num_positions)
length_scale: float,
l1_clamp_distance: Optional[float] = None,
epsilon=1e-4) -> jnp.ndarray: # shape ()
"""Measure point error under different alignments.
Jumper et al. (2021) Suppl. Alg. 28 "computeFAPE"
Computes error between two structures with B points under A alignments derived
from the given pairs of frames.
Args:
pred_frames: num_frames reference frames for 'pred_positions'.
target_frames: num_frames reference frames for 'target_positions'.
frames_mask: Mask for frame pairs to use.
pred_positions: num_positions predicted positions of the structure.
target_positions: num_positions target positions of the structure.
positions_mask: Mask on which positions to score.
length_scale: length scale to divide loss by.
l1_clamp_distance: Distance cutoff on error beyond which gradients will
be zero.
epsilon: small value used to regularize denominator for masked average.
Returns:
Masked Frame Aligned Point Error.
"""
assert pred_frames.rot.xx.ndim == 1
assert target_frames.rot.xx.ndim == 1
assert frames_mask.ndim == 1, frames_mask.ndim
assert pred_positions.x.ndim == 1
assert target_positions.x.ndim == 1
assert positions_mask.ndim == 1
# Compute array of predicted positions in the predicted frames.
# r3.Vecs (num_frames, num_positions)
local_pred_pos = r3.rigids_mul_vecs(
jax.tree_map(lambda r: r[:, None], r3.invert_rigids(pred_frames)),
jax.tree_map(lambda x: x[None, :], pred_positions))
# Compute array of target positions in the target frames.
# r3.Vecs (num_frames, num_positions)
local_target_pos = r3.rigids_mul_vecs(
jax.tree_map(lambda r: r[:, None], r3.invert_rigids(target_frames)),
jax.tree_map(lambda x: x[None, :], target_positions))
# Compute errors between the structures.
# jnp.ndarray (num_frames, num_positions)
error_dist = jnp.sqrt(
r3.vecs_squared_distance(local_pred_pos, local_target_pos)
+ epsilon)
if l1_clamp_distance:
error_dist = jnp.clip(error_dist, 0, l1_clamp_distance)
normed_error = error_dist / length_scale
normed_error *= jnp.expand_dims(frames_mask, axis=-1)
normed_error *= jnp.expand_dims(positions_mask, axis=-2)
normalization_factor = (
jnp.sum(frames_mask, axis=-1) *
jnp.sum(positions_mask, axis=-1))
return (jnp.sum(normed_error, axis=(-2, -1)) /
(epsilon + normalization_factor))
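# Sanity sketch: FAPE of a structure against itself under identity frames is
# ~0 (only the epsilon regulariser contributes). The identity Rigids
# construction assumes the r3 namedtuple layout.
def _example_fape_self():
  n = 4
  ones, zeros = jnp.ones([n]), jnp.zeros([n])
  frames = r3.Rigids(
      r3.Rots(ones, zeros, zeros, zeros, ones, zeros, zeros, zeros, ones),
      r3.Vecs(zeros, zeros, zeros))
  positions = r3.vecs_from_tensor(jnp.zeros([n, 3]))
  return frame_aligned_point_error(frames, frames, ones, positions,
                                   positions, ones, length_scale=10.0)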
def _make_renaming_matrices():
"""Matrices to map atoms to symmetry partners in ambiguous case."""
# As the atom naming is ambiguous for 7 of the 20 amino acids, provide
# alternative groundtruth coordinates where the naming is swapped
restype_3 = [
residue_constants.restype_1to3[res] for res in residue_constants.restypes
]
restype_3 += ['UNK']
# Matrices for renaming ambiguous atoms.
all_matrices = {res: np.eye(14, dtype=np.float32) for res in restype_3}
for resname, swap in residue_constants.residue_atom_renaming_swaps.items():
correspondences = np.arange(14)
for source_atom_swap, target_atom_swap in swap.items():
source_index = residue_constants.restype_name_to_atom14_names[
resname].index(source_atom_swap)
target_index = residue_constants.restype_name_to_atom14_names[
resname].index(target_atom_swap)
correspondences[source_index] = target_index
correspondences[target_index] = source_index
renaming_matrix = np.zeros((14, 14), dtype=np.float32)
for index, correspondence in enumerate(correspondences):
renaming_matrix[index, correspondence] = 1.
all_matrices[resname] = renaming_matrix.astype(np.float32)
renaming_matrices = np.stack([all_matrices[restype] for restype in restype_3])
return renaming_matrices
RENAMING_MATRICES = _make_renaming_matrices()
def get_alt_atom14(aatype, positions, mask):
"""Get alternative atom14 positions.
Constructs renamed atom positions for ambiguous residues.
Jumper et al. (2021) Suppl. Table 3 "Ambiguous atom names due to 180 degree-
rotation-symmetry"
Args:
aatype: Amino acid at given position
positions: Atom positions as r3.Vecs in atom14 representation, (N, 14)
mask: Atom masks in atom14 representation, (N, 14)
Returns:
renamed atom positions, renamed atom mask
"""
# pick the transformation matrices for the given residue sequence
# shape (num_res, 14, 14)
renaming_transform = utils.batched_gather(
jnp.asarray(RENAMING_MATRICES), aatype)
positions = jax.tree_map(lambda x: x[:, :, None], positions)
alternative_positions = jax.tree_map(
lambda x: jnp.sum(x, axis=1), positions * renaming_transform)
# Create the mask for the alternative ground truth (differs from the
# ground truth mask, if only one of the atoms in an ambiguous pair has a
# ground truth position)
alternative_mask = jnp.sum(mask[..., None] * renaming_transform, axis=1)
return alternative_positions, alternative_mask
| 41.11014 | 82 | 0.692069 |
3d3455d9e99fcdc6667b240277fdeabd9b7ee09c | 9,768 | py | Python | fhirclient/r4models/messageheader.py | Healthedata1/Flask-PL | 88a2f40ca430c4cbb9fbded7fc92fdc166ebb9f1 | [
"MIT"
] | null | null | null | fhirclient/r4models/messageheader.py | Healthedata1/Flask-PL | 88a2f40ca430c4cbb9fbded7fc92fdc166ebb9f1 | [
"MIT"
] | null | null | null | fhirclient/r4models/messageheader.py | Healthedata1/Flask-PL | 88a2f40ca430c4cbb9fbded7fc92fdc166ebb9f1 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 4.0.0-a53ec6ee1b (http://hl7.org/fhir/StructureDefinition/MessageHeader) on 2019-05-07.
# 2019, SMART Health IT.
from . import domainresource
class MessageHeader(domainresource.DomainResource):
""" A resource that describes a message that is exchanged between systems.
The header for a message exchange that is either requesting or responding
to an action. The reference(s) that are the subject of the action as well
as other information related to the action are typically transmitted in a
bundle in which the MessageHeader resource instance is the first resource
in the bundle.
"""
resource_type = "MessageHeader"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.author = None
""" The source of the decision.
Type `FHIRReference` (represented as `dict` in JSON). """
self.definition = None
""" Link to the definition for this message.
Type `str`. """
self.destination = None
""" Message destination application(s).
List of `MessageHeaderDestination` items (represented as `dict` in JSON). """
self.enterer = None
""" The source of the data entry.
Type `FHIRReference` (represented as `dict` in JSON). """
self.eventCoding = None
""" Code for the event this message represents or link to event
definition.
Type `Coding` (represented as `dict` in JSON). """
self.eventUri = None
""" Code for the event this message represents or link to event
definition.
Type `str`. """
self.focus = None
""" The actual content of the message.
List of `FHIRReference` items (represented as `dict` in JSON). """
self.reason = None
""" Cause of event.
Type `CodeableConcept` (represented as `dict` in JSON). """
self.response = None
""" If this is a reply to prior message.
Type `MessageHeaderResponse` (represented as `dict` in JSON). """
self.responsible = None
""" Final responsibility for event.
Type `FHIRReference` (represented as `dict` in JSON). """
self.sender = None
""" Real world sender of the message.
Type `FHIRReference` (represented as `dict` in JSON). """
self.source = None
""" Message source application.
Type `MessageHeaderSource` (represented as `dict` in JSON). """
super(MessageHeader, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(MessageHeader, self).elementProperties()
js.extend([
("author", "author", fhirreference.FHIRReference, False, None, False),
("definition", "definition", str, False, None, False),
("destination", "destination", MessageHeaderDestination, True, None, False),
("enterer", "enterer", fhirreference.FHIRReference, False, None, False),
("eventCoding", "eventCoding", coding.Coding, False, "event", True),
("eventUri", "eventUri", str, False, "event", True),
("focus", "focus", fhirreference.FHIRReference, True, None, False),
("reason", "reason", codeableconcept.CodeableConcept, False, None, False),
("response", "response", MessageHeaderResponse, False, None, False),
("responsible", "responsible", fhirreference.FHIRReference, False, None, False),
("sender", "sender", fhirreference.FHIRReference, False, None, False),
("source", "source", MessageHeaderSource, False, None, True),
])
return js
from . import backboneelement
class MessageHeaderDestination(backboneelement.BackboneElement):
""" Message destination application(s).
The destination application which the message is intended for.
"""
resource_type = "MessageHeaderDestination"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.endpoint = None
""" Actual destination address or id.
Type `str`. """
self.name = None
""" Name of system.
Type `str`. """
self.receiver = None
""" Intended "real-world" recipient for the data.
Type `FHIRReference` (represented as `dict` in JSON). """
self.target = None
""" Particular delivery destination within the destination.
Type `FHIRReference` (represented as `dict` in JSON). """
super(MessageHeaderDestination, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(MessageHeaderDestination, self).elementProperties()
js.extend([
("endpoint", "endpoint", str, False, None, True),
("name", "name", str, False, None, False),
("receiver", "receiver", fhirreference.FHIRReference, False, None, False),
("target", "target", fhirreference.FHIRReference, False, None, False),
])
return js
class MessageHeaderResponse(backboneelement.BackboneElement):
""" If this is a reply to prior message.
Information about the message that this message is a response to. Only
present if this message is a response.
"""
resource_type = "MessageHeaderResponse"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.code = None
""" ok | transient-error | fatal-error.
Type `str`. """
self.details = None
""" Specific list of hints/warnings/errors.
Type `FHIRReference` (represented as `dict` in JSON). """
self.identifier = None
""" Id of original message.
Type `str`. """
super(MessageHeaderResponse, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(MessageHeaderResponse, self).elementProperties()
js.extend([
("code", "code", str, False, None, True),
("details", "details", fhirreference.FHIRReference, False, None, False),
("identifier", "identifier", str, False, None, True),
])
return js
class MessageHeaderSource(backboneelement.BackboneElement):
""" Message source application.
The source application from which this message originated.
"""
resource_type = "MessageHeaderSource"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.contact = None
""" Human contact for problems.
Type `ContactPoint` (represented as `dict` in JSON). """
self.endpoint = None
""" Actual message source address or id.
Type `str`. """
self.name = None
""" Name of system.
Type `str`. """
self.software = None
""" Name of software running the system.
Type `str`. """
self.version = None
""" Version of software running.
Type `str`. """
super(MessageHeaderSource, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(MessageHeaderSource, self).elementProperties()
js.extend([
("contact", "contact", contactpoint.ContactPoint, False, None, False),
("endpoint", "endpoint", str, False, None, True),
("name", "name", str, False, None, False),
("software", "software", str, False, None, False),
("version", "version", str, False, None, False),
])
return js
import sys
try:
from . import codeableconcept
except ImportError:
codeableconcept = sys.modules[__package__ + '.codeableconcept']
try:
from . import coding
except ImportError:
coding = sys.modules[__package__ + '.coding']
try:
from . import contactpoint
except ImportError:
contactpoint = sys.modules[__package__ + '.contactpoint']
try:
from . import fhirreference
except ImportError:
fhirreference = sys.modules[__package__ + '.fhirreference']
| 38.15625 | 111 | 0.596642 |
780e53b760c8e7fe44921100aa96864ffd9b79d0 | 5,457 | py | Python | src/deep_speaker/audio.py | googleinterns/deepspeech-reconstruction | 72f28d1e9064d221b3421c302a8725a8c71859ee | [
"Apache-2.0"
] | 3 | 2021-08-20T16:40:09.000Z | 2022-02-08T23:17:52.000Z | src/deep_speaker/audio.py | googleinterns/deepspeech-reconstruction | 72f28d1e9064d221b3421c302a8725a8c71859ee | [
"Apache-2.0"
] | 1 | 2022-03-22T04:16:15.000Z | 2022-03-22T04:26:03.000Z | src/deep_speaker/audio.py | googleinterns/deepspeech-reconstruction | 72f28d1e9064d221b3421c302a8725a8c71859ee | [
"Apache-2.0"
] | 1 | 2021-04-28T21:51:12.000Z | 2021-04-28T21:51:12.000Z | import logging
import os
import pickle
from collections import defaultdict
from pathlib import Path
import librosa
import numpy as np
from tqdm import tqdm
from constants import SAMPLE_RATE, NUM_FBANKS
from utils import find_files, ensures_dir
from audio2mfcc import audio2mfcc
logger = logging.getLogger(__name__)
def read_mfcc(input_filename, sample_rate, trim_silence=True):
audio = Audio.read(input_filename, sample_rate)
if trim_silence:
energy = np.abs(audio)
silence_threshold = np.percentile(energy, 95)
offsets = np.where(energy > silence_threshold)[0]
else:
offsets = (0, -1)
# left_blank_duration_ms = (1000.0 * offsets[0]) // self.sample_rate # frame_id to duration (ms)
# right_blank_duration_ms = (1000.0 * (len(audio) - offsets[-1])) // self.sample_rate
# TODO: could use trim_silence() here or a better VAD.
audio_voice_only = audio[offsets[0]:offsets[-1]]
mfcc = mfcc_fbank(audio_voice_only, sample_rate)
return mfcc
def read_mfcc_from_pkl(input_filename, i=0, idx=None):
with open(input_filename, 'rb') as f:
data = pickle.load(f)
if idx is not None:
data = data[idx]
if type(data) == list and len(data) == 1:
data = data[0]
frames_features = normalize_frames(data[i])
return np.array(frames_features, dtype=np.float32)
def extract_speaker_and_utterance_ids(filename: str): # LIBRI.
# 'audio/dev-other/116/288045/116-288045-0000.flac'
speaker, _, basename = Path(filename).parts[-3:]
utterance = os.path.splitext(basename.split('-', 1)[-1])[0]
assert basename.split('-')[0] == speaker
return speaker, utterance
class Audio:
def __init__(self, cache_dir: str, audio_dir: str = None, sample_rate: int = SAMPLE_RATE, ext='flac'):
self.ext = ext
self.cache_dir = os.path.join(cache_dir, 'audio-fbanks')
ensures_dir(self.cache_dir)
if audio_dir is not None:
self.build_cache(os.path.expanduser(audio_dir), sample_rate)
self.speakers_to_utterances = defaultdict(dict)
for cache_file in find_files(self.cache_dir, ext='npy'):
# /path/to/speaker_utterance.npy
speaker_id, utterance_id = Path(cache_file).stem.split('_')
self.speakers_to_utterances[speaker_id][utterance_id] = cache_file
@property
def speaker_ids(self):
return sorted(self.speakers_to_utterances)
@staticmethod
def trim_silence(audio, threshold):
"""Removes silence at the beginning and end of a sample."""
        energy = librosa.feature.rms(y=audio)
frames = np.nonzero(np.array(energy > threshold))
indices = librosa.core.frames_to_samples(frames)[1]
# Note: indices can be an empty array, if the whole audio was silence.
audio_trim = audio[0:0]
left_blank = audio[0:0]
right_blank = audio[0:0]
if indices.size:
audio_trim = audio[indices[0]:indices[-1]]
left_blank = audio[:indices[0]] # slice before.
right_blank = audio[indices[-1]:] # slice after.
return audio_trim, left_blank, right_blank
@staticmethod
def read(filename, sample_rate=SAMPLE_RATE):
audio, sr = librosa.load(filename, sr=sample_rate, mono=True, dtype=np.float32)
assert sr == sample_rate
return audio
def build_cache(self, audio_dir, sample_rate):
logger.info(f'audio_dir: {audio_dir}.')
logger.info(f'sample_rate: {sample_rate:,} hz.')
audio_files = find_files(audio_dir, ext=self.ext)
audio_files_count = len(audio_files)
assert audio_files_count != 0, f'Could not find any {self.ext} files in {audio_dir}.'
logger.info(f'Found {audio_files_count:,} files in {audio_dir}.')
with tqdm(audio_files) as bar:
for audio_filename in bar:
bar.set_description(audio_filename)
self.cache_audio_file(audio_filename, sample_rate)
def cache_audio_file(self, input_filename, sample_rate):
sp, utt = extract_speaker_and_utterance_ids(input_filename)
cache_filename = os.path.join(self.cache_dir, f'{sp}_{utt}.npy')
if not os.path.isfile(cache_filename):
try:
mfcc = read_mfcc(input_filename, sample_rate)
np.save(cache_filename, mfcc)
except librosa.util.exceptions.ParameterError as e:
logger.error(e)
def pad_mfcc(mfcc, max_length): # num_frames, nfilt=64.
if len(mfcc) < max_length:
mfcc = np.vstack((mfcc, np.tile(np.zeros(mfcc.shape[1]), (max_length - len(mfcc), 1))))
return mfcc
def mfcc_fbank(signal: np.array, sample_rate: int): # 1D signal array.
    # Returns filter-bank features with shape (num_frames, n_filters); the
    # delta stacking that would add a third axis is commented out below.
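    # The python_speech_features branch below is intentionally disabled
    # ("if False"); audio2mfcc computes the filter banks instead.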
if False:
from python_speech_features import fbank
filter_banks, _ = fbank(signal, samplerate=sample_rate, nfilt=NUM_FBANKS)
else:
filter_banks = audio2mfcc(signal)
frames_features = normalize_frames(filter_banks)
# delta_1 = delta(filter_banks, N=1)
# delta_2 = delta(delta_1, N=1)
# frames_features = np.transpose(np.stack([filter_banks, delta_1, delta_2]), (1, 2, 0))
return np.array(frames_features, dtype=np.float32) # Float32 precision is enough here.
def normalize_frames(m, epsilon=1e-12):
return [(v - np.mean(v)) / max(np.std(v), epsilon) for v in m]
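# Minimal self-check sketch (an addition, not part of the original module;
# assumes the repo-local constants/utils/audio2mfcc imports above resolve):
if __name__ == '__main__':
    fake = np.random.rand(100, NUM_FBANKS).astype(np.float32)
    frames = np.array(normalize_frames(fake), dtype=np.float32)
    padded = pad_mfcc(frames, 160)
    print(padded.shape)  # expected: (160, NUM_FBANKS)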
| 38.702128 | 106 | 0.666117 |
f106629d84e9d2866da84de08b6af24c9da56caa | 7,292 | py | Python | python-backend/tests/constants.py | MaxWardle/mds | 15d8405e6e95af98da9588f353c5d6692d1aa3d6 | [
"Apache-2.0"
] | null | null | null | python-backend/tests/constants.py | MaxWardle/mds | 15d8405e6e95af98da9588f353c5d6692d1aa3d6 | [
"Apache-2.0"
] | null | null | null | python-backend/tests/constants.py | MaxWardle/mds | 15d8405e6e95af98da9588f353c5d6692d1aa3d6 | [
"Apache-2.0"
] | null | null | null | # Test Constants
DUMMY_USER_KWARGS = {'create_user': 'DummyUser', 'update_user': 'DummyUser'}
TEST_MINE_GUID = '4fc855aa-728a-48f2-a3df-85ce1336b01a'
TEST_MINE_NAME = 'test_mine_name'
TEST_MINE_NO = 'BLAH000'
TEST_MINE_TYPE_GUID = 'df4939f0-04c9-49dc-bf04-2a3c3b8b0e14'
TEST_MINE_TYPE_DETAIL_GUID = 'df4939f0-04c9-49dc-bf04-2a3c3b8b0e15'
TEST_PARTY_PER_GUID_1 = 'df4939f0-04c9-49dc-bf04-2a3c3b8b0e24'
TEST_PARTY_PER_FIRST_NAME_1 = 'first_name'
TEST_PARTY_PER_PARTY_NAME_1 = 'test_surname'
TEST_PARTY_PER_EMAIL_1 = 'test1@test.com'
TEST_PARTY_PER_PHONE_1 = '123-456-7890'
TEST_PARTY_PER_PHONE_EXT_1 = '1234'
TEST_PARTY_PER_GUID_2 = 'df4939f0-04c9-49dc-bf04-2a3c3b8b0e22'
TEST_PARTY_PER_FIRST_NAME_2 = 'first_name2'
TEST_PARTY_PER_PARTY_NAME_2 = 'test_surname2'
TEST_PARTY_PER_EMAIL_2 = 'test2@test.com'
TEST_PARTY_PER_PHONE_2 = '123-456-7891'
TEST_PARTY_PER_PHONE_EXT_2 = '3215'
TEST_PARTY_PER_GUID_3 = 'df4939f0-04c9-49dc-bf04-2a3c3b8b0e23'
TEST_PARTY_PER_FIRST_NAME_3 = 'first_name3'
TEST_PARTY_PER_PARTY_NAME_3 = 'test_surname3'
TEST_PARTY_PER_EMAIL_3 = 'test3@test.com'
TEST_PARTY_PER_PHONE_3 = '123-456-7892'
TEST_PARTY_PER_PHONE_EXT_3 = '7895'
TEST_PARTY_ORG_GUID = 'edfdab51-cd4e-4575-9ebc-d120d14232ae'
TEST_PARTY_ORG_NAME = 'test_company'
TEST_PARTY_ORG_EMAIL = 'test_company@test.com'
TEST_PARTY_ORG_PHONE = '123-456-7832'
TEST_PARTY_ORG_EXT = '7855'
TEST_SUB_DIVISION_CODES = ['BC']
TEST_SUB_DIVISION_CODE_DESCRIPTIONS = ['British Columbia']
TEST_MANAGER_GUID = 'bf24fa6b-d6ad-4e20-8ce8-9bf92ad4d910'
TEST_TENURE_GUID = 'a9017fc1-1430-4054-8554-286122eca79a'
TEST_TENURE_ID = '1231231'
TEST_LOCATION_GUID = 'bd933449-c6a0-4b91-8b54-4f854d0315eb'
TEST_LAT_1 = '48.4284000'
TEST_LONG_1 = '123.3656000'
TEST_PERMIT_ID_1 = '1'
TEST_PERMIT_GUID_1 = 'd57604f4-c0de-4ec3-bf19-e82baaf14349'
TEST_PERMIT_NO_1 = 'TEST56789012'
TEST_PERMIT_STATUS_CODE_1 = 'T'
TEST_PERMIT_STATUS_CODE_NAME_1 = 'TEST'
TEST_PERMIT_AMENDMENT_STATUS_CODE = 'ACT'
TEST_PERMIT_AMENDMENT_STATUS_CODE_NAME = 'ACTIVE'
TEST_PERMIT_AMENDMENT_TYPE_CODE = 'AMD'
TEST_PERMIT_AMENDMENT_TYPE_CODE_NAME = 'AMENDMENT'
TEST_PERMIT_AMENDMENT_TYPE_CODE_2 = 'OGP'
TEST_PERMIT_AMENDMENT_STATUS_CODE_2 = 'RMT'
TEST_PERMITTEE_GUID = '07bd6be1-5a27-4280-94a9-6358cc95d07d'
TEST_PERMIT_STATUS_CODES = ['O', 'C', 'T']
TEST_PARTY_TYPE = 'PER'
TEST_ORG_TYPE = 'ORG'
TEST_REGION_GUID = 'a0b98cb1-6e70-4c54-9395-1e8ccc867cd1'
TEST_REGION_CODES = ['SW', 'SC', 'NW', 'NE', 'SE']
TEST_REGION_CODE_DISPLAY_ORDER = [10, 20, 30, 40, 50]
TEST_REGION_CODE = 'NE'
TEST_REGION_DESCRIPTION = 'North East Region'
TEST_MINE_TENURE_TYPE_CODES = ['COL', 'MIN', 'PLR', 'BCL']
TEST_MINE_TENURE_TYPE_DESCRIPTIONS = ['Coal', 'Mineral', 'Placer', 'BC Land']
TEST_MINE_DISTURBANCE_CODES = ['SUR', 'UND', 'CWA', 'MIL']
TEST_MINE_DISTURBANCE_DESCRIPTIONS = ['Surface', 'Underground', 'Coal Wash', 'Mill']
TEST_MINE_COMMODITY_CODES = ['TO', 'MC', 'CG', 'SA', 'AE', 'AL']
TEST_MINE_COMMODITY_DESCRIPTIONS = [
'Thermal Coal', 'Metallurgic', 'Construction Aggregate', 'Sand and Gravel', 'Agate', 'Aluminum'
]
TEST_REQUIRED_REPORT_CATEGORY_TAILINGS_GUID = 'bd5ef43b-379a-41a0-aa00-c5b632e9c329'
TEST_REQUIRED_REPORT_CATEGORY_TAILINGS = 'TSF'
TEST_REQUIRED_REPORT_SUB_CATEGORY_1 = 'INI'
TEST_REQUIRED_REPORT_CATEGORY_OTHER_GUID = 'bd5ef43b-379a-41a0-aa00-c5b632e9c32a'
TEST_REQUIRED_REPORT_CATEGORY_OTHER = 'MINE_OTHER'
TEST_REQUIRED_REPORT_DUE_DATE_TYPE = ['FIS', 'ANV']
TEST_REQUIRED_REPORT_DUE_DATE_DESCRIPTION = ['type1', 'type2']
TEST_REQUIRED_REPORT_GUID1 = '78cd68a9-f0ff-4fac-b2b7-10efe67e37b9'
TEST_REQUIRED_REPORT_NAME1 = 'Required Report for Tailings Facility 1'
TEST_REQUIRED_REPORT_GUID2 = '78cd68a9-f0ff-4fac-b2b7-10efe67e37ba'
TEST_REQUIRED_REPORT_NAME2 = 'Required Report for Tailings Facility 2'
TEST_REQUIRED_REPORT_GUID3 = '78cd68a9-f0ff-4fac-b2b7-10efe67e37bb'
TEST_REQUIRED_REPORT_NAME3 = 'Required Report for OTHER'
TEST_EXPECTED_DOCUMENT_GUID1 = 'f6c98d68-e565-41f3-9cea-d3cb4542c813'
TEST_EXPECTED_DOCUMENT_NAME1 = 'Expected Document 1'
TEST_EXPECTED_DOCUMENT_GUID2 = 'f6c98d68-e565-41f3-9cea-d3cb4542c814'
TEST_EXPECTED_DOCUMENT_NAME2 = 'Expected Document 2'
TEST_EXPECTED_DOCUMENT_STATUS_CODE1 = 'MIA'
TEST_EXPECTED_DOCUMENT_STATUS_CODE2 = 'PRE'
TEST_MINE_DOCUMENT_NAME1 = 'Mine Document 1'
TEST_DOCUMENT_MANAGER_FILE_GUID = 'a7cd9625-887c-4cc6-9faa-9396a1718a8f'
TEST_TAILINGS_STORAGE_FACILITY_GUID1 = '6e7348fd-5aaf-4910-a2e2-c36d17ff6903'
TEST_TAILINGS_STORAGE_FACILITY_NAME1 = 'Tailings Facility 1'
TEST_TAILINGS_STORAGE_FACILITY_GUID2 = '6e7348fd-5aaf-4910-a2e2-c36d17ff6904'
TEST_TAILINGS_STORAGE_FACILITY_NAME2 = 'Tailings Facility 2'
TEST_MINE_PARTY_APPT_TYPE_CODE1 = 'AAA'
TEST_MINE_PARTY_APPT_TYPE_DESCRIPTION1 = 'Alliterating Ardvarks Are Awesome'
TEST_MINE_PARTY_APPT_TYPE_CODE2 = 'BBB'
TEST_MINE_PARTY_APPT_TYPE_DESCRIPTION2 = 'Buzzing Bumblebees be ballin'
TEST_MINE_PARTY_APPT_GUID = '2484d785-a3c7-47db-b296-edb284a3c160'
# Auth Constants
TOKEN_HEADER = {"alg": "RS256", "typ": "JWT", "kid": "flask-jwt-oidc-test-client"}
BASE_AUTH_CLAIMS = {
"iss": "test_issuer",
"sub": "43e6a245-0bf7-4ccf-9bd0-e7fb85fd18cc",
"aud": "test_audience",
"exp": 21531718745,
"iat": 1531718745,
"jti": "flask-jwt-oidc-test-support",
"typ": "Bearer",
"username": "test-user",
"realm_access": {
"roles": ["idir"]
}
}
FULL_AUTH_CLAIMS = {
"iss": "test_issuer",
"sub": "43e6a245-0bf7-4ccf-9bd0-e7fb85fd18cc",
"aud": "test_audience",
"exp": 21531718745,
"iat": 1531718745,
"jti": "flask-jwt-oidc-test-support",
"typ": "Bearer",
"username": "test-user",
"preferred_username": "test-user",
"email": "test-email",
"given_name": "test-given-name",
"realm_access": {
"roles": ["mds-mine-view", "mds-mine-create", "mds-mine-admin", "idir"]
}
}
VIEW_ONLY_AUTH_CLAIMS = {
"iss": "test_issuer",
"sub": "43e6a245-0bf7-4ccf-9bd0-e7fb85fd18cc",
"aud": "test_audience",
"exp": 21531718745,
"iat": 1531718745,
"jti": "flask-jwt-oidc-test-support",
"typ": "Bearer",
"username": "test-user",
"email": "test-email",
"realm_access": {
"roles": ["mds-mine-view", "idir"]
}
}
CREATE_ONLY_AUTH_CLAIMS = {
"iss": "test_issuer",
"sub": "43e6a245-0bf7-4ccf-9bd0-e7fb85fd18cc",
"aud": "test_audience",
"exp": 21531718745,
"iat": 1531718745,
"jti": "flask-jwt-oidc-test-support",
"typ": "Bearer",
"username": "test-user",
"realm_access": {
"roles": ["mds-mine-create", "idir"]
}
}
ADMIN_ONLY_AUTH_CLAIMS = {
"iss": "test_issuer",
"sub": "43e6a245-0bf7-4ccf-9bd0-e7fb85fd18cc",
"aud": "test_audience",
"exp": 21531718745,
"iat": 1531718745,
"jti": "flask-jwt-oidc-test-support",
"typ": "Bearer",
"username": "test-user",
"realm_access": {
"roles": ["mds-mine-admin", "idir"]
}
}
PROPONENT_ONLY_AUTH_CLAIMS = {
"iss": "test_issuer",
"sub": "43e6a245-0bf7-4ccf-9bd0-e7fb85fd18cc",
"aud": "test_audience",
"exp": 21531718745,
"iat": 1531718745,
"jti": "flask-jwt-oidc-test-support",
"typ": "Bearer",
"username": "test-proponent",
"email": "test-proponent-email@minespace.ca",
"realm_access": {
"roles": ["minespace-proponent"]
}
}
| 32.699552 | 99 | 0.737383 |
dca1df2ff229771d238e4f298702a5823570238f | 10,981 | py | Python | control/webapp/member.py | doismellburning/control-panel | 516feeaac3a0f4c704105204b6efe75a94ba42c3 | [
"MIT"
] | null | null | null | control/webapp/member.py | doismellburning/control-panel | 516feeaac3a0f4c704105204b6efe75a94ba42c3 | [
"MIT"
] | null | null | null | control/webapp/member.py | doismellburning/control-panel | 516feeaac3a0f4c704105204b6efe75a94ba42c3 | [
"MIT"
] | null | null | null | from urllib.parse import urlparse
from flask import Blueprint, render_template, request, redirect, url_for
from werkzeug.exceptions import NotFound, Forbidden
from .utils import srcf_db_sess as sess
from .utils import parse_domain_name, create_job_maybe_email_and_redirect, find_member
from . import utils, inspect_services
from srcf.controllib import jobs
from srcf.database import Domain
from srcf import domains
import re
import string
bp = Blueprint("member", __name__)
@bp.route('/member')
def home():
crsid, mem = find_member(allow_inactive=True)
if not mem.user:
return redirect(url_for('member.reactivate'))
inspect_services.lookup_all(mem)
return render_template("member/home.html", member=mem)
@bp.route("/reactivate", methods=["GET", "POST"])
def reactivate():
crsid, mem = find_member(allow_inactive=True)
if mem.user:
raise NotFound
email = None
error = None
if request.method == "POST":
email = request.form.get("email", "").strip()
error = utils.validate_member_email(crsid, email)
if request.method == "POST" and not error:
return create_job_maybe_email_and_redirect(
jobs.Reactivate, member=mem, email=email)
else:
return render_template("member/reactivate.html", member=mem, email=email, error=error)
@bp.route("/member/email", methods=["GET", "POST"])
def update_email_address():
crsid, mem = find_member()
email = mem.email
error = None
if request.method == "POST":
email = request.form.get("email", "").strip()
if mem.email == email:
error = "That's the address we have already."
else:
error = utils.validate_member_email(crsid, email)
if request.method == "POST" and not error:
return create_job_maybe_email_and_redirect(
jobs.UpdateEmailAddress, member=mem, email=email)
else:
return render_template("member/update_email_address.html", member=mem, email=email, error=error)
@bp.route("/member/srcf-email", methods=["GET", "POST"])
def update_email_handler():
crsid, mem = find_member()
mail_handler = mem.mail_handler
if request.method == "POST":
mail_handler = request.form.get("mail_handler", "").strip()
if mem.mail_handler == mail_handler:
# No change requested
return redirect(url_for("member.home"))
if request.method == "POST":
if not request.form.get("confirm", ""):
return render_template("member/update_email_handler_confirm.html", member=mem,
old_mail_handler=mem.mail_handler, mail_handler=mail_handler)
else:
return create_job_maybe_email_and_redirect(
jobs.UpdateMailHandler, member=mem, mail_handler=mail_handler)
else:
return render_template("member/update_email_handler.html", member=mem, mail_handler=mail_handler)
@bp.route("/member/mailinglist", methods=["GET", "POST"])
def create_mailing_list():
crsid, mem = find_member()
listname = ""
error = None
if request.method == "POST":
listname = request.form.get("listname", "").strip()
if not listname:
error = "Please enter a list name."
elif re.search(r"[^a-z0-9_-]", listname):
error = "List names can only contain letters, numbers, hyphens and underscores."
else:
lists = inspect_services.lookup_mailinglists(crsid)
if "{}-{}".format(crsid, listname) in lists:
error = "This mailing list already exists."
if request.method == "POST" and not error:
return create_job_maybe_email_and_redirect(
jobs.CreateUserMailingList, member=mem,
listname=listname)
else:
return render_template("member/create_mailing_list.html", member=mem, listname=listname, error=error)
@bp.route("/member/mailinglist/<listname>/password", methods=["GET", "POST"])
def reset_mailing_list_password(listname):
crsid, mem = find_member()
lists = inspect_services.lookup_mailinglists(crsid)
if listname not in lists:
raise NotFound
if request.method == "POST":
return create_job_maybe_email_and_redirect(
jobs.ResetUserMailingListPassword, member=mem, listname=listname)
else:
return render_template("member/reset_mailing_list_password.html", member=mem, listname=listname)
@bp.route("/member/srcf/password", methods=["GET", "POST"], defaults={"type": "srcf"})
@bp.route("/member/mysql/password", methods=["GET", "POST"], defaults={"type": "mysql"})
@bp.route("/member/postgres/password", methods=["GET", "POST"], defaults={"type": "postgres"})
def reset_password(type):
crsid, mem = find_member()
if request.method == "POST":
cls = {"mysql": jobs.ResetMySQLUserPassword,
"postgres": jobs.ResetPostgresUserPassword,
"srcf": jobs.ResetUserPassword}[type]
return create_job_maybe_email_and_redirect(cls, member=mem)
else:
formatted_name = {"mysql": "MySQL",
"postgres": "PostgreSQL",
"srcf": "SRCF"}[type]
web_interface = {"mysql": "phpMyAdmin",
"postgres": "phpPgAdmin",
"srcf": None}[type]
if type == "srcf":
affects = "password-based access to the shell service and SFTP"
else:
affects = "access to " + web_interface + ", as well as any scripts that access databases using your account"
return render_template("member/reset_password.html", member=mem, type=type, name=formatted_name, affects=affects)
@bp.route("/member/mysql/createuser", methods=["GET", "POST"], defaults={"type": "mysql"})
@bp.route("/member/postgres/createuser", methods=["GET", "POST"], defaults={"type": "postgres"})
def create_database_account(type):
crsid, mem = find_member()
if request.method == "POST":
cls = {"mysql": jobs.ResetMySQLUserPassword,
"postgres": jobs.ResetPostgresUserPassword}[type]
return create_job_maybe_email_and_redirect(cls, member=mem)
else:
formatted_name = {"mysql": "MySQL",
"postgres": "PostgreSQL"}[type]
return render_template("member/create_database_account.html", member=mem, type=type, name=formatted_name)
@bp.route("/member/mysql/create", methods=["GET", "POST"], defaults={"type": "mysql"})
@bp.route("/member/postgres/create", methods=["GET", "POST"], defaults={"type": "postgres"})
def create_database(type):
crsid, mem = find_member()
if request.method == "POST":
cls = {"mysql": jobs.CreateMySQLUserDatabase,
"postgres": jobs.CreatePostgresUserDatabase}[type]
return create_job_maybe_email_and_redirect(cls, member=mem)
else:
formatted_name = {"mysql": "MySQL",
"postgres": "PostgreSQL"}[type]
inspect = {"mysql": inspect_services.lookup_mysqluser,
"postgres": inspect_services.lookup_pguser}[type]
has_user = inspect(mem.crsid)
return render_template("member/create_database.html", member=mem, type=type, name=formatted_name, user=has_user)
@bp.route("/member/domains/add", methods=["GET", "POST"])
def add_vhost():
crsid, mem = find_member()
domain = ""
root = ""
errors = {}
if request.method == "POST":
domain = request.form.get("domain", "").strip()
root = request.form.get("root", "").strip()
if domain:
parsed = parse_domain_name(domain)
if domain != parsed:
domain = parsed
errors["domain"] = "We've corrected your input to just the domain name, submit again once you've checked it's correct."
elif domain.endswith("." + crsid + ".user.srcf.net"):
pass
elif domain.endswith(".user.srcf.net") or domain.endswith(".soc.srcf.net"):
errors["domain"] = "SRCF domains can't be registered here."
elif sess.query(Domain).filter(Domain.domain == domain).count():
errors["domain"] = "This domain is already registered."
else:
errors["domain"] = "Please enter a domain or subdomain."
if request.form.get("edit") or errors:
return render_template("member/add_vhost.html", member=mem, domain=domain, root=root, errors=errors)
confirm = True
if request.form.get("confirm"):
confirm = False
else:
valid = {}
prefixed = "www.{}".format(domain)
for d in (domain, prefixed):
valid[d] = domains.verify(d)
if all(v == (True, True) for v in valid.values()):
confirm = False
if confirm:
return render_template("member/add_vhost_test.html", member=mem, domain=domain, root=root, valid=valid)
else:
return create_job_maybe_email_and_redirect(
jobs.AddUserVhost, member=mem,
domain=domain, root=root)
else:
return render_template("member/add_vhost.html", member=mem, domain=domain, root=root, errors=errors)
@bp.route("/member/domains/<domain>/changedocroot", methods=["GET", "POST"])
def change_vhost_docroot(domain):
crsid, mem = find_member()
errors = {}
try:
record = sess.query(Domain).filter(Domain.domain == domain, Domain.owner == crsid)[0]
except IndexError:
raise NotFound
root = record.root.replace("public_html/", "") if record.root else ""
if request.method == "POST":
root = request.form.get("root", "").strip()
        if any(ch in root for ch in string.whitespace + "\\" + "\"" + "\'") or ".." in root:
errors["root"] = "This document root is invalid."
try:
domain = parse_domain_name(domain)
except ValueError as e:
errors["domain"] = e.args[0]
if request.method == "POST" and not errors:
return create_job_maybe_email_and_redirect(
jobs.ChangeUserVhostDocroot, member=mem,
domain=domain, root=root)
else:
return render_template("member/change_vhost_docroot.html", member=mem, domain=domain, root=root, errors=errors)
@bp.route("/member/domains/<domain>/remove", methods=["GET", "POST"])
def remove_vhost(domain):
crsid, mem = find_member()
try:
record = sess.query(Domain).filter(Domain.domain == domain)[0]
except IndexError:
raise NotFound
if not record.owner == crsid:
raise Forbidden
if request.method == "POST":
return create_job_maybe_email_and_redirect(
jobs.RemoveUserVhost, member=mem,
domain=domain)
else:
return render_template("member/remove_vhost.html", member=mem, domain=domain)
| 39.217857 | 135 | 0.628449 |
4788ed51e6c14c18e5bab7c91a743d3b504d8c6b | 2,350 | py | Python | lib/assembly/kbase.py | olsonanl/assembly | 6bcecac2ba7de826d2a4625964b02c348e7ce4e9 | [
"MIT"
] | null | null | null | lib/assembly/kbase.py | olsonanl/assembly | 6bcecac2ba7de826d2a4625964b02c348e7ce4e9 | [
"MIT"
] | null | null | null | lib/assembly/kbase.py | olsonanl/assembly | 6bcecac2ba7de826d2a4625964b02c348e7ce4e9 | [
"MIT"
] | null | null | null | import sys
import json
# Usage: typespec_to_assembly_data typespec_json > assembly_data_json
def typespec_to_assembly_data(spec):
lib_types = dict (paired_end_libs = 'paired',
single_end_libs = 'single',
references = 'reference')
attr_map = dict(insert_size_mean = 'insert',
insert_size_std_dev = 'stdev')
data = {}
file_sets = []
for key, val in spec.items():
lib_type = lib_types.get(key)
        if lib_type is None:
data[key] = val
else:
libs = val if isinstance(val, list) else [ val ]
for lib in libs:
file_set = dict((attr_map.get(k,k) ,v) for k,v in lib.items() if not is_handle(k,v))
file_set["file_infos"] = list(extract_handle(v) for k,v in lib.items() if is_handle(k,v))
file_set["type"] = lib_type
file_sets.append(file_set)
data["file_sets"] = file_sets
return data
def extract_handle(typespec_handle):
mapping = dict (
id = 'shock_id',
url = 'shock_url',
file_name = 'filename',
)
mapit = lambda k: mapping[k] if k in mapping else k
handle = dict((mapit(k), v) for k,v in typespec_handle.items())
return handle
def is_handle(k, v):
if k.find("handle") >= 0 and "id" in v:
return True
return False
def fasta_to_contigset(fasta_file, name):
    contig_set = {'name': name,
                  'source': 'AssemblyService',
                  'type': 'Genome',
                  'contigs': []}
##### Parse Fasta content
contig = {}
seq_buffer = ''
with open(fasta_file) as f:
for line in f:
            if line[0] == '>':
                # A new header starts the next record: flush any pending
                # contig first, since FASTA records are delimited by '>'
                # headers rather than blank lines.
                if seq_buffer != '':
                    contig['sequence'] = seq_buffer
                    seq_buffer = ''
                    contig_set['contigs'].append(contig)
                    contig = {}
                header = line[1:].rstrip()
                contig['id'] = header
                contig['name'] = header
            elif line[0] == '\n':
                continue
            else:
                seq_buffer += line.rstrip()
if seq_buffer != '':
contig['sequence'] = seq_buffer
contig_set['contigs'].append(contig)
return contig_set
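# Minimal CLI sketch matching the usage note near the top of this module
# ("typespec_to_assembly_data typespec_json > assembly_data_json"); the
# argument handling here is an assumption, not part of the original module.
if __name__ == '__main__':
    with open(sys.argv[1]) as f:
        spec = json.load(f)
    print(json.dumps(typespec_to_assembly_data(spec), indent=2))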
| 32.638889 | 105 | 0.51617 |
ea585e09e4f0e9e84cc96cd706bc8a99fb825c76 | 55,837 | py | Python | huaweicloud-sdk-ces/huaweicloudsdkces/v1/ces_client.py | wuchen-huawei/huaweicloud-sdk-python-v3 | 3683d703f4320edb2b8516f36f16d485cff08fc2 | [
"Apache-2.0"
] | 1 | 2021-04-16T07:59:28.000Z | 2021-04-16T07:59:28.000Z | huaweicloud-sdk-ces/huaweicloudsdkces/v1/ces_client.py | wuchen-huawei/huaweicloud-sdk-python-v3 | 3683d703f4320edb2b8516f36f16d485cff08fc2 | [
"Apache-2.0"
] | null | null | null | huaweicloud-sdk-ces/huaweicloudsdkces/v1/ces_client.py | wuchen-huawei/huaweicloud-sdk-python-v3 | 3683d703f4320edb2b8516f36f16d485cff08fc2 | [
"Apache-2.0"
] | 1 | 2022-01-17T02:24:18.000Z | 2022-01-17T02:24:18.000Z | # coding: utf-8
from __future__ import absolute_import
import datetime
import re
import importlib
import six
from huaweicloudsdkcore.client import Client, ClientBuilder
from huaweicloudsdkcore.exceptions import exceptions
from huaweicloudsdkcore.utils import http_utils
from huaweicloudsdkcore.sdk_stream_request import SdkStreamRequest
class CesClient(Client):
"""
    :param configuration: Configuration object for this client
:param pool_threads: The number of threads to use for async requests
to the API. More threads means more concurrent API requests.
"""
PRIMITIVE_TYPES = (float, bool, bytes, six.text_type) + six.integer_types
NATIVE_TYPES_MAPPING = {
'int': int,
'long': int if six.PY3 else long,
'float': float,
'str': str,
'bool': bool,
'date': datetime.date,
'datetime': datetime.datetime,
'object': object,
}
def __init__(self):
super(CesClient, self).__init__()
self.model_package = importlib.import_module("huaweicloudsdkces.v1.model")
self.preset_headers = {'User-Agent': 'HuaweiCloud-SDK-Python'}
@classmethod
def new_builder(cls, clazz=None):
if clazz is None:
return ClientBuilder(cls)
if clazz.__name__ != "CesClient":
raise TypeError("client type error, support client type is CesClient")
return ClientBuilder(clazz)
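    # Usage sketch (the endpoint and credential values below are illustrative
    # assumptions, not part of this module):
    #
    #     from huaweicloudsdkcore.auth.credentials import BasicCredentials
    #     from huaweicloudsdkces.v1.model import ListMetricsRequest
    #
    #     credentials = BasicCredentials(ak, sk, project_id)
    #     client = CesClient.new_builder() \
    #         .with_credentials(credentials) \
    #         .with_endpoint("https://ces.cn-north-1.myhuaweicloud.com") \
    #         .build()
    #     response = client.list_metrics(ListMetricsRequest())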
def batch_list_metric_data(self, request):
"""批量查询监控数据
批量查询指定时间范围内指定指标的指定粒度的监控数据,目前最多支持10指标的批量查询。
:param BatchListMetricDataRequest request
:return: BatchListMetricDataResponse
"""
return self.batch_list_metric_data_with_http_info(request)
def batch_list_metric_data_with_http_info(self, request):
"""批量查询监控数据
批量查询指定时间范围内指定指标的指定粒度的监控数据,目前最多支持10指标的批量查询。
:param BatchListMetricDataRequest request
:return: BatchListMetricDataResponse
"""
all_params = ['batch_list_metric_data_request_body']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/V1.0/{project_id}/batch-query-metric-data',
method='POST',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='BatchListMetricDataResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def create_alarm(self, request):
"""创建告警规则
创建一条告警规则。
:param CreateAlarmRequest request
:return: CreateAlarmResponse
"""
return self.create_alarm_with_http_info(request)
def create_alarm_with_http_info(self, request):
"""创建告警规则
创建一条告警规则。
:param CreateAlarmRequest request
:return: CreateAlarmResponse
"""
all_params = ['create_alarm_request_body']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/V1.0/{project_id}/alarms',
method='POST',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='CreateAlarmResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def create_alarm_template(self, request):
"""创建自定义告警模板
创建自定义告警模板。
:param CreateAlarmTemplateRequest request
:return: CreateAlarmTemplateResponse
"""
return self.create_alarm_template_with_http_info(request)
def create_alarm_template_with_http_info(self, request):
"""创建自定义告警模板
创建自定义告警模板。
:param CreateAlarmTemplateRequest request
:return: CreateAlarmTemplateResponse
"""
all_params = ['create_alarm_template_request_body']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/V1.0/{project_id}/alarm-template',
method='POST',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='CreateAlarmTemplateResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def create_events(self, request):
"""上报事件
上报自定义事件。
:param CreateEventsRequest request
:return: CreateEventsResponse
"""
return self.create_events_with_http_info(request)
def create_events_with_http_info(self, request):
"""上报事件
上报自定义事件。
:param CreateEventsRequest request
:return: CreateEventsResponse
"""
all_params = ['event_items']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/V1.0/{project_id}/events',
method='POST',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='CreateEventsResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def create_metric_data(self, request):
"""添加监控数据
添加一条或多条指标监控数据。
:param CreateMetricDataRequest request
:return: CreateMetricDataResponse
"""
return self.create_metric_data_with_http_info(request)
def create_metric_data_with_http_info(self, request):
"""添加监控数据
添加一条或多条指标监控数据。
:param CreateMetricDataRequest request
:return: CreateMetricDataResponse
"""
all_params = ['metric_data_item']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/V1.0/{project_id}/metric-data',
method='POST',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='CreateMetricDataResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def create_resource_group(self, request):
"""创建资源分组
创建资源分组,资源分组支持将各类资源按照业务集中进行分组管理,可以从分组角度查看监控与告警信息,以提升运维效率。
:param CreateResourceGroupRequest request
:return: CreateResourceGroupResponse
"""
return self.create_resource_group_with_http_info(request)
def create_resource_group_with_http_info(self, request):
"""创建资源分组
创建资源分组,资源分组支持将各类资源按照业务集中进行分组管理,可以从分组角度查看监控与告警信息,以提升运维效率。
:param CreateResourceGroupRequest request
:return: CreateResourceGroupResponse
"""
all_params = ['create_resource_group_request_body']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/V1.0/{project_id}/resource-groups',
method='POST',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='CreateResourceGroupResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def delete_alarm(self, request):
"""删除告警规则
删除一条告警规则。
:param DeleteAlarmRequest request
:return: DeleteAlarmResponse
"""
return self.delete_alarm_with_http_info(request)
def delete_alarm_with_http_info(self, request):
"""删除告警规则
删除一条告警规则。
:param DeleteAlarmRequest request
:return: DeleteAlarmResponse
"""
all_params = ['alarm_id']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'alarm_id' in local_var_params:
path_params['alarm_id'] = local_var_params['alarm_id']
query_params = []
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/V1.0/{project_id}/alarms/{alarm_id}',
method='DELETE',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='DeleteAlarmResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def delete_alarm_template(self, request):
"""删除自定义告警模板
根据ID删除自定义告警模板。
:param DeleteAlarmTemplateRequest request
:return: DeleteAlarmTemplateResponse
"""
return self.delete_alarm_template_with_http_info(request)
def delete_alarm_template_with_http_info(self, request):
"""删除自定义告警模板
根据ID删除自定义告警模板。
:param DeleteAlarmTemplateRequest request
:return: DeleteAlarmTemplateResponse
"""
all_params = ['template_id']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'template_id' in local_var_params:
path_params['template_id'] = local_var_params['template_id']
query_params = []
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/V1.0/{project_id}/alarm-template/{template_id}',
method='DELETE',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='DeleteAlarmTemplateResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def delete_resource_group(self, request):
"""删除资源分组
删除一条资源分组。
:param DeleteResourceGroupRequest request
:return: DeleteResourceGroupResponse
"""
return self.delete_resource_group_with_http_info(request)
def delete_resource_group_with_http_info(self, request):
"""删除资源分组
删除一条资源分组。
:param DeleteResourceGroupRequest request
:return: DeleteResourceGroupResponse
"""
all_params = ['group_id']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'group_id' in local_var_params:
path_params['group_id'] = local_var_params['group_id']
query_params = []
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/V1.0/{project_id}/resource-groups/{group_id}',
method='DELETE',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='DeleteResourceGroupResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def list_alarm_histories(self, request):
"""查询告警历史
查询告警历史列表。
:param ListAlarmHistoriesRequest request
:return: ListAlarmHistoriesResponse
"""
return self.list_alarm_histories_with_http_info(request)
def list_alarm_histories_with_http_info(self, request):
"""查询告警历史
查询告警历史列表。
:param ListAlarmHistoriesRequest request
:return: ListAlarmHistoriesResponse
"""
all_params = ['group_id', 'alarm_id', 'alarm_name', 'alarm_status', 'alarm_level', 'namespace', '_from', 'to', 'start', 'limit']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
query_params = []
if 'group_id' in local_var_params:
query_params.append(('group_id', local_var_params['group_id']))
if 'alarm_id' in local_var_params:
query_params.append(('alarm_id', local_var_params['alarm_id']))
if 'alarm_name' in local_var_params:
query_params.append(('alarm_name', local_var_params['alarm_name']))
if 'alarm_status' in local_var_params:
query_params.append(('alarm_status', local_var_params['alarm_status']))
if 'alarm_level' in local_var_params:
query_params.append(('alarm_level', local_var_params['alarm_level']))
if 'namespace' in local_var_params:
query_params.append(('namespace', local_var_params['namespace']))
if '_from' in local_var_params:
query_params.append(('from', local_var_params['_from']))
if 'to' in local_var_params:
query_params.append(('to', local_var_params['to']))
if 'start' in local_var_params:
query_params.append(('start', local_var_params['start']))
if 'limit' in local_var_params:
query_params.append(('limit', local_var_params['limit']))
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/V1.0/{project_id}/alarm-histories',
method='GET',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='ListAlarmHistoriesResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def list_alarm_templates(self, request):
"""查询自定义告警模板列表
查询自定义告警模板列表
:param ListAlarmTemplatesRequest request
:return: ListAlarmTemplatesResponse
"""
return self.list_alarm_templates_with_http_info(request)
def list_alarm_templates_with_http_info(self, request):
"""查询自定义告警模板列表
查询自定义告警模板列表
:param ListAlarmTemplatesRequest request
:return: ListAlarmTemplatesResponse
"""
all_params = ['alarm_template_id', 'namespace', 'dname', 'start', 'limit']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
query_params = []
if 'alarm_template_id' in local_var_params:
query_params.append(('alarmTemplateId', local_var_params['alarm_template_id']))
if 'namespace' in local_var_params:
query_params.append(('namespace', local_var_params['namespace']))
if 'dname' in local_var_params:
query_params.append(('dname', local_var_params['dname']))
if 'start' in local_var_params:
query_params.append(('start', local_var_params['start']))
if 'limit' in local_var_params:
query_params.append(('limit', local_var_params['limit']))
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/V1.0/{project_id}/alarm-template',
method='GET',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='ListAlarmTemplatesResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def list_alarms(self, request):
"""查询告警规则列表
查询告警规则列表,可以指定分页条件限制结果数量,可以指定排序规则。
:param ListAlarmsRequest request
:return: ListAlarmsResponse
"""
return self.list_alarms_with_http_info(request)
def list_alarms_with_http_info(self, request):
"""查询告警规则列表
查询告警规则列表,可以指定分页条件限制结果数量,可以指定排序规则。
:param ListAlarmsRequest request
:return: ListAlarmsResponse
"""
all_params = ['limit', 'order', 'start']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
query_params = []
if 'limit' in local_var_params:
query_params.append(('limit', local_var_params['limit']))
if 'order' in local_var_params:
query_params.append(('order', local_var_params['order']))
if 'start' in local_var_params:
query_params.append(('start', local_var_params['start']))
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/V1.0/{project_id}/alarms',
method='GET',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='ListAlarmsResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def list_event_detail(self, request):
"""查询某一事件监控详情
根据事件监控名称,查询该事件发生的详细信息。
:param ListEventDetailRequest request
:return: ListEventDetailResponse
"""
return self.list_event_detail_with_http_info(request)
def list_event_detail_with_http_info(self, request):
"""查询某一事件监控详情
根据事件监控名称,查询该事件发生的详细信息。
:param ListEventDetailRequest request
:return: ListEventDetailResponse
"""
all_params = ['event_name', 'event_type', 'event_source', 'event_level', 'event_user', 'event_state', '_from', 'to', 'start', 'limit']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'event_name' in local_var_params:
path_params['event_name'] = local_var_params['event_name']
query_params = []
if 'event_type' in local_var_params:
query_params.append(('event_type', local_var_params['event_type']))
if 'event_source' in local_var_params:
query_params.append(('event_source', local_var_params['event_source']))
if 'event_level' in local_var_params:
query_params.append(('event_level', local_var_params['event_level']))
if 'event_user' in local_var_params:
query_params.append(('event_user', local_var_params['event_user']))
if 'event_state' in local_var_params:
query_params.append(('event_state', local_var_params['event_state']))
if '_from' in local_var_params:
query_params.append(('from', local_var_params['_from']))
if 'to' in local_var_params:
query_params.append(('to', local_var_params['to']))
if 'start' in local_var_params:
query_params.append(('start', local_var_params['start']))
if 'limit' in local_var_params:
query_params.append(('limit', local_var_params['limit']))
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/V1.0/{project_id}/event/{event_name}',
method='GET',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='ListEventDetailResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def list_events(self, request):
"""查询事件监控列表
查询事件监控列表,包括系统事件和自定义事件。
:param ListEventsRequest request
:return: ListEventsResponse
"""
return self.list_events_with_http_info(request)
def list_events_with_http_info(self, request):
"""查询事件监控列表
查询事件监控列表,包括系统事件和自定义事件。
:param ListEventsRequest request
:return: ListEventsResponse
"""
all_params = ['event_type', 'event_name', '_from', 'to', 'start', 'limit']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
query_params = []
if 'event_type' in local_var_params:
query_params.append(('event_type', local_var_params['event_type']))
if 'event_name' in local_var_params:
query_params.append(('event_name', local_var_params['event_name']))
if '_from' in local_var_params:
query_params.append(('from', local_var_params['_from']))
if 'to' in local_var_params:
query_params.append(('to', local_var_params['to']))
if 'start' in local_var_params:
query_params.append(('start', local_var_params['start']))
if 'limit' in local_var_params:
query_params.append(('limit', local_var_params['limit']))
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/V1.0/{project_id}/events',
method='GET',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='ListEventsResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def list_metrics(self, request):
"""查询指标列表
查询系统当前可监控指标列表,可以指定指标命名空间、指标名称、维度、排序方式,起始记录和最大记录条数过滤查询结果。
:param ListMetricsRequest request
:return: ListMetricsResponse
"""
return self.list_metrics_with_http_info(request)
def list_metrics_with_http_info(self, request):
"""查询指标列表
查询系统当前可监控指标列表,可以指定指标命名空间、指标名称、维度、排序方式,起始记录和最大记录条数过滤查询结果。
:param ListMetricsRequest request
:return: ListMetricsResponse
"""
all_params = ['dim_0', 'dim_1', 'dim_2', 'limit', 'metric_name', 'namespace', 'order', 'start']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
query_params = []
if 'dim_0' in local_var_params:
query_params.append(('dim.0', local_var_params['dim_0']))
if 'dim_1' in local_var_params:
query_params.append(('dim.1', local_var_params['dim_1']))
if 'dim_2' in local_var_params:
query_params.append(('dim.2', local_var_params['dim_2']))
if 'limit' in local_var_params:
query_params.append(('limit', local_var_params['limit']))
if 'metric_name' in local_var_params:
query_params.append(('metric_name', local_var_params['metric_name']))
if 'namespace' in local_var_params:
query_params.append(('namespace', local_var_params['namespace']))
if 'order' in local_var_params:
query_params.append(('order', local_var_params['order']))
if 'start' in local_var_params:
query_params.append(('start', local_var_params['start']))
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/V1.0/{project_id}/metrics',
method='GET',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='ListMetricsResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def list_resource_group(self, request):
"""查询所有资源分组
查询所创建的所有资源分组。
:param ListResourceGroupRequest request
:return: ListResourceGroupResponse
"""
return self.list_resource_group_with_http_info(request)
def list_resource_group_with_http_info(self, request):
"""查询所有资源分组
查询所创建的所有资源分组。
:param ListResourceGroupRequest request
:return: ListResourceGroupResponse
"""
all_params = ['group_name', 'group_id', 'status', 'start', 'limit']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
query_params = []
if 'group_name' in local_var_params:
query_params.append(('group_name', local_var_params['group_name']))
if 'group_id' in local_var_params:
query_params.append(('group_id', local_var_params['group_id']))
if 'status' in local_var_params:
query_params.append(('status', local_var_params['status']))
if 'start' in local_var_params:
query_params.append(('start', local_var_params['start']))
if 'limit' in local_var_params:
query_params.append(('limit', local_var_params['limit']))
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/V1.0/{project_id}/resource-groups',
method='GET',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='ListResourceGroupResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def show_alarm(self, request):
"""查询单条告警规则信息
根据告警ID查询告警规则信息。
:param ShowAlarmRequest request
:return: ShowAlarmResponse
"""
return self.show_alarm_with_http_info(request)
def show_alarm_with_http_info(self, request):
"""查询单条告警规则信息
根据告警ID查询告警规则信息。
:param ShowAlarmRequest request
:return: ShowAlarmResponse
"""
all_params = ['alarm_id']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'alarm_id' in local_var_params:
path_params['alarm_id'] = local_var_params['alarm_id']
query_params = []
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/V1.0/{project_id}/alarms/{alarm_id}',
method='GET',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='ShowAlarmResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def show_event_data(self, request):
"""查询主机配置数据
查询指定时间范围指定事件类型的主机配置数据,可以通过参数指定需要查询的数据维度。注意:该接口提供给HANA场景下SAP Monitor查询主机配置数据,其他场景下查不到主机配置数据。
:param ShowEventDataRequest request
:return: ShowEventDataResponse
"""
return self.show_event_data_with_http_info(request)
def show_event_data_with_http_info(self, request):
"""查询主机配置数据
查询指定时间范围指定事件类型的主机配置数据,可以通过参数指定需要查询的数据维度。注意:该接口提供给HANA场景下SAP Monitor查询主机配置数据,其他场景下查不到主机配置数据。
:param ShowEventDataRequest request
:return: ShowEventDataResponse
"""
all_params = ['namespace', 'dim_0', 'type', '_from', 'to', 'dim_1', 'dim_2', 'dim_3']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
query_params = []
if 'namespace' in local_var_params:
query_params.append(('namespace', local_var_params['namespace']))
if 'dim_0' in local_var_params:
query_params.append(('dim.0', local_var_params['dim_0']))
if 'dim_1' in local_var_params:
query_params.append(('dim.1', local_var_params['dim_1']))
if 'dim_2' in local_var_params:
query_params.append(('dim.2', local_var_params['dim_2']))
if 'dim_3' in local_var_params:
query_params.append(('dim.3', local_var_params['dim_3']))
if 'type' in local_var_params:
query_params.append(('type', local_var_params['type']))
if '_from' in local_var_params:
query_params.append(('from', local_var_params['_from']))
if 'to' in local_var_params:
query_params.append(('to', local_var_params['to']))
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/V1.0/{project_id}/event-data',
method='GET',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='ShowEventDataResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def show_metric_data(self, request):
"""查询监控数据
查询指定时间范围指定指标的指定粒度的监控数据,可以通过参数指定需要查询的数据维度。
:param ShowMetricDataRequest request
:return: ShowMetricDataResponse
"""
return self.show_metric_data_with_http_info(request)
def show_metric_data_with_http_info(self, request):
"""查询监控数据
查询指定时间范围指定指标的指定粒度的监控数据,可以通过参数指定需要查询的数据维度。
:param ShowMetricDataRequest request
:return: ShowMetricDataResponse
"""
all_params = ['namespace', 'metric_name', 'dim_0', 'filter', 'period', '_from', 'to', 'dim_1', 'dim_2', 'dim_3']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
query_params = []
if 'namespace' in local_var_params:
query_params.append(('namespace', local_var_params['namespace']))
if 'metric_name' in local_var_params:
query_params.append(('metric_name', local_var_params['metric_name']))
if 'dim_0' in local_var_params:
query_params.append(('dim.0', local_var_params['dim_0']))
if 'dim_1' in local_var_params:
query_params.append(('dim.1', local_var_params['dim_1']))
if 'dim_2' in local_var_params:
query_params.append(('dim.2', local_var_params['dim_2']))
if 'dim_3' in local_var_params:
query_params.append(('dim.3', local_var_params['dim_3']))
if 'filter' in local_var_params:
query_params.append(('filter', local_var_params['filter']))
if 'period' in local_var_params:
query_params.append(('period', local_var_params['period']))
if '_from' in local_var_params:
query_params.append(('from', local_var_params['_from']))
if 'to' in local_var_params:
query_params.append(('to', local_var_params['to']))
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/V1.0/{project_id}/metric-data',
method='GET',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='ShowMetricDataResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def show_quotas(self, request):
"""查询配额
查询用户可以创建的资源配额总数及当前使用量,当前仅有告警规则一种资源类型。
:param ShowQuotasRequest request
:return: ShowQuotasResponse
"""
return self.show_quotas_with_http_info(request)
def show_quotas_with_http_info(self, request):
"""查询配额
查询用户可以创建的资源配额总数及当前使用量,当前仅有告警规则一种资源类型。
:param ShowQuotasRequest request
:return: ShowQuotasResponse
"""
all_params = []
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/V1.0/{project_id}/quotas',
method='GET',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='ShowQuotasResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def show_resource_group(self, request):
"""查询资源分组下的资源
根据资源分组ID查询资源分组下的资源。
:param ShowResourceGroupRequest request
:return: ShowResourceGroupResponse
"""
return self.show_resource_group_with_http_info(request)
def show_resource_group_with_http_info(self, request):
"""查询资源分组下的资源
根据资源分组ID查询资源分组下的资源。
:param ShowResourceGroupRequest request
:return: ShowResourceGroupResponse
"""
all_params = ['group_id', 'status', 'namespace', 'dname', 'start', 'limit']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'group_id' in local_var_params:
path_params['group_id'] = local_var_params['group_id']
query_params = []
if 'status' in local_var_params:
query_params.append(('status', local_var_params['status']))
if 'namespace' in local_var_params:
query_params.append(('namespace', local_var_params['namespace']))
if 'dname' in local_var_params:
query_params.append(('dname', local_var_params['dname']))
if 'start' in local_var_params:
query_params.append(('start', local_var_params['start']))
if 'limit' in local_var_params:
query_params.append(('limit', local_var_params['limit']))
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/V1.0/{project_id}/resource-groups/{group_id}',
method='GET',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='ShowResourceGroupResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def update_alarm(self, request):
"""修改告警规则
修改告警规则。
:param UpdateAlarmRequest request
:return: UpdateAlarmResponse
"""
return self.update_alarm_with_http_info(request)
def update_alarm_with_http_info(self, request):
"""修改告警规则
修改告警规则。
:param UpdateAlarmRequest request
:return: UpdateAlarmResponse
"""
all_params = ['alarm_id', 'update_alarm_request_body']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'alarm_id' in local_var_params:
path_params['alarm_id'] = local_var_params['alarm_id']
query_params = []
header_params = {}
form_params = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/V1.0/{project_id}/alarms/{alarm_id}',
method='PUT',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='UpdateAlarmResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def update_alarm_action(self, request):
"""启停告警规则
启动或停止一条告警规则。
:param UpdateAlarmActionRequest request
:return: UpdateAlarmActionResponse
"""
return self.update_alarm_action_with_http_info(request)
def update_alarm_action_with_http_info(self, request):
"""启停告警规则
启动或停止一条告警规则。
:param UpdateAlarmActionRequest request
:return: UpdateAlarmActionResponse
"""
all_params = ['alarm_id', 'modify_alarm_action_req']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'alarm_id' in local_var_params:
path_params['alarm_id'] = local_var_params['alarm_id']
query_params = []
header_params = {}
form_params = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/V1.0/{project_id}/alarms/{alarm_id}/action',
method='PUT',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='UpdateAlarmActionResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def update_alarm_template(self, request):
"""更新自定义告警模板
更新自定义告警模板。
:param UpdateAlarmTemplateRequest request
:return: UpdateAlarmTemplateResponse
"""
return self.update_alarm_template_with_http_info(request)
def update_alarm_template_with_http_info(self, request):
"""更新自定义告警模板
更新自定义告警模板。
:param UpdateAlarmTemplateRequest request
:return: UpdateAlarmTemplateResponse
"""
all_params = ['template_id', 'update_alarm_template_request_body']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'template_id' in local_var_params:
path_params['template_id'] = local_var_params['template_id']
query_params = []
header_params = {}
form_params = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/V1.0/{project_id}/alarm-template/{template_id}',
method='PUT',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='UpdateAlarmTemplateResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def update_resource_group(self, request):
"""更新资源分组
更新资源分组,资源分组支持将各类资源按照业务集中进行分组管理,可以从分组角度查看监控与告警信息,以提升运维效率。
:param UpdateResourceGroupRequest request
:return: UpdateResourceGroupResponse
"""
return self.update_resource_group_with_http_info(request)
def update_resource_group_with_http_info(self, request):
"""更新资源分组
更新资源分组,资源分组支持将各类资源按照业务集中进行分组管理,可以从分组角度查看监控与告警信息,以提升运维效率。
:param UpdateResourceGroupRequest request
:return: UpdateResourceGroupResponse
"""
all_params = ['group_id', 'update_resource_group_request_body']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'group_id' in local_var_params:
path_params['group_id'] = local_var_params['group_id']
query_params = []
header_params = {}
form_params = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/V1.0/{project_id}/resource-groups/{group_id}',
method='PUT',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='UpdateResourceGroupResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def call_api(self, resource_path, method, path_params=None, query_params=None, header_params=None, body=None,
post_params=None, response_type=None, response_headers=None, auth_settings=None,
collection_formats=None, request_type=None):
"""Makes the HTTP request and returns deserialized data.
:param resource_path: Path to method endpoint.
:param method: Method to call.
:param path_params: Path parameters in the url.
:param query_params: Query parameters in the url.
:param header_params: Header parameters to be placed in the request header.
:param body: Request body.
:param post_params dict: Request post form parameters,
for `application/x-www-form-urlencoded`, `multipart/form-data`.
:param auth_settings list: Auth Settings names for the request.
:param response_type: Response data type.
        :param response_headers: Headers that should be added to the response data.
:param collection_formats: dict of collection formats for path, query,
header, and post parameters.
:param request_type: Request data type.
:return:
Return the response directly.
"""
return self.do_http_request(
method=method,
resource_path=resource_path,
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body,
post_params=post_params,
response_type=response_type,
response_headers=response_headers,
collection_formats=collection_formats,
request_type=request_type)
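# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the generated SDK): how the GET
# wrappers above are typically driven. `client` is assumed to be a configured
# CesClient instance and `ShowMetricDataRequest` the request model generated
# alongside this client; all field values below are hypothetical.
def _example_show_metric_data(client):
    request = ShowMetricDataRequest(
        namespace="SYS.ECS",         # service namespace
        metric_name="cpu_util",      # metric to read
        dim_0="instance_id,i-1234",  # first dimension in "name,value" form
        filter="average",            # aggregation method
        period=300,                  # granularity in seconds
        _from=1556625600000,         # start of the time range (ms)
        to=1556632800000,            # end of the time range (ms)
    )
    # Delegates to show_metric_data_with_http_info(), which assembles the
    # query string (namespace, metric_name, dim.0, ...) seen above.
    return client.show_metric_data(request)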
| 31.228747 | 142 | 0.621989 |
abfd9f2c4ac77fab652bddc1c0629b3ae1004f14 | 8,841 | py | Python | tools/detection/test.py | ypwhs/mmfewshot | 2cda666e5c50586a73516f860b66729593af0d4a | [
"Apache-2.0"
] | 1 | 2021-12-30T07:24:48.000Z | 2021-12-30T07:24:48.000Z | tools/detection/test.py | xiaodongdreams/mmfewshot | ded7c357edcca29a84e61e6ce55ef9dff407d112 | [
"Apache-2.0"
] | null | null | null | tools/detection/test.py | xiaodongdreams/mmfewshot | ded7c357edcca29a84e61e6ce55ef9dff407d112 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
import warnings
import mmcv
import torch
from mmcv import Config, DictAction
from mmcv.parallel import MMDataParallel, MMDistributedDataParallel
from mmcv.runner import (get_dist_info, init_dist, load_checkpoint,
wrap_fp16_model)
from mmfewshot.detection.datasets import (build_dataloader, build_dataset,
get_copy_dataset_type)
from mmfewshot.detection.models import build_detector
def parse_args():
parser = argparse.ArgumentParser(
description='MMFewShot test (and eval) a model')
parser.add_argument('config', help='test config file path')
parser.add_argument('checkpoint', help='checkpoint file')
parser.add_argument('--out', help='output result file in pickle format')
parser.add_argument(
'--eval',
type=str,
nargs='+',
        help='evaluation metrics, which depend on the dataset, e.g., "bbox",'
' "segm", "proposal" for COCO, and "mAP", "recall" for PASCAL VOC')
parser.add_argument('--show', action='store_true', help='show results')
parser.add_argument(
'--show-dir', help='directory where painted images will be saved')
parser.add_argument(
'--show-score-thr',
type=float,
default=0.3,
help='score threshold (default: 0.3)')
parser.add_argument(
'--gpu-collect',
action='store_true',
help='whether to use gpu to collect results.')
parser.add_argument(
'--tmpdir',
help='tmp directory used for collecting results from multiple '
'workers, available when gpu-collect is not specified')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
parser.add_argument(
'--options',
nargs='+',
action=DictAction,
help='custom options for evaluation, the key-value pair in xxx=yyy '
'format will be kwargs for dataset.evaluate() function (deprecate), '
'change to --eval-options instead.')
parser.add_argument(
'--eval-options',
nargs='+',
action=DictAction,
help='custom options for evaluation, the key-value pair in xxx=yyy '
'format will be kwargs for dataset.evaluate() function')
parser.add_argument(
'--launcher',
choices=['none', 'pytorch', 'slurm', 'mpi'],
default='none',
help='job launcher')
parser.add_argument('--local_rank', type=int, default=0)
args = parser.parse_args()
if 'LOCAL_RANK' not in os.environ:
os.environ['LOCAL_RANK'] = str(args.local_rank)
if args.options and args.eval_options:
raise ValueError(
'--options and --eval-options cannot be both '
'specified, --options is deprecated in favor of --eval-options')
if args.options:
warnings.warn('--options is deprecated in favor of --eval-options')
args.eval_options = args.options
args.cfg_options = args.options
return args
def main():
args = parse_args()
    assert args.out or args.eval or args.show \
        or args.show_dir, (
            'Please specify at least one operation (save/eval/show the '
            'results) with the argument "--out", "--eval", '
            '"--show" or "--show-dir"')
if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
raise ValueError('The output file must be a pkl file.')
cfg = Config.fromfile(args.config)
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
# import modules from string list.
if cfg.get('custom_imports', None):
from mmcv.utils import import_modules_from_strings
import_modules_from_strings(**cfg['custom_imports'])
# set cudnn_benchmark
if cfg.get('cudnn_benchmark', False):
torch.backends.cudnn.benchmark = True
cfg.model.pretrained = None
    # currently only single-image testing is supported
    samples_per_gpu = cfg.data.test.pop('samples_per_gpu', 1)
    assert samples_per_gpu == 1, 'currently only single-image testing is supported'
# init distributed env first, since logger depends on the dist info.
if args.launcher == 'none':
distributed = False
else:
distributed = True
init_dist(args.launcher, **cfg.dist_params)
# build the dataloader
dataset = build_dataset(cfg.data.test)
data_loader = build_dataloader(
dataset,
samples_per_gpu=samples_per_gpu,
workers_per_gpu=cfg.data.workers_per_gpu,
dist=distributed,
shuffle=False)
# pop frozen_parameters
cfg.model.pop('frozen_parameters', None)
# build the model and load checkpoint
cfg.model.train_cfg = None
model = build_detector(cfg.model)
fp16_cfg = cfg.get('fp16', None)
if fp16_cfg is not None:
wrap_fp16_model(model)
checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')
    # old versions did not save class info in checkpoints, this workaround is
# for backward compatibility
if 'CLASSES' in checkpoint.get('meta', {}):
model.CLASSES = checkpoint['meta']['CLASSES']
else:
model.CLASSES = dataset.CLASSES
# for meta-learning methods which require support template dataset
# for model initialization.
if cfg.data.get('model_init', None) is not None:
cfg.data.model_init.pop('copy_from_train_dataset')
model_init_samples_per_gpu = cfg.data.model_init.pop(
'samples_per_gpu', 1)
model_init_workers_per_gpu = cfg.data.model_init.pop(
'workers_per_gpu', 1)
if cfg.data.model_init.get('ann_cfg', None) is None:
assert checkpoint['meta'].get('model_init_ann_cfg',
None) is not None
cfg.data.model_init.type = \
get_copy_dataset_type(cfg.data.model_init.type)
cfg.data.model_init.ann_cfg = \
checkpoint['meta']['model_init_ann_cfg']
model_init_dataset = build_dataset(cfg.data.model_init)
# disable dist to make all rank get same data
model_init_dataloader = build_dataloader(
model_init_dataset,
samples_per_gpu=model_init_samples_per_gpu,
workers_per_gpu=model_init_workers_per_gpu,
dist=False,
shuffle=False)
if not distributed:
model = MMDataParallel(model, device_ids=[0])
show_kwargs = dict(show_score_thr=args.show_score_thr)
if cfg.data.get('model_init', None) is not None:
from mmfewshot.detection.apis import (single_gpu_model_init,
single_gpu_test)
single_gpu_model_init(model, model_init_dataloader)
else:
from mmdet.apis.test import single_gpu_test
outputs = single_gpu_test(model, data_loader, args.show, args.show_dir,
**show_kwargs)
else:
model = MMDistributedDataParallel(
model.cuda(),
device_ids=[torch.cuda.current_device()],
broadcast_buffers=False)
if cfg.data.get('model_init', None) is not None:
from mmfewshot.detection.apis import (multi_gpu_model_init,
multi_gpu_test)
multi_gpu_model_init(model, model_init_dataloader)
else:
from mmdet.apis.test import multi_gpu_test
outputs = multi_gpu_test(
model,
data_loader,
args.tmpdir,
args.gpu_collect,
)
rank, _ = get_dist_info()
if rank == 0:
if args.out:
print(f'\nwriting results to {args.out}')
mmcv.dump(outputs, args.out)
kwargs = {} if args.eval_options is None else args.eval_options
if args.eval:
eval_kwargs = cfg.get('evaluation', {}).copy()
# hard-code way to remove EvalHook args
for key in [
'interval', 'tmpdir', 'start', 'gpu_collect', 'save_best',
'rule'
]:
eval_kwargs.pop(key, None)
eval_kwargs.update(dict(metric=args.eval, **kwargs))
print(dataset.evaluate(outputs, **eval_kwargs))
if __name__ == '__main__':
main()
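# Illustration only (hypothetical paths): single-GPU evaluation of a trained
# model looks like
#   python tools/detection/test.py ${CONFIG_FILE} ${CHECKPOINT_FILE} --eval mAP
# while distributed testing launches this script with --launcher pytorch via
# torch.distributed.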
| 38.947137 | 79 | 0.625042 |
5397f36d8fa6018fe38f871ca43fd30f2d494d6f | 387 | py | Python | frontend/stock-api/core/wsgi.py | xiaomoxu/cuddly-bassoon | 630ad6f5081bb600af4ad456f80f736538a6e574 | [
"MIT"
] | 2 | 2018-10-29T10:43:17.000Z | 2020-11-14T12:33:29.000Z | frontend/stock-api/core/wsgi.py | xiaomoxu/cuddly-bassoon | 630ad6f5081bb600af4ad456f80f736538a6e574 | [
"MIT"
] | 1 | 2018-03-21T08:06:41.000Z | 2018-03-21T08:06:41.000Z | frontend/stock-api/core/wsgi.py | xiaomoxu/cuddly-bassoon | 630ad6f5081bb600af4ad456f80f736538a6e574 | [
"MIT"
] | 2 | 2018-01-23T01:48:06.000Z | 2018-10-29T10:43:22.000Z | """
WSGI config for stocks project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "core.settings")
application = get_wsgi_application()
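# Illustration only: a WSGI server loads the `application` callable above,
# e.g. (hypothetical): gunicorn core.wsgi:application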
| 22.764706 | 78 | 0.782946 |
afbd88aeae089788eb6e9660e828529238ec19cf | 500 | py | Python | instagram/forms.py | ALKEMIA-CHARLES/Instaclone | fc7dbcccb48dbc313da15b12a17a5ade19675efa | [
"Unlicense"
] | null | null | null | instagram/forms.py | ALKEMIA-CHARLES/Instaclone | fc7dbcccb48dbc313da15b12a17a5ade19675efa | [
"Unlicense"
] | 9 | 2020-06-05T20:37:40.000Z | 2021-09-22T18:28:23.000Z | instagram/forms.py | ALKEMIA-CHARLES/Instaclone | fc7dbcccb48dbc313da15b12a17a5ade19675efa | [
"Unlicense"
] | null | null | null | from django import forms
from instagram.models import Post, Comments, Profile
from pyuploadcare.dj.forms import ImageField
from django.contrib.auth.models import User
class Uploadindexphotoform(forms.ModelForm):
class Meta:
model = Profile
fields = ['image']
class UserUpdateForm(forms.ModelForm):
class Meta:
model = User
fields = ['username']
class CommentForm(forms.ModelForm):
class Meta:
model = Comments
        fields = ['comment']
| 23.809524 | 52 | 0.684 |
162e5292af57f4586269a0ce2a72b2a5ecf2faf5 | 7,943 | py | Python | test/test_enqueue_copy.py | zachjweiner/pyopencl | 4e2e4f3150c331680e6d9e36c59290411e4a0c40 | [
"Apache-2.0"
] | null | null | null | test/test_enqueue_copy.py | zachjweiner/pyopencl | 4e2e4f3150c331680e6d9e36c59290411e4a0c40 | [
"Apache-2.0"
] | null | null | null | test/test_enqueue_copy.py | zachjweiner/pyopencl | 4e2e4f3150c331680e6d9e36c59290411e4a0c40 | [
"Apache-2.0"
] | null | null | null | #! /usr/bin/env python
__copyright__ = "Copyright (C) 2016 Shane J. Latham"
__license__ = """
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import numpy as np
import pyopencl as cl
import pytest
from pyopencl.tools import ( # noqa
pytest_generate_tests_for_pyopencl as pytest_generate_tests)
from pyopencl.characterize import get_pocl_version
def generate_slice(start, shape):
return tuple([slice(start[i], start[i]+shape[i]) for i in range(len(start))])
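# For example, generate_slice((20, 13), (128, 96)) yields
# (slice(20, 148, None), slice(13, 109, None)) -- the sub-array window the
# tests below cut out of the host arrays.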
def test_enqueue_copy_rect_2d(ctx_factory, honor_skip=True):
"""
Test 2D sub-array (slice) copy.
"""
ctx = ctx_factory()
queue = cl.CommandQueue(ctx)
if (honor_skip
and ctx.devices[0].platform.name == "Portable Computing Language"
and get_pocl_version(ctx.devices[0].platform) <= (0, 13)):
# https://github.com/pocl/pocl/issues/353
pytest.skip("POCL's rectangular copies crash")
ary_in_shp = 256, 128 # Entire array shape from which sub-array copied to device
sub_ary_shp = 128, 96 # Sub-array shape to be copied to device
ary_in_origin = 20, 13 # Sub-array origin
ary_in_slice = generate_slice(ary_in_origin, sub_ary_shp)
ary_out_origin = 11, 19 # Origin of sub-array copy from device to host-array
ary_out_shp = 512, 256 # Entire host-array shape copy sub-array device->host
ary_out_slice = generate_slice(ary_out_origin, sub_ary_shp)
buf_in_origin = 7, 3 # Origin of sub-array in device buffer
buf_in_shp = 300, 200 # shape of device buffer
buf_out_origin = 31, 17 # Origin of 2nd device buffer
buf_out_shp = 300, 400 # shape of 2nd device buffer
# Create host array of random values.
h_ary_in = \
np.array(
np.random.randint(
0,
256,
np.product(ary_in_shp)
),
dtype=np.uint8
).reshape(ary_in_shp)
# Create device buffers
d_in_buf = cl.Buffer(ctx, cl.mem_flags.READ_ONLY, size=np.product(buf_in_shp))
d_out_buf = cl.Buffer(ctx, cl.mem_flags.READ_ONLY, size=np.product(buf_out_shp))
# Copy sub-array (rectangular buffer) from host to device
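    # (host_pitches/buffer_pitches give the row pitch in bytes; for the uint8
    #  data used here that is simply the length of the fastest-varying axis.)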
cl.enqueue_copy(
queue,
d_in_buf,
h_ary_in,
buffer_origin=buf_in_origin[::-1],
host_origin=ary_in_origin[::-1],
region=sub_ary_shp[::-1],
buffer_pitches=(buf_in_shp[-1],),
host_pitches=(ary_in_shp[-1],)
)
# Copy sub-array (rectangular buffer) from device-buffer to device-buffer
cl.enqueue_copy(
queue,
d_out_buf,
d_in_buf,
src_origin=buf_in_origin[::-1],
dst_origin=buf_out_origin[::-1],
region=sub_ary_shp[::-1],
src_pitches=(buf_in_shp[-1],),
dst_pitches=(buf_out_shp[-1],)
)
# Create zero-initialised array to receive sub-array from device
h_ary_out = np.zeros(ary_out_shp, dtype=h_ary_in.dtype)
# Copy sub-array (rectangular buffer) from device to host-array.
cl.enqueue_copy(
queue,
h_ary_out,
d_out_buf,
buffer_origin=buf_out_origin[::-1],
host_origin=ary_out_origin[::-1],
region=sub_ary_shp[::-1],
buffer_pitches=(buf_out_shp[-1],),
host_pitches=(ary_out_shp[-1],)
)
queue.finish()
# Check that the sub-array copied to device is
# the same as the sub-array received from device.
assert np.all(h_ary_in[ary_in_slice] == h_ary_out[ary_out_slice])
def test_enqueue_copy_rect_3d(ctx_factory, honor_skip=True):
"""
Test 3D sub-array (slice) copy.
"""
ctx = ctx_factory()
queue = cl.CommandQueue(ctx)
if (honor_skip
and ctx.devices[0].platform.name == "Portable Computing Language"
and get_pocl_version(ctx.devices[0].platform) <= (0, 13)):
# https://github.com/pocl/pocl/issues/353
pytest.skip("POCL's rectangular copies crash")
ary_in_shp = 256, 128, 31 # array shape from which sub-array copied to device
sub_ary_shp = 128, 96, 20 # Sub-array shape to be copied to device
ary_in_origin = 20, 13, 7 # Sub-array origin
ary_in_slice = generate_slice(ary_in_origin, sub_ary_shp)
ary_out_origin = 11, 19, 14 # Origin of sub-array copy from device to host-array
ary_out_shp = 192, 256, 128 # Entire host-array shape copy sub-array dev->host
ary_out_slice = generate_slice(ary_out_origin, sub_ary_shp)
buf_in_origin = 7, 3, 6 # Origin of sub-array in device buffer
buf_in_shp = 300, 200, 30 # shape of device buffer
buf_out_origin = 31, 17, 3 # Origin of 2nd device buffer
buf_out_shp = 300, 400, 40 # shape of 2nd device buffer
# Create host array of random values.
h_ary_in = \
np.array(
np.random.randint(
0,
256,
np.product(ary_in_shp)
),
dtype=np.uint8
).reshape(ary_in_shp)
# Create device buffers
d_in_buf = cl.Buffer(ctx, cl.mem_flags.READ_ONLY, size=np.product(buf_in_shp))
d_out_buf = cl.Buffer(ctx, cl.mem_flags.READ_ONLY, size=np.product(buf_out_shp))
# Copy sub-array (rectangular buffer) from host to device
cl.enqueue_copy(
queue,
d_in_buf,
h_ary_in,
buffer_origin=buf_in_origin[::-1],
host_origin=ary_in_origin[::-1],
region=sub_ary_shp[::-1],
buffer_pitches=(buf_in_shp[-1], buf_in_shp[-1]*buf_in_shp[-2]),
host_pitches=(ary_in_shp[-1], ary_in_shp[-1]*ary_in_shp[-2])
)
# Copy sub-array (rectangular buffer) from device-buffer to device-buffer
cl.enqueue_copy(
queue,
d_out_buf,
d_in_buf,
src_origin=buf_in_origin[::-1],
dst_origin=buf_out_origin[::-1],
region=sub_ary_shp[::-1],
src_pitches=(buf_in_shp[-1], buf_in_shp[-1]*buf_in_shp[-2]),
dst_pitches=(buf_out_shp[-1], buf_out_shp[-1]*buf_out_shp[-2])
)
# Create zero-initialised array to receive sub-array from device
h_ary_out = np.zeros(ary_out_shp, dtype=h_ary_in.dtype)
# Copy sub-array (rectangular buffer) from device to host-array.
cl.enqueue_copy(
queue,
h_ary_out,
d_out_buf,
buffer_origin=buf_out_origin[::-1],
host_origin=ary_out_origin[::-1],
region=sub_ary_shp[::-1],
buffer_pitches=(buf_out_shp[-1], buf_out_shp[-1]*buf_out_shp[-2]),
host_pitches=(ary_out_shp[-1], ary_out_shp[-1]*ary_out_shp[-2])
)
queue.finish()
# Check that the sub-array copied to device is
# the same as the sub-array received from device.
assert np.array_equal(h_ary_in[ary_in_slice], h_ary_out[ary_out_slice])
if __name__ == "__main__":
# make sure that import failures get reported, instead of skipping the tests.
import pyopencl # noqa
import sys
if len(sys.argv) > 1:
exec(sys.argv[1])
else:
from pytest import main
main([__file__])
| 35.779279 | 85 | 0.665869 |
9d57dbaaaa92851a1fa5fa9a3cb41bd6b821efc9 | 773 | py | Python | integration_tests/test_cfngin/tests/08_blank_namespace.py | troyready/runway | 4fd299961a4b73df39e14f4f19a7236f7be17dd8 | [
"Apache-2.0"
] | null | null | null | integration_tests/test_cfngin/tests/08_blank_namespace.py | troyready/runway | 4fd299961a4b73df39e14f4f19a7236f7be17dd8 | [
"Apache-2.0"
] | null | null | null | integration_tests/test_cfngin/tests/08_blank_namespace.py | troyready/runway | 4fd299961a4b73df39e14f4f19a7236f7be17dd8 | [
"Apache-2.0"
] | null | null | null | """CFNgin test."""
# flake8: noqa
# pylint: disable=invalid-name
from os.path import basename
from integration_tests.test_cfngin.test_cfngin import Cfngin
FILE_BASENAME = ".".join(basename(__file__).split(".")[:-1])
class TestBlankNamespace(Cfngin):
"""Test CFNgin namespace as an empty string.
Requires valid AWS credentials.
"""
REQUIRED_FIXTURE_FILES = [FILE_BASENAME + ".yaml"]
TEST_NAME = __name__
def run(self):
"""Run the test."""
self.copy_fixtures()
code, _stdout, stderr = self.runway_cmd("deploy")
assert code == 0, "exit code should be zero"
def teardown(self):
"""Teardown any created resources and delete files."""
self.runway_cmd("destroy")
self.cleanup_fixtures()
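    # Illustration only: the integration-test harness is assumed to
    # instantiate this class and call run() followed by teardown(); a
    # non-zero exit code from `runway deploy` fails the assertion in run().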
| 24.935484 | 62 | 0.659767 |
350a4fd153af288d10b30c76ecdd1feb9651a26d | 56,528 | py | Python | moto/s3/models.py | pt-arvind/moto | 2f2cb367aa434edbd8bbf53a2cdfaddd03ca578a | [
"Apache-2.0"
] | null | null | null | moto/s3/models.py | pt-arvind/moto | 2f2cb367aa434edbd8bbf53a2cdfaddd03ca578a | [
"Apache-2.0"
] | null | null | null | moto/s3/models.py | pt-arvind/moto | 2f2cb367aa434edbd8bbf53a2cdfaddd03ca578a | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import json
import os
import base64
import datetime
import pytz
import hashlib
import copy
import itertools
import codecs
import random
import string
import tempfile
import threading
import sys
import time
import uuid
import six
from bisect import insort
from moto.core import ACCOUNT_ID, BaseBackend, BaseModel
from moto.core.utils import iso_8601_datetime_with_milliseconds, rfc_1123_datetime
from moto.cloudwatch.models import MetricDatum
from moto.utilities.tagging_service import TaggingService
from .exceptions import (
BucketAlreadyExists,
MissingBucket,
InvalidBucketName,
InvalidPart,
InvalidRequest,
EntityTooSmall,
MissingKey,
InvalidNotificationDestination,
MalformedXML,
InvalidStorageClass,
InvalidTargetBucketForLogging,
CrossLocationLoggingProhibitted,
NoSuchPublicAccessBlockConfiguration,
InvalidPublicAccessBlockConfiguration,
WrongPublicAccessBlockAccountIdError,
NoSuchUpload,
)
from .utils import clean_key_name, _VersionedKeyStore
MAX_BUCKET_NAME_LENGTH = 63
MIN_BUCKET_NAME_LENGTH = 3
UPLOAD_ID_BYTES = 43
UPLOAD_PART_MIN_SIZE = 5242880
STORAGE_CLASS = [
"STANDARD",
"REDUCED_REDUNDANCY",
"STANDARD_IA",
"ONEZONE_IA",
"INTELLIGENT_TIERING",
"GLACIER",
"DEEP_ARCHIVE",
]
DEFAULT_KEY_BUFFER_SIZE = 16 * 1024 * 1024
DEFAULT_TEXT_ENCODING = sys.getdefaultencoding()
OWNER = "75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a"
def get_moto_s3_account_id():
"""This makes it easy for mocking AWS Account IDs when using AWS Config
-- Simply mock.patch the ACCOUNT_ID here, and Config gets it for free.
"""
return ACCOUNT_ID
class FakeDeleteMarker(BaseModel):
def __init__(self, key):
self.key = key
self.name = key.name
self.last_modified = datetime.datetime.utcnow()
self._version_id = str(uuid.uuid4())
@property
def last_modified_ISO8601(self):
return iso_8601_datetime_with_milliseconds(self.last_modified)
@property
def version_id(self):
return self._version_id
class FakeKey(BaseModel):
def __init__(
self,
name,
value,
storage="STANDARD",
etag=None,
is_versioned=False,
version_id=0,
max_buffer_size=DEFAULT_KEY_BUFFER_SIZE,
multipart=None,
bucket_name=None,
):
self.name = name
self.last_modified = datetime.datetime.utcnow()
self.acl = get_canned_acl("private")
self.website_redirect_location = None
self._storage_class = storage if storage else "STANDARD"
self._metadata = {}
self._expiry = None
self._etag = etag
self._version_id = version_id
self._is_versioned = is_versioned
self.multipart = multipart
self.bucket_name = bucket_name
self._value_buffer = tempfile.SpooledTemporaryFile(max_size=max_buffer_size)
self._max_buffer_size = max_buffer_size
self.value = value
self.lock = threading.Lock()
@property
def version_id(self):
return self._version_id
@property
def value(self):
self.lock.acquire()
self._value_buffer.seek(0)
r = self._value_buffer.read()
r = copy.copy(r)
self.lock.release()
return r
@property
def arn(self):
# S3 Objects don't have an ARN, but we do need something unique when creating tags against this resource
return "arn:aws:s3:::{}/{}/{}".format(
self.bucket_name, self.name, self.version_id
)
@value.setter
def value(self, new_value):
self._value_buffer.seek(0)
self._value_buffer.truncate()
# Hack for working around moto's own unit tests; this probably won't
# actually get hit in normal use.
if isinstance(new_value, six.text_type):
new_value = new_value.encode(DEFAULT_TEXT_ENCODING)
self._value_buffer.write(new_value)
self.contentsize = len(new_value)
def copy(self, new_name=None, new_is_versioned=None):
r = copy.deepcopy(self)
if new_name is not None:
r.name = new_name
if new_is_versioned is not None:
r._is_versioned = new_is_versioned
r.refresh_version()
return r
def set_metadata(self, metadata, replace=False):
if replace:
self._metadata = {}
self._metadata.update(metadata)
def set_storage_class(self, storage):
if storage is not None and storage not in STORAGE_CLASS:
raise InvalidStorageClass(storage=storage)
self._storage_class = storage
def set_acl(self, acl):
self.acl = acl
def append_to_value(self, value):
self.contentsize += len(value)
self._value_buffer.seek(0, os.SEEK_END)
self._value_buffer.write(value)
self.last_modified = datetime.datetime.utcnow()
self._etag = None # must recalculate etag
if self._is_versioned:
self._version_id = str(uuid.uuid4())
else:
self._version_id = None
def restore(self, days):
self._expiry = datetime.datetime.utcnow() + datetime.timedelta(days)
def refresh_version(self):
self._version_id = str(uuid.uuid4())
self.last_modified = datetime.datetime.utcnow()
@property
def etag(self):
if self._etag is None:
value_md5 = hashlib.md5()
self._value_buffer.seek(0)
while True:
block = self._value_buffer.read(DEFAULT_KEY_BUFFER_SIZE)
if not block:
break
value_md5.update(block)
self._etag = value_md5.hexdigest()
return '"{0}"'.format(self._etag)
@property
def last_modified_ISO8601(self):
return iso_8601_datetime_with_milliseconds(self.last_modified)
@property
def last_modified_RFC1123(self):
# Different datetime formats depending on how the key is obtained
# https://github.com/boto/boto/issues/466
return rfc_1123_datetime(self.last_modified)
@property
def metadata(self):
return self._metadata
@property
def response_dict(self):
res = {
"ETag": self.etag,
"last-modified": self.last_modified_RFC1123,
"content-length": str(self.size),
}
if self._storage_class != "STANDARD":
res["x-amz-storage-class"] = self._storage_class
if self._expiry is not None:
rhdr = 'ongoing-request="false", expiry-date="{0}"'
res["x-amz-restore"] = rhdr.format(self.expiry_date)
if self._is_versioned:
res["x-amz-version-id"] = str(self.version_id)
if self.website_redirect_location:
res["x-amz-website-redirect-location"] = self.website_redirect_location
return res
@property
def size(self):
return self.contentsize
@property
def storage_class(self):
return self._storage_class
@property
def expiry_date(self):
if self._expiry is not None:
return self._expiry.strftime("%a, %d %b %Y %H:%M:%S GMT")
# Keys need to be pickleable due to some implementation details of boto3.
# Since file objects aren't pickleable, we need to override the default
# behavior. The following is adapted from the Python docs:
# https://docs.python.org/3/library/pickle.html#handling-stateful-objects
def __getstate__(self):
state = self.__dict__.copy()
state["value"] = self.value
del state["_value_buffer"]
del state["lock"]
return state
def __setstate__(self, state):
self.__dict__.update({k: v for k, v in six.iteritems(state) if k != "value"})
self._value_buffer = tempfile.SpooledTemporaryFile(
max_size=self._max_buffer_size
)
self.value = state["value"]
self.lock = threading.Lock()
class FakeMultipart(BaseModel):
def __init__(self, key_name, metadata):
self.key_name = key_name
self.metadata = metadata
self.parts = {}
self.partlist = [] # ordered list of part ID's
rand_b64 = base64.b64encode(os.urandom(UPLOAD_ID_BYTES))
self.id = rand_b64.decode("utf-8").replace("=", "").replace("+", "")
def complete(self, body):
decode_hex = codecs.getdecoder("hex_codec")
total = bytearray()
md5s = bytearray()
last = None
count = 0
for pn, etag in body:
part = self.parts.get(pn)
part_etag = None
if part is not None:
part_etag = part.etag.replace('"', "")
etag = etag.replace('"', "")
if part is None or part_etag != etag:
raise InvalidPart()
if last is not None and last.contentsize < UPLOAD_PART_MIN_SIZE:
raise EntityTooSmall()
md5s.extend(decode_hex(part_etag)[0])
total.extend(part.value)
last = part
count += 1
etag = hashlib.md5()
etag.update(bytes(md5s))
return total, "{0}-{1}".format(etag.hexdigest(), count)
def set_part(self, part_id, value):
if part_id < 1:
return
key = FakeKey(part_id, value)
self.parts[part_id] = key
if part_id not in self.partlist:
insort(self.partlist, part_id)
return key
def list_parts(self):
for part_id in self.partlist:
yield self.parts[part_id]
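# Sketch (illustration only, not used by moto itself): how the multipart ETag
# returned by FakeMultipart.complete() above is derived. S3-style multipart
# ETags are the MD5 of the concatenated raw part digests, suffixed with
# "-<part count>"; `part_digests` is a hypothetical list of hex MD5 strings.
def _example_multipart_etag(part_digests):
    decode_hex = codecs.getdecoder("hex_codec")
    md5s = bytearray()
    for digest in part_digests:
        # each part contributes its raw 16-byte MD5 digest
        md5s.extend(decode_hex(digest)[0])
    return "{0}-{1}".format(hashlib.md5(bytes(md5s)).hexdigest(), len(part_digests))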
class FakeGrantee(BaseModel):
def __init__(self, id="", uri="", display_name=""):
self.id = id
self.uri = uri
self.display_name = display_name
def __eq__(self, other):
if not isinstance(other, FakeGrantee):
return False
return (
self.id == other.id
and self.uri == other.uri
and self.display_name == other.display_name
)
@property
def type(self):
return "Group" if self.uri else "CanonicalUser"
def __repr__(self):
return "FakeGrantee(display_name: '{}', id: '{}', uri: '{}')".format(
self.display_name, self.id, self.uri
)
ALL_USERS_GRANTEE = FakeGrantee(uri="http://acs.amazonaws.com/groups/global/AllUsers")
AUTHENTICATED_USERS_GRANTEE = FakeGrantee(
uri="http://acs.amazonaws.com/groups/global/AuthenticatedUsers"
)
LOG_DELIVERY_GRANTEE = FakeGrantee(uri="http://acs.amazonaws.com/groups/s3/LogDelivery")
PERMISSION_FULL_CONTROL = "FULL_CONTROL"
PERMISSION_WRITE = "WRITE"
PERMISSION_READ = "READ"
PERMISSION_WRITE_ACP = "WRITE_ACP"
PERMISSION_READ_ACP = "READ_ACP"
CAMEL_CASED_PERMISSIONS = {
"FULL_CONTROL": "FullControl",
"WRITE": "Write",
"READ": "Read",
"WRITE_ACP": "WriteAcp",
"READ_ACP": "ReadAcp",
}
class FakeGrant(BaseModel):
def __init__(self, grantees, permissions):
self.grantees = grantees
self.permissions = permissions
def __repr__(self):
return "FakeGrant(grantees: {}, permissions: {})".format(
self.grantees, self.permissions
)
class FakeAcl(BaseModel):
def __init__(self, grants=None):
grants = grants or []
self.grants = grants
@property
def public_read(self):
for grant in self.grants:
if ALL_USERS_GRANTEE in grant.grantees:
if PERMISSION_READ in grant.permissions:
return True
if PERMISSION_FULL_CONTROL in grant.permissions:
return True
return False
def __repr__(self):
return "FakeAcl(grants: {})".format(self.grants)
def to_config_dict(self):
"""Returns the object into the format expected by AWS Config"""
data = {
"grantSet": None, # Always setting this to None. Feel free to change.
"owner": {"displayName": None, "id": OWNER},
}
# Add details for each Grant:
grant_list = []
for grant in self.grants:
permissions = (
grant.permissions
if isinstance(grant.permissions, list)
else [grant.permissions]
)
for permission in permissions:
for grantee in grant.grantees:
# Config does not add the owner if its permissions are FULL_CONTROL:
if permission == "FULL_CONTROL" and grantee.id == OWNER:
continue
if grantee.uri:
grant_list.append(
{
"grantee": grantee.uri.split(
"http://acs.amazonaws.com/groups/s3/"
)[1],
"permission": CAMEL_CASED_PERMISSIONS[permission],
}
)
else:
grant_list.append(
{
"grantee": {
"id": grantee.id,
"displayName": None
if not grantee.display_name
else grantee.display_name,
},
"permission": CAMEL_CASED_PERMISSIONS[permission],
}
)
if grant_list:
data["grantList"] = grant_list
return data
def get_canned_acl(acl):
owner_grantee = FakeGrantee(id=OWNER)
grants = [FakeGrant([owner_grantee], [PERMISSION_FULL_CONTROL])]
if acl == "private":
pass # no other permissions
elif acl == "public-read":
grants.append(FakeGrant([ALL_USERS_GRANTEE], [PERMISSION_READ]))
elif acl == "public-read-write":
grants.append(
FakeGrant([ALL_USERS_GRANTEE], [PERMISSION_READ, PERMISSION_WRITE])
)
elif acl == "authenticated-read":
grants.append(FakeGrant([AUTHENTICATED_USERS_GRANTEE], [PERMISSION_READ]))
elif acl == "bucket-owner-read":
pass # TODO: bucket owner ACL
elif acl == "bucket-owner-full-control":
pass # TODO: bucket owner ACL
elif acl == "aws-exec-read":
pass # TODO: bucket owner, EC2 Read
elif acl == "log-delivery-write":
grants.append(
FakeGrant([LOG_DELIVERY_GRANTEE], [PERMISSION_READ_ACP, PERMISSION_WRITE])
)
else:
assert False, "Unknown canned acl: %s" % (acl,)
return FakeAcl(grants=grants)
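# Sketch (illustration only): the "public-read" canned ACL built above pairs
# the owner's FULL_CONTROL grant with a READ grant for the AllUsers group, so
# FakeAcl.public_read evaluates to True for it.
def _example_public_read_is_public():
    acl = get_canned_acl("public-read")
    assert acl.public_read
    return acl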
class LifecycleFilter(BaseModel):
def __init__(self, prefix=None, tag=None, and_filter=None):
self.prefix = prefix
(self.tag_key, self.tag_value) = tag if tag else (None, None)
self.and_filter = and_filter
def to_config_dict(self):
if self.prefix is not None:
return {
"predicate": {"type": "LifecyclePrefixPredicate", "prefix": self.prefix}
}
elif self.tag_key:
return {
"predicate": {
"type": "LifecycleTagPredicate",
"tag": {"key": self.tag_key, "value": self.tag_value},
}
}
else:
return {
"predicate": {
"type": "LifecycleAndOperator",
"operands": self.and_filter.to_config_dict(),
}
}
class LifecycleAndFilter(BaseModel):
def __init__(self, prefix=None, tags=None):
self.prefix = prefix
self.tags = tags
def to_config_dict(self):
data = []
if self.prefix is not None:
data.append({"type": "LifecyclePrefixPredicate", "prefix": self.prefix})
for key, value in self.tags.items():
data.append(
{"type": "LifecycleTagPredicate", "tag": {"key": key, "value": value},}
)
return data
class LifecycleRule(BaseModel):
def __init__(
self,
id=None,
prefix=None,
lc_filter=None,
status=None,
expiration_days=None,
expiration_date=None,
transition_days=None,
transition_date=None,
storage_class=None,
expired_object_delete_marker=None,
nve_noncurrent_days=None,
nvt_noncurrent_days=None,
nvt_storage_class=None,
aimu_days=None,
):
self.id = id
self.prefix = prefix
self.filter = lc_filter
self.status = status
self.expiration_days = expiration_days
self.expiration_date = expiration_date
self.transition_days = transition_days
self.transition_date = transition_date
self.storage_class = storage_class
self.expired_object_delete_marker = expired_object_delete_marker
self.nve_noncurrent_days = nve_noncurrent_days
self.nvt_noncurrent_days = nvt_noncurrent_days
self.nvt_storage_class = nvt_storage_class
self.aimu_days = aimu_days
def to_config_dict(self):
"""Converts the object to the AWS Config data dict.
        Note: the following are missing and should be added in the future:
- transitions (returns None for now)
- noncurrentVersionTransitions (returns None for now)
:param kwargs:
:return:
"""
lifecycle_dict = {
"id": self.id,
"prefix": self.prefix,
"status": self.status,
"expirationInDays": int(self.expiration_days)
if self.expiration_days
else None,
"expiredObjectDeleteMarker": self.expired_object_delete_marker,
"noncurrentVersionExpirationInDays": -1 or int(self.nve_noncurrent_days),
"expirationDate": self.expiration_date,
"transitions": None, # Replace me with logic to fill in
"noncurrentVersionTransitions": None, # Replace me with logic to fill in
}
if self.aimu_days:
lifecycle_dict["abortIncompleteMultipartUpload"] = {
"daysAfterInitiation": self.aimu_days
}
else:
lifecycle_dict["abortIncompleteMultipartUpload"] = None
# Format the filter:
if self.prefix is None and self.filter is None:
lifecycle_dict["filter"] = {"predicate": None}
elif self.prefix:
lifecycle_dict["filter"] = None
else:
lifecycle_dict["filter"] = self.filter.to_config_dict()
return lifecycle_dict
class CorsRule(BaseModel):
def __init__(
self,
allowed_methods,
allowed_origins,
allowed_headers=None,
expose_headers=None,
max_age_seconds=None,
):
self.allowed_methods = (
[allowed_methods]
if isinstance(allowed_methods, six.string_types)
else allowed_methods
)
self.allowed_origins = (
[allowed_origins]
if isinstance(allowed_origins, six.string_types)
else allowed_origins
)
self.allowed_headers = (
[allowed_headers]
if isinstance(allowed_headers, six.string_types)
else allowed_headers
)
self.exposed_headers = (
[expose_headers]
if isinstance(expose_headers, six.string_types)
else expose_headers
)
self.max_age_seconds = max_age_seconds
class Notification(BaseModel):
def __init__(self, arn, events, filters=None, id=None):
self.id = (
id
if id
else "".join(
random.choice(string.ascii_letters + string.digits) for _ in range(50)
)
)
self.arn = arn
self.events = events
self.filters = filters if filters else {}
def to_config_dict(self):
data = {}
# Type and ARN will be filled in by NotificationConfiguration's to_config_dict:
data["events"] = [event for event in self.events]
if self.filters:
data["filter"] = {
"s3KeyFilter": {
"filterRules": [
{"name": fr["Name"], "value": fr["Value"]}
for fr in self.filters["S3Key"]["FilterRule"]
]
}
}
else:
data["filter"] = None
# Not sure why this is a thing since AWS just seems to return this as filters ¯\_(ツ)_/¯
data["objectPrefixes"] = []
return data
class NotificationConfiguration(BaseModel):
def __init__(self, topic=None, queue=None, cloud_function=None):
self.topic = (
[
Notification(
t["Topic"], t["Event"], filters=t.get("Filter"), id=t.get("Id")
)
for t in topic
]
if topic
else []
)
self.queue = (
[
Notification(
q["Queue"], q["Event"], filters=q.get("Filter"), id=q.get("Id")
)
for q in queue
]
if queue
else []
)
self.cloud_function = (
[
Notification(
c["CloudFunction"],
c["Event"],
filters=c.get("Filter"),
id=c.get("Id"),
)
for c in cloud_function
]
if cloud_function
else []
)
def to_config_dict(self):
data = {"configurations": {}}
for topic in self.topic:
topic_config = topic.to_config_dict()
topic_config["topicARN"] = topic.arn
topic_config["type"] = "TopicConfiguration"
data["configurations"][topic.id] = topic_config
for queue in self.queue:
queue_config = queue.to_config_dict()
queue_config["queueARN"] = queue.arn
queue_config["type"] = "QueueConfiguration"
data["configurations"][queue.id] = queue_config
for cloud_function in self.cloud_function:
cf_config = cloud_function.to_config_dict()
cf_config["queueARN"] = cloud_function.arn
cf_config["type"] = "LambdaConfiguration"
data["configurations"][cloud_function.id] = cf_config
return data
def convert_str_to_bool(item):
"""Converts a boolean string to a boolean value"""
if isinstance(item, str):
return item.lower() == "true"
return False
class PublicAccessBlock(BaseModel):
def __init__(
self,
block_public_acls,
ignore_public_acls,
block_public_policy,
restrict_public_buckets,
):
# The boto XML appears to expect these values to exist as lowercase strings...
self.block_public_acls = block_public_acls or "false"
self.ignore_public_acls = ignore_public_acls or "false"
self.block_public_policy = block_public_policy or "false"
self.restrict_public_buckets = restrict_public_buckets or "false"
def to_config_dict(self):
# Need to make the string values booleans for Config:
return {
"blockPublicAcls": convert_str_to_bool(self.block_public_acls),
"ignorePublicAcls": convert_str_to_bool(self.ignore_public_acls),
"blockPublicPolicy": convert_str_to_bool(self.block_public_policy),
"restrictPublicBuckets": convert_str_to_bool(self.restrict_public_buckets),
}
class FakeBucket(BaseModel):
def __init__(self, name, region_name):
self.name = name
self.region_name = region_name
self.keys = _VersionedKeyStore()
self.multiparts = {}
self.versioning_status = None
self.rules = []
self.policy = None
self.website_configuration = None
self.acl = get_canned_acl("private")
self.cors = []
self.logging = {}
self.notification_configuration = None
self.accelerate_configuration = None
self.payer = "BucketOwner"
self.creation_date = datetime.datetime.now(tz=pytz.utc)
self.public_access_block = None
self.encryption = None
@property
def location(self):
return self.region_name
@property
def is_versioned(self):
return self.versioning_status == "Enabled"
def set_lifecycle(self, rules):
self.rules = []
for rule in rules:
# Extract and validate actions from Lifecycle rule
expiration = rule.get("Expiration")
transition = rule.get("Transition")
try:
top_level_prefix = (
rule["Prefix"] or ""
                )  # If it's `None` then set to the empty string
except KeyError:
top_level_prefix = None
nve_noncurrent_days = None
if rule.get("NoncurrentVersionExpiration") is not None:
if rule["NoncurrentVersionExpiration"].get("NoncurrentDays") is None:
raise MalformedXML()
nve_noncurrent_days = rule["NoncurrentVersionExpiration"][
"NoncurrentDays"
]
nvt_noncurrent_days = None
nvt_storage_class = None
if rule.get("NoncurrentVersionTransition") is not None:
if rule["NoncurrentVersionTransition"].get("NoncurrentDays") is None:
raise MalformedXML()
if rule["NoncurrentVersionTransition"].get("StorageClass") is None:
raise MalformedXML()
nvt_noncurrent_days = rule["NoncurrentVersionTransition"][
"NoncurrentDays"
]
nvt_storage_class = rule["NoncurrentVersionTransition"]["StorageClass"]
aimu_days = None
if rule.get("AbortIncompleteMultipartUpload") is not None:
if (
rule["AbortIncompleteMultipartUpload"].get("DaysAfterInitiation")
is None
):
raise MalformedXML()
aimu_days = rule["AbortIncompleteMultipartUpload"][
"DaysAfterInitiation"
]
eodm = None
if expiration and expiration.get("ExpiredObjectDeleteMarker") is not None:
# This cannot be set if Date or Days is set:
if expiration.get("Days") or expiration.get("Date"):
raise MalformedXML()
eodm = expiration["ExpiredObjectDeleteMarker"]
# Pull out the filter:
lc_filter = None
if rule.get("Filter"):
# Can't have both `Filter` and `Prefix` (need to check for the presence of the key):
try:
# 'Prefix' cannot be outside of a Filter:
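                    # (truthy for any present value -- together with the
                    #  KeyError handler below this only tests key presence)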
if rule["Prefix"] or not rule["Prefix"]:
raise MalformedXML()
except KeyError:
pass
filters = 0
try:
prefix_filter = (
rule["Filter"]["Prefix"] or ""
                    )  # If it's `None` then set to the empty string
filters += 1
except KeyError:
prefix_filter = None
and_filter = None
if rule["Filter"].get("And"):
filters += 1
and_tags = {}
if rule["Filter"]["And"].get("Tag"):
if not isinstance(rule["Filter"]["And"]["Tag"], list):
rule["Filter"]["And"]["Tag"] = [
rule["Filter"]["And"]["Tag"]
]
for t in rule["Filter"]["And"]["Tag"]:
and_tags[t["Key"]] = t.get("Value", "")
try:
and_prefix = (
rule["Filter"]["And"]["Prefix"] or ""
) # If it's `None` then set to the empty string
except KeyError:
and_prefix = None
and_filter = LifecycleAndFilter(prefix=and_prefix, tags=and_tags)
filter_tag = None
if rule["Filter"].get("Tag"):
filters += 1
filter_tag = (
rule["Filter"]["Tag"]["Key"],
rule["Filter"]["Tag"].get("Value", ""),
)
# Can't have more than 1 filter:
if filters > 1:
raise MalformedXML()
lc_filter = LifecycleFilter(
prefix=prefix_filter, tag=filter_tag, and_filter=and_filter
)
# If no top level prefix and no filter is present, then this is invalid:
if top_level_prefix is None:
try:
rule["Filter"]
except KeyError:
raise MalformedXML()
self.rules.append(
LifecycleRule(
id=rule.get("ID"),
prefix=top_level_prefix,
lc_filter=lc_filter,
status=rule["Status"],
expiration_days=expiration.get("Days") if expiration else None,
expiration_date=expiration.get("Date") if expiration else None,
transition_days=transition.get("Days") if transition else None,
transition_date=transition.get("Date") if transition else None,
storage_class=transition.get("StorageClass")
if transition
else None,
expired_object_delete_marker=eodm,
nve_noncurrent_days=nve_noncurrent_days,
nvt_noncurrent_days=nvt_noncurrent_days,
nvt_storage_class=nvt_storage_class,
aimu_days=aimu_days,
)
)
def delete_lifecycle(self):
self.rules = []
def set_cors(self, rules):
self.cors = []
if len(rules) > 100:
raise MalformedXML()
for rule in rules:
assert isinstance(rule["AllowedMethod"], list) or isinstance(
rule["AllowedMethod"], six.string_types
)
assert isinstance(rule["AllowedOrigin"], list) or isinstance(
rule["AllowedOrigin"], six.string_types
)
assert isinstance(rule.get("AllowedHeader", []), list) or isinstance(
rule.get("AllowedHeader", ""), six.string_types
)
assert isinstance(rule.get("ExposedHeader", []), list) or isinstance(
rule.get("ExposedHeader", ""), six.string_types
)
assert isinstance(rule.get("MaxAgeSeconds", "0"), six.string_types)
if isinstance(rule["AllowedMethod"], six.string_types):
methods = [rule["AllowedMethod"]]
else:
methods = rule["AllowedMethod"]
for method in methods:
if method not in ["GET", "PUT", "HEAD", "POST", "DELETE"]:
raise InvalidRequest(method)
self.cors.append(
CorsRule(
rule["AllowedMethod"],
rule["AllowedOrigin"],
rule.get("AllowedHeader"),
rule.get("ExposedHeader"),
rule.get("MaxAgeSecond"),
)
)
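        # Illustration only: the parsed payload handed to set_cors() is a list
        # of dicts mirroring the CORS XML, e.g. (hypothetical values):
        #   [{"AllowedMethod": ["GET", "PUT"],
        #     "AllowedOrigin": "https://example.com",
        #     "AllowedHeader": "*",
        #     "MaxAgeSeconds": "3000"}]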
def delete_cors(self):
self.cors = []
def set_logging(self, logging_config, bucket_backend):
if not logging_config:
self.logging = {}
return
# Target bucket must exist in the same account (assuming all moto buckets are in the same account):
if not bucket_backend.buckets.get(logging_config["TargetBucket"]):
raise InvalidTargetBucketForLogging(
"The target bucket for logging does not exist."
)
# Does the target bucket have the log-delivery WRITE and READ_ACP permissions?
write = read_acp = False
for grant in bucket_backend.buckets[logging_config["TargetBucket"]].acl.grants:
# Must be granted to: http://acs.amazonaws.com/groups/s3/LogDelivery
for grantee in grant.grantees:
if grantee.uri == "http://acs.amazonaws.com/groups/s3/LogDelivery":
if (
"WRITE" in grant.permissions
or "FULL_CONTROL" in grant.permissions
):
write = True
if (
"READ_ACP" in grant.permissions
or "FULL_CONTROL" in grant.permissions
):
read_acp = True
break
if not write or not read_acp:
raise InvalidTargetBucketForLogging(
"You must give the log-delivery group WRITE and READ_ACP"
" permissions to the target bucket"
)
# Buckets must also exist within the same region:
if (
bucket_backend.buckets[logging_config["TargetBucket"]].region_name
!= self.region_name
):
raise CrossLocationLoggingProhibitted()
# Checks pass -- set the logging config:
self.logging = logging_config
def set_notification_configuration(self, notification_config):
if not notification_config:
self.notification_configuration = None
return
self.notification_configuration = NotificationConfiguration(
topic=notification_config.get("TopicConfiguration"),
queue=notification_config.get("QueueConfiguration"),
cloud_function=notification_config.get("CloudFunctionConfiguration"),
)
# Validate that the region is correct:
for thing in ["topic", "queue", "cloud_function"]:
for t in getattr(self.notification_configuration, thing):
region = t.arn.split(":")[3]
if region != self.region_name:
raise InvalidNotificationDestination()
def set_accelerate_configuration(self, accelerate_config):
if self.accelerate_configuration is None and accelerate_config == "Suspended":
# Cannot "suspend" a not active acceleration. Leaves it undefined
return
self.accelerate_configuration = accelerate_config
def set_website_configuration(self, website_configuration):
self.website_configuration = website_configuration
def get_cfn_attribute(self, attribute_name):
from moto.cloudformation.exceptions import UnformattedGetAttTemplateException
if attribute_name == "DomainName":
raise NotImplementedError('"Fn::GetAtt" : [ "{0}" , "DomainName" ]"')
elif attribute_name == "WebsiteURL":
raise NotImplementedError('"Fn::GetAtt" : [ "{0}" , "WebsiteURL" ]"')
raise UnformattedGetAttTemplateException()
def set_acl(self, acl):
self.acl = acl
@property
def arn(self):
return "arn:aws:s3:::{}".format(self.name)
@property
def physical_resource_id(self):
return self.name
@classmethod
def create_from_cloudformation_json(
cls, resource_name, cloudformation_json, region_name
):
bucket = s3_backend.create_bucket(resource_name, region_name)
return bucket
def to_config_dict(self):
"""Return the AWS Config JSON format of this S3 bucket.
Note: The following features are not implemented and will need to be if you care about them:
- Bucket Accelerate Configuration
"""
config_dict = {
"version": "1.3",
"configurationItemCaptureTime": str(self.creation_date),
"configurationItemStatus": "ResourceDiscovered",
"configurationStateId": str(
int(time.mktime(self.creation_date.timetuple()))
), # PY2 and 3 compatible
"configurationItemMD5Hash": "",
"arn": self.arn,
"resourceType": "AWS::S3::Bucket",
"resourceId": self.name,
"resourceName": self.name,
"awsRegion": self.region_name,
"availabilityZone": "Regional",
"resourceCreationTime": str(self.creation_date),
"relatedEvents": [],
"relationships": [],
"tags": s3_backend.tagger.get_tag_dict_for_resource(self.arn),
"configuration": {
"name": self.name,
"owner": {"id": OWNER},
"creationDate": self.creation_date.isoformat(),
},
}
# Make the supplementary configuration:
# This is a double-wrapped JSON for some reason...
s_config = {
"AccessControlList": json.dumps(json.dumps(self.acl.to_config_dict()))
}
if self.public_access_block:
s_config["PublicAccessBlockConfiguration"] = json.dumps(
self.public_access_block.to_config_dict()
)
# Tagging is special:
if config_dict["tags"]:
s_config["BucketTaggingConfiguration"] = json.dumps(
{"tagSets": [{"tags": config_dict["tags"]}]}
)
# TODO implement Accelerate Configuration:
s_config["BucketAccelerateConfiguration"] = {"status": None}
if self.rules:
s_config["BucketLifecycleConfiguration"] = {
"rules": [rule.to_config_dict() for rule in self.rules]
}
s_config["BucketLoggingConfiguration"] = {
"destinationBucketName": self.logging.get("TargetBucket", None),
"logFilePrefix": self.logging.get("TargetPrefix", None),
}
s_config["BucketPolicy"] = {
"policyText": self.policy.decode("utf-8") if self.policy else None
}
s_config["IsRequesterPaysEnabled"] = (
"false" if self.payer == "BucketOwner" else "true"
)
if self.notification_configuration:
s_config[
"BucketNotificationConfiguration"
] = self.notification_configuration.to_config_dict()
else:
s_config["BucketNotificationConfiguration"] = {"configurations": {}}
config_dict["supplementaryConfiguration"] = s_config
return config_dict
class S3Backend(BaseBackend):
def __init__(self):
self.buckets = {}
self.account_public_access_block = None
self.tagger = TaggingService()
# TODO: This is broken! DO NOT IMPORT MUTABLE DATA TYPES FROM OTHER AREAS -- THIS BREAKS UNMOCKING!
# WRAP WITH A GETTER/SETTER FUNCTION
# Register this class as a CloudWatch Metric Provider
# Must provide a method 'get_cloudwatch_metrics' that will return a list of metrics, based on the data available
# metric_providers["S3"] = self
def get_cloudwatch_metrics(self):
metrics = []
for name, bucket in self.buckets.items():
metrics.append(
MetricDatum(
namespace="AWS/S3",
name="BucketSizeBytes",
value=bucket.keys.item_size(),
dimensions=[
{"Name": "StorageType", "Value": "StandardStorage"},
{"Name": "BucketName", "Value": name},
],
timestamp=datetime.datetime.now(),
)
)
metrics.append(
MetricDatum(
namespace="AWS/S3",
name="NumberOfObjects",
value=len(bucket.keys),
dimensions=[
{"Name": "StorageType", "Value": "AllStorageTypes"},
{"Name": "BucketName", "Value": name},
],
timestamp=datetime.datetime.now(),
)
)
return metrics
def create_bucket(self, bucket_name, region_name):
if bucket_name in self.buckets:
raise BucketAlreadyExists(bucket=bucket_name)
if not MIN_BUCKET_NAME_LENGTH <= len(bucket_name) <= MAX_BUCKET_NAME_LENGTH:
raise InvalidBucketName()
new_bucket = FakeBucket(name=bucket_name, region_name=region_name)
self.buckets[bucket_name] = new_bucket
return new_bucket
def get_all_buckets(self):
return self.buckets.values()
def get_bucket(self, bucket_name):
try:
return self.buckets[bucket_name]
except KeyError:
raise MissingBucket(bucket=bucket_name)
def delete_bucket(self, bucket_name):
bucket = self.get_bucket(bucket_name)
if bucket.keys:
# Can't delete a bucket with keys
return False
else:
return self.buckets.pop(bucket_name)
def set_bucket_versioning(self, bucket_name, status):
self.get_bucket(bucket_name).versioning_status = status
def get_bucket_versioning(self, bucket_name):
return self.get_bucket(bucket_name).versioning_status
def get_bucket_encryption(self, bucket_name):
return self.get_bucket(bucket_name).encryption
def get_bucket_latest_versions(self, bucket_name):
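# For each key, track the newest last_modified timestamp and record the version id that achieved it.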
versions = self.get_bucket_versions(bucket_name)
latest_modified_per_key = {}
latest_versions = {}
for version in versions:
name = version.name
last_modified = version.last_modified
version_id = version.version_id
latest_modified_per_key[name] = max(
last_modified, latest_modified_per_key.get(name, datetime.datetime.min)
)
if last_modified == latest_modified_per_key[name]:
latest_versions[name] = version_id
return latest_versions
def get_bucket_versions(
self,
bucket_name,
delimiter=None,
encoding_type=None,
key_marker=None,
max_keys=None,
version_id_marker=None,
prefix="",
):
bucket = self.get_bucket(bucket_name)
if any((delimiter, key_marker, version_id_marker)):
raise NotImplementedError(
"Called get_bucket_versions with some of delimiter, key_marker, version_id_marker"
)
return itertools.chain(
*(l for key, l in bucket.keys.iterlists() if key.startswith(prefix))
)
def get_bucket_policy(self, bucket_name):
return self.get_bucket(bucket_name).policy
def set_bucket_policy(self, bucket_name, policy):
self.get_bucket(bucket_name).policy = policy
def delete_bucket_policy(self, bucket_name, body):
bucket = self.get_bucket(bucket_name)
bucket.policy = None
def put_bucket_encryption(self, bucket_name, encryption):
self.get_bucket(bucket_name).encryption = encryption
def delete_bucket_encryption(self, bucket_name):
self.get_bucket(bucket_name).encryption = None
def set_bucket_lifecycle(self, bucket_name, rules):
bucket = self.get_bucket(bucket_name)
bucket.set_lifecycle(rules)
def set_bucket_website_configuration(self, bucket_name, website_configuration):
bucket = self.get_bucket(bucket_name)
bucket.set_website_configuration(website_configuration)
def get_bucket_website_configuration(self, bucket_name):
bucket = self.get_bucket(bucket_name)
return bucket.website_configuration
def get_bucket_public_access_block(self, bucket_name):
bucket = self.get_bucket(bucket_name)
if not bucket.public_access_block:
raise NoSuchPublicAccessBlockConfiguration()
return bucket.public_access_block
def get_account_public_access_block(self, account_id):
# The account ID should equal the account id that is set for Moto:
if account_id != ACCOUNT_ID:
raise WrongPublicAccessBlockAccountIdError()
if not self.account_public_access_block:
raise NoSuchPublicAccessBlockConfiguration()
return self.account_public_access_block
def set_object(
self, bucket_name, key_name, value, storage=None, etag=None, multipart=None
):
key_name = clean_key_name(key_name)
if storage is not None and storage not in STORAGE_CLASS:
raise InvalidStorageClass(storage=storage)
bucket = self.get_bucket(bucket_name)
new_key = FakeKey(
name=key_name,
value=value,
storage=storage,
etag=etag,
is_versioned=bucket.is_versioned,
version_id=str(uuid.uuid4()) if bucket.is_versioned else None,
multipart=multipart,
)
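# Keep any existing versions of this key, replacing only an entry that shares the new key's version id.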
keys = [
key
for key in bucket.keys.getlist(key_name, [])
if key.version_id != new_key.version_id
] + [new_key]
bucket.keys.setlist(key_name, keys)
return new_key
def append_to_key(self, bucket_name, key_name, value):
key_name = clean_key_name(key_name)
key = self.get_object(bucket_name, key_name)
key.append_to_value(value)
return key
def get_object(self, bucket_name, key_name, version_id=None, part_number=None):
key_name = clean_key_name(key_name)
bucket = self.get_bucket(bucket_name)
key = None
if bucket:
if version_id is None:
if key_name in bucket.keys:
key = bucket.keys[key_name]
else:
for key_version in bucket.keys.getlist(key_name, default=[]):
if str(key_version.version_id) == str(version_id):
key = key_version
break
if part_number and key and key.multipart:
key = key.multipart.parts[part_number]
if isinstance(key, FakeKey):
return key
else:
return None
def get_key_tags(self, key):
return self.tagger.list_tags_for_resource(key.arn)
def set_key_tags(self, key, tags, key_name=None):
if key is None:
raise MissingKey(key_name)
self.tagger.delete_all_tags_for_resource(key.arn)
self.tagger.tag_resource(
key.arn, [{"Key": k, "Value": v} for (k, v) in tags.items()],
)
return key
def get_bucket_tagging(self, bucket_name):
bucket = self.get_bucket(bucket_name)
return self.tagger.list_tags_for_resource(bucket.arn)
def put_bucket_tagging(self, bucket_name, tags):
bucket = self.get_bucket(bucket_name)
self.tagger.delete_all_tags_for_resource(bucket.arn)
self.tagger.tag_resource(
bucket.arn, [{"Key": key, "Value": value} for key, value in tags.items()],
)
def delete_bucket_tagging(self, bucket_name):
bucket = self.get_bucket(bucket_name)
self.tagger.delete_all_tags_for_resource(bucket.arn)
def put_bucket_cors(self, bucket_name, cors_rules):
bucket = self.get_bucket(bucket_name)
bucket.set_cors(cors_rules)
def put_bucket_logging(self, bucket_name, logging_config):
bucket = self.get_bucket(bucket_name)
bucket.set_logging(logging_config, self)
def delete_bucket_cors(self, bucket_name):
bucket = self.get_bucket(bucket_name)
bucket.delete_cors()
def delete_bucket_public_access_block(self, bucket_name):
bucket = self.get_bucket(bucket_name)
bucket.public_access_block = None
def delete_account_public_access_block(self, account_id):
# The account ID should equal the account id that is set for Moto:
if account_id != ACCOUNT_ID:
raise WrongPublicAccessBlockAccountIdError()
self.account_public_access_block = None
def put_bucket_notification_configuration(self, bucket_name, notification_config):
bucket = self.get_bucket(bucket_name)
bucket.set_notification_configuration(notification_config)
def put_bucket_accelerate_configuration(
self, bucket_name, accelerate_configuration
):
if accelerate_configuration not in ["Enabled", "Suspended"]:
raise MalformedXML()
bucket = self.get_bucket(bucket_name)
if bucket.name.find(".") != -1:
raise InvalidRequest("PutBucketAccelerateConfiguration")
bucket.set_accelerate_configuration(accelerate_configuration)
def put_bucket_public_access_block(self, bucket_name, pub_block_config):
bucket = self.get_bucket(bucket_name)
if not pub_block_config:
raise InvalidPublicAccessBlockConfiguration()
bucket.public_access_block = PublicAccessBlock(
pub_block_config.get("BlockPublicAcls"),
pub_block_config.get("IgnorePublicAcls"),
pub_block_config.get("BlockPublicPolicy"),
pub_block_config.get("RestrictPublicBuckets"),
)
def put_account_public_access_block(self, account_id, pub_block_config):
# The account ID should equal the account id that is set for Moto:
if account_id != ACCOUNT_ID:
raise WrongPublicAccessBlockAccountIdError()
if not pub_block_config:
raise InvalidPublicAccessBlockConfiguration()
self.account_public_access_block = PublicAccessBlock(
pub_block_config.get("BlockPublicAcls"),
pub_block_config.get("IgnorePublicAcls"),
pub_block_config.get("BlockPublicPolicy"),
pub_block_config.get("RestrictPublicBuckets"),
)
def initiate_multipart(self, bucket_name, key_name, metadata):
bucket = self.get_bucket(bucket_name)
new_multipart = FakeMultipart(key_name, metadata)
bucket.multiparts[new_multipart.id] = new_multipart
return new_multipart
def complete_multipart(self, bucket_name, multipart_id, body):
bucket = self.get_bucket(bucket_name)
multipart = bucket.multiparts[multipart_id]
value, etag = multipart.complete(body)
if value is None:
return
del bucket.multiparts[multipart_id]
key = self.set_object(
bucket_name, multipart.key_name, value, etag=etag, multipart=multipart
)
key.set_metadata(multipart.metadata)
return key
def cancel_multipart(self, bucket_name, multipart_id):
bucket = self.get_bucket(bucket_name)
multipart_data = bucket.multiparts.get(multipart_id, None)
if not multipart_data:
raise NoSuchUpload()
del bucket.multiparts[multipart_id]
def list_multipart(self, bucket_name, multipart_id):
bucket = self.get_bucket(bucket_name)
return list(bucket.multiparts[multipart_id].list_parts())
def get_all_multiparts(self, bucket_name):
bucket = self.get_bucket(bucket_name)
return bucket.multiparts
def set_part(self, bucket_name, multipart_id, part_id, value):
bucket = self.get_bucket(bucket_name)
multipart = bucket.multiparts[multipart_id]
return multipart.set_part(part_id, value)
def copy_part(
self,
dest_bucket_name,
multipart_id,
part_id,
src_bucket_name,
src_key_name,
src_version_id,
start_byte,
end_byte,
):
dest_bucket = self.get_bucket(dest_bucket_name)
multipart = dest_bucket.multiparts[multipart_id]
src_value = self.get_object(
src_bucket_name, src_key_name, version_id=src_version_id
).value
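# Apply the requested byte range; end_byte is inclusive, hence the +1 in the slice.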
if start_byte is not None:
src_value = src_value[start_byte : end_byte + 1]
return multipart.set_part(part_id, src_value)
def prefix_query(self, bucket, prefix, delimiter):
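# Mimics S3 list semantics: keys matching the prefix are returned directly, while keys that also contain the delimiter are grouped into "folder" prefixes.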
key_results = set()
folder_results = set()
if prefix:
for key_name, key in bucket.keys.items():
if key_name.startswith(prefix):
key_without_prefix = key_name.replace(prefix, "", 1)
if delimiter and delimiter in key_without_prefix:
# If delimiter, we need to split out folder_results
key_without_delimiter = key_without_prefix.split(delimiter)[0]
folder_results.add(
"{0}{1}{2}".format(prefix, key_without_delimiter, delimiter)
)
else:
key_results.add(key)
else:
for key_name, key in bucket.keys.items():
if delimiter and delimiter in key_name:
# If delimiter, we need to split out folder_results
folder_results.add(key_name.split(delimiter)[0] + delimiter)
else:
key_results.add(key)
key_results = filter(
lambda key: not isinstance(key, FakeDeleteMarker), key_results
)
key_results = sorted(key_results, key=lambda key: key.name)
folder_results = [
folder_name for folder_name in sorted(folder_results, key=lambda key: key)
]
return key_results, folder_results
def _set_delete_marker(self, bucket_name, key_name):
bucket = self.get_bucket(bucket_name)
bucket.keys[key_name] = FakeDeleteMarker(key=bucket.keys[key_name])
def delete_object(self, bucket_name, key_name, version_id=None):
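# Unversioned buckets drop the key outright; versioned buckets get a delete marker, unless a specific version id is given, in which case only that version is removed.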
key_name = clean_key_name(key_name)
bucket = self.get_bucket(bucket_name)
try:
if not bucket.is_versioned:
bucket.keys.pop(key_name)
else:
if version_id is None:
self._set_delete_marker(bucket_name, key_name)
else:
if key_name not in bucket.keys:
raise KeyError
bucket.keys.setlist(
key_name,
[
key
for key in bucket.keys.getlist(key_name)
if str(key.version_id) != str(version_id)
],
)
if not bucket.keys.getlist(key_name):
bucket.keys.pop(key_name)
return True
except KeyError:
return False
def copy_key(
self,
src_bucket_name,
src_key_name,
dest_bucket_name,
dest_key_name,
storage=None,
acl=None,
src_version_id=None,
):
src_key_name = clean_key_name(src_key_name)
dest_key_name = clean_key_name(dest_key_name)
dest_bucket = self.get_bucket(dest_bucket_name)
key = self.get_object(src_bucket_name, src_key_name, version_id=src_version_id)
new_key = key.copy(dest_key_name, dest_bucket.is_versioned)
self.tagger.copy_tags(key.arn, new_key.arn)
if storage is not None:
new_key.set_storage_class(storage)
if acl is not None:
new_key.set_acl(acl)
dest_bucket.keys[dest_key_name] = new_key
def set_bucket_acl(self, bucket_name, acl):
bucket = self.get_bucket(bucket_name)
bucket.set_acl(acl)
def get_bucket_acl(self, bucket_name):
bucket = self.get_bucket(bucket_name)
return bucket.acl
def get_bucket_cors(self, bucket_name):
bucket = self.get_bucket(bucket_name)
return bucket.cors
def get_bucket_logging(self, bucket_name):
bucket = self.get_bucket(bucket_name)
return bucket.logging
def get_bucket_notification_configuration(self, bucket_name):
bucket = self.get_bucket(bucket_name)
return bucket.notification_configuration
s3_backend = S3Backend()
| 34.384428 | 120 | 0.587992 |
f357578f5090250a53043e264a3af7d38adcae67 | 648 | py | Python | NameMarkupLanguage/NameMarkInterface/DefTag.py | sonnts996/NameMarkLanguage | 448db602371e91d661bbf3de40070904a89e85fa | [
"MIT"
] | null | null | null | NameMarkupLanguage/NameMarkInterface/DefTag.py | sonnts996/NameMarkLanguage | 448db602371e91d661bbf3de40070904a89e85fa | [
"MIT"
] | null | null | null | NameMarkupLanguage/NameMarkInterface/DefTag.py | sonnts996/NameMarkLanguage | 448db602371e91d661bbf3de40070904a89e85fa | [
"MIT"
] | null | null | null | class DefTag:
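"""Base interface for NML tags; concrete tag classes are expected to override these accessors."""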
def __init__(self):
self.__category__ = ""
self.__mID__ = ""
def category(self) -> str:
pass
def mid(self) -> str:
pass
def nmlId(self) -> str:
pass
def nmlDisplayId(self) -> str:
pass
def setCategory(self, category):
pass
def setId(self, mid):
pass
def __str__(self):
return self.nmlId()
def __eq__(self, other):
if isinstance(other, DefTag):
return other.nmlId() == self.nmlId()
elif isinstance(other, str):
return other == self.nmlId()
else:
return False
| 19.058824 | 48 | 0.521605 |
b929acb3b2361f77373dfc6ad169d9fff4ebde74 | 427 | py | Python | Resources/Work_By_Raj/Google_Calender_api/Resources/__init__.py | Raj-Dave368/Achintya | c1604b10467245025fce08bc2a785ae510d4041d | [
"MIT"
] | null | null | null | Resources/Work_By_Raj/Google_Calender_api/Resources/__init__.py | Raj-Dave368/Achintya | c1604b10467245025fce08bc2a785ae510d4041d | [
"MIT"
] | null | null | null | Resources/Work_By_Raj/Google_Calender_api/Resources/__init__.py | Raj-Dave368/Achintya | c1604b10467245025fce08bc2a785ae510d4041d | [
"MIT"
] | null | null | null | # jay mahakal
import Resources.Work_By_Raj.Google_Calender_api.Resources.Setup
import Resources.Work_By_Raj.Google_Calender_api.Resources.Return_events_info
# The function below [Setup.setup_calendar_credentials_return_service()] should run only once
# service = Setup.setup_calendar_credentials_return_service()
# print(Return_events_info.return_events_info("Give details about calendar events for today", service=service))
| 35.583333 | 111 | 0.850117 |
4894005c67ef166c9b5a2f13a7597d4af3204321 | 606 | py | Python | main.py | Etuloser/e-flask | 0cdc6ae324b3cfe696b69b85ad0189516ba9a25f | [
"MIT"
] | null | null | null | main.py | Etuloser/e-flask | 0cdc6ae324b3cfe696b69b85ad0189516ba9a25f | [
"MIT"
] | null | null | null | main.py | Etuloser/e-flask | 0cdc6ae324b3cfe696b69b85ad0189516ba9a25f | [
"MIT"
] | null | null | null | import os
from dotenv import load_dotenv
from config import create_api, create_app, scheduler
from utils.handle import handle_success
dotenv_path = os.path.join(os.path.dirname(__file__), '.env')
if os.path.exists(dotenv_path):
load_dotenv(dotenv_path)
app = create_app(os.getenv('FLASK_CONFIG') or 'default')
api = create_api(app)
@app.route('/')
def root_path():
return handle_success()
@scheduler.task('interval', id='do_job_1', seconds=5, misfire_grace_time=900)
def job1():
print('Job 1 executed')
if __name__ == '__main__':
app.run(host='0.0.0.0', port='4396', debug=True)
| 20.896552 | 77 | 0.722772 |
2c07c8ffec9c54beda6877470b4f004f4d4a23ef | 6,321 | py | Python | plugins/m/math.py | ashwinvis/m.css | f523da7a0fb0bc37f66cd914cd4c7c3f7e46b926 | [
"MIT"
] | null | null | null | plugins/m/math.py | ashwinvis/m.css | f523da7a0fb0bc37f66cd914cd4c7c3f7e46b926 | [
"MIT"
] | null | null | null | plugins/m/math.py | ashwinvis/m.css | f523da7a0fb0bc37f66cd914cd4c7c3f7e46b926 | [
"MIT"
] | null | null | null | #
# This file is part of m.css.
#
# Copyright © 2017, 2018, 2019 Vladimír Vondruš <mosra@centrum.cz>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
import copy
import html
import os
import re
from docutils import nodes, utils
from docutils.parsers import rst
from docutils.parsers.rst import directives
from docutils.parsers.rst.roles import set_classes
import latex2svg
import latex2svgextra
default_settings = {
'INPUT': '',
'M_MATH_RENDER_AS_CODE': False,
'M_MATH_CACHE_FILE': 'm.math.cache'
}
settings = None
def _is_math_figure(parent):
# The parent has to be a figure, marked as m-figure
if not isinstance(parent, nodes.figure): return False
if 'm-figure' not in parent.get('classes', []): return False
# And as a first visible node of such type
for child in parent:
if not isinstance(child, nodes.Invisible): return False
return True
class Math(rst.Directive):
option_spec = {'class': directives.class_option,
'name': directives.unchanged}
has_content = True
def run(self):
set_classes(self.options)
self.assert_has_content()
parent = self.state.parent
# Fallback rendering as code requested
if settings['M_MATH_RENDER_AS_CODE']:
# If this is a math figure, replace the figure CSS class to have a
# matching border
if _is_math_figure(parent):
parent['classes'][parent['classes'].index('m-figure')] = 'm-code-figure'
content = nodes.raw('', html.escape('\n'.join(self.content)), format='html')
pre = nodes.literal_block('')
pre.append(content)
return [pre]
content = '\n'.join(self.content)
_, svg = latex2svgextra.fetch_cached_or_render("$$" + content + "$$")
# If this is the first real node inside a math figure, put the SVG
# directly inside
if _is_math_figure(parent):
node = nodes.raw(self.block_text, latex2svgextra.patch(content, svg, None, ' class="{}"'.format(' '.join(['m-math'] + self.options.get('classes', [])))), format='html')
node.line = self.content_offset + 1
self.add_name(node)
return [node]
# Otherwise wrap it in a <div class="m-math">
node = nodes.raw(self.block_text, latex2svgextra.patch(content, svg, None, ''), format='html')
node.line = self.content_offset + 1
self.add_name(node)
container = nodes.container(**self.options)
container['classes'] += ['m-math']
container.append(node)
return [container]
def new_page(*args, **kwargs):
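# Reset latex2svgextra's per-page formula counter at the start of each page.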
latex2svgextra.counter = 0
def math(role, rawtext, text, lineno, inliner, options={}, content=[]):
# In order to properly preserve backslashes (well, and backticks)
text = rawtext[rawtext.find('`') + 1:rawtext.rfind('`')]
# Fallback rendering as code requested
if settings['M_MATH_RENDER_AS_CODE']:
set_classes(options)
classes = []
if 'classes' in options:
classes += options['classes']
del options['classes']
content = nodes.raw('', html.escape(utils.unescape(text)), format='html')
node = nodes.literal(rawtext, '', **options)
node.append(content)
return [node], []
# Apply classes to the <svg> element instead of some outer <span>
set_classes(options)
classes = 'm-math'
if 'classes' in options:
classes += ' ' + ' '.join(options['classes'])
del options['classes']
depth, svg = latex2svgextra.fetch_cached_or_render("$" + text + "$")
attribs = ' class="{}"'.format(classes)
node = nodes.raw(rawtext, latex2svgextra.patch(text, svg, depth, attribs), format='html', **options)
return [node], []
def save_cache(*args, **kwargs):
if settings['M_MATH_CACHE_FILE']:
latex2svgextra.pickle_cache(settings['M_MATH_CACHE_FILE'])
def register_mcss(mcss_settings, hooks_pre_page, hooks_post_run, **kwargs):
global default_settings, settings
settings = copy.deepcopy(default_settings)
for key in settings.keys():
if key in mcss_settings: settings[key] = mcss_settings[key]
if settings['M_MATH_CACHE_FILE']:
settings['M_MATH_CACHE_FILE'] = os.path.join(settings['INPUT'], settings['M_MATH_CACHE_FILE'])
# Ensure that cache is unpickled again if M_MATH_CACHE_FILE is *not* set --
# otherwise tests will sporadically fail.
if settings['M_MATH_CACHE_FILE'] and os.path.exists(settings['M_MATH_CACHE_FILE']):
latex2svgextra.unpickle_cache(settings['M_MATH_CACHE_FILE'])
else:
latex2svgextra.unpickle_cache(None)
hooks_pre_page += [new_page]
hooks_post_run += [save_cache]
rst.directives.register_directive('math', Math)
rst.roles.register_canonical_role('math', math)
# Below is only Pelican-specific functionality. If Pelican is not found, these
# do nothing.
def _configure_pelican(pelicanobj):
register_mcss(mcss_settings=pelicanobj.settings, hooks_pre_page=[], hooks_post_run=[])
def register():
from pelican import signals
signals.initialized.connect(_configure_pelican)
signals.finalized.connect(save_cache)
signals.content_object_init.connect(new_page)
| 36.75 | 180 | 0.677741 |
0dcf07e2cd2d52f47cc764982d7a4744921ecd9c | 224 | py | Python | api/backends/music.py | codezeus/ratingslist | 0f5ddb753ef7cdcfb9444a28c52b0338de43d7a0 | [
"MIT"
] | null | null | null | api/backends/music.py | codezeus/ratingslist | 0f5ddb753ef7cdcfb9444a28c52b0338de43d7a0 | [
"MIT"
] | null | null | null | api/backends/music.py | codezeus/ratingslist | 0f5ddb753ef7cdcfb9444a28c52b0338de43d7a0 | [
"MIT"
] | null | null | null | from api.backends.base import BaseBackend
from api.parsers.metacritic import MetacriticMusicParser
class MusicBackend(BaseBackend):
"""Backend for the music content type, parsed with the Metacritic music parser."""
parser_class = MetacriticMusicParser()
| 28 | 56 | 0.803571 |
80773b69929403a86db6fd4b3c9cc5935fe30474 | 4,466 | py | Python | src/test/python/tests_comparing.py | stefano-bragaglia/DePYsible | 6b53ede459a10f5e24da89d3ebaa05f08ec7af12 | [
"BSD-2-Clause"
] | 4 | 2018-09-24T23:51:05.000Z | 2021-01-06T09:13:52.000Z | src/test/python/tests_comparing.py | stefano-bragaglia/DefeasiblePython | 6b53ede459a10f5e24da89d3ebaa05f08ec7af12 | [
"BSD-2-Clause"
] | 1 | 2020-05-26T01:14:44.000Z | 2020-05-27T07:54:15.000Z | src/test/python/tests_comparing.py | stefano-bragaglia/DePYsible | 6b53ede459a10f5e24da89d3ebaa05f08ec7af12 | [
"BSD-2-Clause"
] | null | null | null | from unittest import TestCase
from assertpy import assert_that
from depysible.domain.definitions import Program
from depysible.domain.definitions import Rule
from depysible.domain.interpretation import Derivation
from depysible.domain.interpretation import Interpreter
class TestComparing(TestCase):
def test__is_strictly_more_specific__0(self):
p = Program.parse("""
bird(X) <- chicken(X).
bird(X) <- penguin(X).
~flies(X) <- penguin(X).
chicken(tina).
penguin(tweety).
scared(tina).
flies(X) -< bird(X).
flies(X) -< chicken(X), scared(X).
nests_in_trees(X) -< flies(X).
~flies(X) -< chicken(X).
""")
i = Interpreter(p)
d1 = Derivation([Rule.parse('~flies(tina) -< chicken(tina).'), Rule.parse('chicken(tina).')], i)
s1 = d1.get_structure()
d2 = Derivation([
Rule.parse('flies(tina) -< bird(tina).'),
Rule.parse('bird(tina) <- chicken(tina).'),
Rule.parse('chicken(tina).'),
], i)
s2 = d2.get_structure()
result = s1.is_strictly_more_specific_than(s2)
assert_that(result).is_true()
def test__is_strictly_more_specific__1(self):
p = Program.parse("""
bird(X) <- chicken(X).
bird(X) <- penguin(X).
~flies(X) <- penguin(X).
chicken(tina).
penguin(tweety).
scared(tina).
flies(X) -< bird(X).
flies(X) -< chicken(X), scared(X).
nests_in_trees(X) -< flies(X).
~flies(X) -< chicken(X).
""")
i = Interpreter(p)
d1 = Derivation([
Rule.parse('~flies(tina) -< chicken(tina).'),
Rule.parse('chicken(tina).'),
], i)
s1 = d1.get_structure()
d2 = Derivation([
Rule.parse('flies(tina) -< bird(tina).'),
Rule.parse('bird(tina) <- chicken(tina).'),
Rule.parse('chicken(tina).'),
], i)
s2 = d2.get_structure()
result = s2.is_strictly_more_specific_than(s1)
assert_that(result).is_false()
def test__is_strictly_more_specific__2(self):
p = Program.parse("""
bird(X) <- chicken(X).
bird(X) <- penguin(X).
~flies(X) <- penguin(X).
chicken(tina).
penguin(tweety).
scared(tina).
flies(X) -< bird(X).
flies(X) -< chicken(X), scared(X).
nests_in_trees(X) -< flies(X).
~flies(X) -< chicken(X).
""")
i = Interpreter(p)
d1 = Derivation([
Rule.parse('flies(tina) -< chicken(tina), scared(tina).'),
Rule.parse('chicken(tina).'),
Rule.parse('scared(tina).'),
], i)
s1 = d1.get_structure()
d2 = Derivation([
Rule.parse('~flies(tina) -< chicken(tina).'),
Rule.parse('chicken(tina).'),
], i)
s2 = d2.get_structure()
result = s2.is_strictly_more_specific_than(s1)
assert_that(result).is_false()
def test__is_strictly_more_specific__3(self):
p = Program.parse("""
bird(X) <- chicken(X).
bird(X) <- penguin(X).
~flies(X) <- penguin(X).
chicken(tina).
penguin(tweety).
scared(tina).
flies(X) -< bird(X).
flies(X) -< chicken(X), scared(X).
nests_in_trees(X) -< flies(X).
~flies(X) -< chicken(X).
""")
i = Interpreter(p)
d1 = Derivation([
Rule.parse('flies(tina) -< chicken(tina), scared(tina).'),
Rule.parse('chicken(tina).'),
Rule.parse('scared(tina).'),
], i)
s1 = d1.get_structure()
d2 = Derivation([
Rule.parse('~flies(tina) -< chicken(tina).'),
Rule.parse('chicken(tina).'),
], i)
s2 = d2.get_structure()
result = s1.is_strictly_more_specific_than(s2)
assert_that(result).is_true()
| 36.016129 | 104 | 0.468652 |
344e30389e89bf87f24dd1e834eaa058e2fd0371 | 2,974 | py | Python | samples/ch09/vehicles_mlp_mean.py | bmoretz/Python-Machine-Learning | 206e7905cdcade7e322cee7e60a642d55ebc8955 | [
"MIT"
] | null | null | null | samples/ch09/vehicles_mlp_mean.py | bmoretz/Python-Machine-Learning | 206e7905cdcade7e322cee7e60a642d55ebc8955 | [
"MIT"
] | null | null | null | samples/ch09/vehicles_mlp_mean.py | bmoretz/Python-Machine-Learning | 206e7905cdcade7e322cee7e60a642d55ebc8955 | [
"MIT"
] | null | null | null | '''
Created on Nov 26, 2017
@author: henryliu
'''
import sys
from sklearn.metrics import mean_squared_error, r2_score
from sklearn.neural_network import MLPRegressor
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
def main(argv):
# read in vehicles csv
vehicles_df = pd.read_csv("../data/vehicles.csv", low_memory=False)
vehicles_displ_mpg_all = vehicles_df[['displ', 'UHighway']]
vehicles_displ_mpg = vehicles_displ_mpg_all[vehicles_displ_mpg_all.displ > 0]
half = int(len (vehicles_displ_mpg) / 2)
# create the training set with the first half of data
first_half = vehicles_displ_mpg [:half]
second_half = vehicles_displ_mpg [half:]
first_half_sorted = first_half.sort_values(by=['displ', 'UHighway'])
first_half_grouped_by_mean = pd.DataFrame({'train_mean' : \
first_half_sorted.groupby('displ')['UHighway'].mean()}).reset_index()
first_half_x = first_half_grouped_by_mean ['displ'].values.reshape(-1,1)
print(type(first_half_x))
print(first_half_x.shape)
first_half_y = first_half_grouped_by_mean ['train_mean'].values.reshape(-1,1)
print(first_half_y.shape)
#ax = first_half_grouped_by_median.plot (x = "displ", y = "train_median", c = "b")
#plt.show ()
second_half_sorted = second_half.sort_values(by=['displ', 'UHighway'])
second_half_grouped_by_mean = pd.DataFrame({'test_mean' : \
second_half_sorted.groupby('displ')['UHighway'].mean()}).reset_index()
second_half_x = second_half_grouped_by_mean ['displ'].values.reshape(-1,1)
second_half_y = second_half_grouped_by_mean ['test_mean'].values.reshape(-1,1)
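# Single hidden layer of 60 units trained with L-BFGS; note that learning_rate_init
# is only used by the 'sgd' and 'adam' solvers, so it has no effect with 'lbfgs'.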
regr = MLPRegressor(hidden_layer_sizes=(60,), max_iter=200, alpha=1e-4,
solver='lbfgs', verbose=10, tol=1e-4, random_state=1,
learning_rate_init=.1)
# Train the model using the training sets
regr.fit(first_half_x, first_half_y.ravel())
# Make predictions using the testing set
second_half_y_pred = regr.predict(second_half_x)
print("\tModel parameters: ", regr.get_params(deep=False))
# mean squared error
mse = mean_squared_error(second_half_y, second_half_y_pred)
rmse = np.sqrt(mse)
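# RMSE is in the target's units (MPG), making it easier to interpret than MSE.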
print("Mean squared error: %.2f" % mse)
print("Root mean squared error: %.2f" % rmse)
# Explained variance score: 1 is perfect prediction
print('R-squared score: %.2f' % r2_score(second_half_y, second_half_y_pred))
# Plot outputs
plt_train = plt.scatter(first_half_x, first_half_y, color='green')
plt_test = plt.scatter(second_half_x, second_half_y, color='red')
plt.plot(second_half_x, second_half_y_pred, color='blue', linewidth=2)
plt.legend ((plt_train, plt_test), ('train', 'test'))
plt.xlabel ("Engine displacement (liter)")
plt.ylabel ("Fuel economy (MPG)")
plt.show()
# entry point to the main function
if __name__ == '__main__':
main (sys.argv) | 36.716049 | 86 | 0.694687 |
9c8346561be194cfbb16f83b73aa35a26d38859a | 897 | py | Python | PYex/Rambit/Rambit/source/Game.py | iPatso/PyGameProjs | b1eed993649a1e90da5214a1604fdb59d65ff0b4 | [
"Apache-2.0"
] | null | null | null | PYex/Rambit/Rambit/source/Game.py | iPatso/PyGameProjs | b1eed993649a1e90da5214a1604fdb59d65ff0b4 | [
"Apache-2.0"
] | null | null | null | PYex/Rambit/Rambit/source/Game.py | iPatso/PyGameProjs | b1eed993649a1e90da5214a1604fdb59d65ff0b4 | [
"Apache-2.0"
] | null | null | null | import pygame
import Resources, ScreenManager, Intro
class Game():
def __init__(self):
pygame.init()
pygame.display.set_caption('NOMEDOJOGO')
pygame.mouse.set_visible(False)
icon = pygame.Surface((32,32))
iconImage = pygame.image.load("res/img/icon.png")
iconRect = iconImage.get_rect()
icon.blit(iconImage,iconRect)
pygame.display.set_icon(icon)
pygame.display.set_mode(Resources.DEFAULT_SCREEN_SIZE, pygame.FULLSCREEN)
manager = ScreenManager.ScreenManager()
#s = Stage.Stage1(manager)
pygame.mixer.init()
manager.setBaseObjectToUpdate(Intro.Intro(manager))
manager.run()
pygame.mixer.quit()
pygame.quit()
# Create a Game instance, which already calls the ScreenManager.run() method that runs the game
Game()
| 28.03125 | 84 | 0.617614 |
21a3eeeebaf2dddb9f0986dcdfe438175af8d575 | 3,762 | py | Python | tensorflow_datasets/image_classification/imagenet_a.py | sourcery-ai-bot/datasets | b623ab0abf3f03bacf6a7ba22c8d37bf76a4db28 | [
"Apache-2.0"
] | 1 | 2021-05-10T10:41:27.000Z | 2021-05-10T10:41:27.000Z | tensorflow_datasets/image_classification/imagenet_a.py | sourcery-ai-bot/datasets | b623ab0abf3f03bacf6a7ba22c8d37bf76a4db28 | [
"Apache-2.0"
] | null | null | null | tensorflow_datasets/image_classification/imagenet_a.py | sourcery-ai-bot/datasets | b623ab0abf3f03bacf6a7ba22c8d37bf76a4db28 | [
"Apache-2.0"
] | 1 | 2021-07-04T11:07:35.000Z | 2021-07-04T11:07:35.000Z | # coding=utf-8
# Copyright 2021 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The ImageNet-A image classification dataset."""
import os
import tensorflow.compat.v2 as tf
import tensorflow_datasets.public_api as tfds
_CITATION = r"""
@article{hendrycks2019nae,
title={Natural Adversarial Examples},
author={Dan Hendrycks and Kevin Zhao and Steven Basart and Jacob Steinhardt and Dawn Song},
journal={arXiv preprint arXiv:1907.07174},
year={2019}
}
"""
_DESCRIPTION = """
ImageNet-A is a set of images labelled with ImageNet labels that were obtained
by collecting new data and keeping only those images that ResNet-50 models fail
to correctly classify. For more details please refer to the paper.
The label space is the same as that of ImageNet2012. Each example is
represented as a dictionary with the following keys:
* 'image': The image, a (H, W, 3)-tensor.
* 'label': An integer in the range [0, 1000).
* 'file_name': A unique string identifying the example within the dataset.
"""
_IMAGENET_LABELS_FILENAME = r'image_classification/imagenet2012_labels.txt'
_IMAGENET_A_URL = r'https://people.eecs.berkeley.edu/~hendrycks/imagenet-a.tar'
class ImagenetA(tfds.core.GeneratorBasedBuilder):
"""Natural adversarial examples with ImageNet labels."""
VERSION = tfds.core.Version('0.1.0')
def _info(self):
names_file = tfds.core.tfds_path(_IMAGENET_LABELS_FILENAME)
return tfds.core.DatasetInfo(
builder=self,
# This is the description that will appear on the datasets page.
description=_DESCRIPTION,
# tfds.features.FeatureConnectors
features=tfds.features.FeaturesDict({
'image': tfds.features.Image(encoding_format='jpeg'),
'label': tfds.features.ClassLabel(names_file=names_file),
'file_name': tfds.features.Text(),
}),
# Used if as_supervised=True in # builder.as_dataset.
supervised_keys=('image', 'label'),
# Homepage of the dataset for documentation
homepage='https://github.com/hendrycks/natural-adv-examples',
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""Returns a SplitGenerator for the test set."""
imagenet_a_root = os.path.join(
dl_manager.download_and_extract(_IMAGENET_A_URL), 'imagenet-a')
return [
tfds.core.SplitGenerator(
# The dataset provides only a test split.
name=tfds.Split.TEST,
# These kwargs will be passed to _generate_examples
gen_kwargs={'imagenet_a_root': imagenet_a_root},
),
]
def _generate_examples(self, imagenet_a_root):
"""Yields the examples."""
# The directory structure is `imagenet-a/imagenet_synset_id/filename.jpg`.
for class_synset in tf.io.gfile.listdir(imagenet_a_root):
class_dir = os.path.join(imagenet_a_root, class_synset)
if not tf.io.gfile.isdir(class_dir):
continue
for image_filename in tf.io.gfile.listdir(class_dir):
image_path = os.path.join(class_dir, image_filename)
features = {
'image': image_path,
'label': class_synset,
'file_name': image_filename,
}
yield image_filename, features
| 36.882353 | 93 | 0.707337 |
a12edca1065e665d3be4c4e907ca89dc38c577cd | 2,230 | py | Python | src/lib/utils/ukf_tracker.py | gafaua/PolyTrack | 5a4b409732b9396be8271f5cba4ad426808d5af5 | [
"MIT"
] | 10 | 2021-11-07T04:25:08.000Z | 2022-03-25T03:33:21.000Z | src/lib/utils/ukf_tracker.py | gafaua/PolyTrack | 5a4b409732b9396be8271f5cba4ad426808d5af5 | [
"MIT"
] | null | null | null | src/lib/utils/ukf_tracker.py | gafaua/PolyTrack | 5a4b409732b9396be8271f5cba4ad426808d5af5 | [
"MIT"
] | 6 | 2021-11-03T21:27:06.000Z | 2022-03-27T17:27:40.000Z | from filterpy.kalman import UnscentedKalmanFilter as UKF
from filterpy.kalman import MerweScaledSigmaPoints
from filterpy.common import Q_discrete_white_noise, Q_continuous_white_noise
import numpy as np
points = MerweScaledSigmaPoints(n=6, alpha=1e-3, beta=2., kappa=3)
def hx(x):
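# Measurement function: the filter observes only the x and y positions (state indices 0 and 3).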
return x[[0,3]]
def fx(x, dt):
r"""
State transition function
x has the form [x, dx/dt, d2x/dt2, y, dy/dt, d2y/dt2]
dt is the delta time, amount of time between state transitions
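Constant-acceleration kinematics per axis:
x' = x + v*dt + a*dt**2/2, v' = v + a*dt, a' = a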
"""
dt2 = (dt*dt)/2
F = np.array([[1, dt, dt2, 0, 0, 0],
[0, 1, dt, 0, 0, 0],
[0, 0, 1, 0, 0, 0],
[0, 0, 0, 1, dt, dt2],
[0, 0, 0, 0, 1, dt],
[0, 0, 0, 0, 0, 1]])
return F @ x
class UKF_Tracker(object):
def __init__(self, pos, dt):
self.ukf = UKF(dim_x=6, dim_z=2, dt=dt, hx=hx, fx=fx, points=points)
# Standard deviation of the measurement noise for the detected centers; TODO: check if this value is ok
pos_std = 20
# Measurement noise matrix 2x2 for measurements of x and y positions
self.ukf.R = np.diag([pos_std**2, pos_std**2])
# Process noise matrix, TODO check var value is ok, possibility to use Q_continuous_white_noise()
self.ukf.Q = Q_discrete_white_noise(dim=3, dt=dt, block_size=2, var=0.01)
#self.ukf.Q = Q_continuous_white_noise(dim=3,block_size=2)
# Init
self.ukf.x = np.array([pos[0], 0, 0., pos[1], 0, 0.]) # Initial state
self.ukf.P = np.diag([10**2, 20**2, 20**2, 10**2, 20**2, 20**2]) # Covariance matrix; at first we assume that all variables are independent, thus using a diagonal matrix
self.state_history = []
def predict(self, dt=None):
self.ukf.predict(dt=dt)
# TODO maybe add some checks to the covariance matrix to verify uncertainty
# about the current state
state = self.ukf.x
self.state_history.append(state)
return state
def update_match(self, pos, dt=None):
self.ukf.predict(dt=dt)
self.ukf.update(pos)
state = self.ukf.x
self.state_history.append(state)
return state
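# Minimal usage sketch (illustrative values, not from the original source; assumes
# detections arrive at a fixed frame interval):
#   tracker = UKF_Tracker(pos=(100.0, 200.0), dt=1.0 / 30)
#   state = tracker.update_match((104.0, 203.0))  # a detection matched this frame
#   state = tracker.predict()                     # no detection this frame: predict only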
| 34.84375 | 178 | 0.59417 |
c8d9ba10f72f595ca99a604a87aca8d47861460b | 39,559 | py | Python | old_projects/eulers_characteristic_formula.py | dezren39/manim | 80d7742446c588dc296bd3afb3465a63b63383db | [
"MIT"
] | 1 | 2020-07-05T12:56:31.000Z | 2020-07-05T12:56:31.000Z | old_projects/eulers_characteristic_formula.py | elphasminyato/manim | 8488b621ef3367e6a911354d2765684321fed20e | [
"MIT"
] | 3 | 2021-09-08T02:19:38.000Z | 2022-03-12T00:41:00.000Z | old_projects/eulers_characteristic_formula.py | dezren39/manim | 80d7742446c588dc296bd3afb3465a63b63383db | [
"MIT"
] | 1 | 2020-07-05T12:56:33.000Z | 2020-07-05T12:56:33.000Z | #!/usr/bin/env python
import numpy as np
import itertools as it
from copy import deepcopy
import sys
from animation import *
from mobject import *
from constants import *
from mobject.region import *
import displayer as disp
from scene.scene import Scene, GraphScene
from scene.graphs import *
from moser_main import EulersFormula
from script_wrapper import command_line_create_scene
MOVIE_PREFIX = "ecf_graph_scenes/"
RANDOLPH_SCALE_FACTOR = 0.3
EDGE_ANNOTATION_SCALE_FACTOR = 0.7
DUAL_CYCLE = [3, 4, 5, 6, 1, 0, 2, 3]
class EulersFormulaWords(Scene):
def construct(self):
self.add(TexMobject("V-E+F=2"))
class TheTheoremWords(Scene):
def construct(self):
self.add(TextMobject("The Theorem:"))
class ProofAtLastWords(Scene):
def construct(self):
self.add(TextMobject("The Proof At Last..."))
class DualSpanningTreeWords(Scene):
def construct(self):
self.add(TextMobject("Spanning trees have duals too!"))
class PreferOtherProofDialogue(Scene):
def construct(self):
teacher = Face("talking").shift(2*LEFT)
student = Face("straight").shift(2*RIGHT)
teacher_bubble = SpeechBubble(LEFT).speak_from(teacher)
student_bubble = SpeechBubble(RIGHT).speak_from(student)
teacher_bubble.write("Look at this \\\\ elegant proof!")
student_bubble.write("I prefer the \\\\ other proof.")
self.add(student, teacher, teacher_bubble, teacher_bubble.text)
self.wait(2)
self.play(Transform(
Dot(student_bubble.tip).set_color("black"),
Mobject(student_bubble, student_bubble.text)
))
self.wait(2)
self.remove(teacher_bubble.text)
teacher_bubble.write("Does that make this \\\\ any less elegant?")
self.add(teacher_bubble.text)
self.wait(2)
class IllustrateDuality(GraphScene):
def construct(self):
GraphScene.construct(self)
self.generate_dual_graph()
self.add(TextMobject("Duality").to_edge(UP))
self.remove(*self.vertices)
def special_alpha(t):
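# Rate function: ramp up over the first quarter, hold at 1 through the middle, then mirror back down (symmetric about t = 0.5).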
if t > 0.5:
t = 1 - t
if t < 0.25:
return smooth(4*t)
else:
return 1
kwargs = {
"run_time" : 5.0,
"rate_func" : special_alpha
}
self.play(*[
Transform(*edge_pair, **kwargs)
for edge_pair in zip(self.edges, self.dual_edges)
] + [
Transform(
Mobject(*[
self.vertices[index]
for index in cycle
]),
dv,
**kwargs
)
for cycle, dv in zip(
self.graph.region_cycles,
self.dual_vertices
)
])
self.wait()
class IntroduceGraph(GraphScene):
def construct(self):
GraphScene.construct(self)
tweaked_graph = deepcopy(self.graph)
for index in 2, 4:
tweaked_graph.vertices[index] += 2.8*RIGHT + 1.8*DOWN
tweaked_self = GraphScene(tweaked_graph)
edges_to_remove = [
self.edges[self.graph.edges.index(pair)]
for pair in [(4, 5), (0, 5), (1, 5), (7, 1), (8, 3)]
]
connected, planar, graph = TextMobject([
"Connected ", "Planar ", "Graph"
]).to_edge(UP).split()
not_okay = TextMobject("Not Okay").set_color("red")
planar_explanation = TextMobject("""
(``Planar'' just means we can draw it without
intersecting lines)
""", size = "\\small")
planar_explanation.shift(planar.get_center() + 0.5*DOWN)
self.draw_vertices()
self.draw_edges()
self.clear()
self.add(*self.vertices + self.edges)
self.wait()
self.add(graph)
self.wait()
kwargs = {
"rate_func" : there_and_back,
"run_time" : 5.0
}
self.add(not_okay)
self.play(*[
Transform(*pair, **kwargs)
for pair in zip(
self.edges + self.vertices,
tweaked_self.edges + tweaked_self.vertices,
)
])
self.remove(not_okay)
self.add(planar, planar_explanation)
self.wait(2)
self.remove(planar_explanation)
self.add(not_okay)
self.remove(*edges_to_remove)
self.play(ShowCreation(
Mobject(*edges_to_remove),
rate_func = lambda t : 1 - t,
run_time = 1.0
))
self.wait(2)
self.remove(not_okay)
self.add(connected, *edges_to_remove)
self.wait()
class OldIntroduceGraphs(GraphScene):
def construct(self):
GraphScene.construct(self)
self.draw_vertices()
self.draw_edges()
self.wait()
self.clear()
self.add(*self.edges)
self.replace_vertices_with(Face().scale(0.4))
friends = TextMobject("Friends").scale(EDGE_ANNOTATION_SCALE_FACTOR)
self.annotate_edges(friends.shift((0, friends.get_height()/2, 0)))
self.play(*[
CounterclockwiseTransform(vertex, Dot(point))
for vertex, point in zip(self.vertices, self.points)
]+[
Transform(ann, line)
for ann, line in zip(
self.edge_annotations,
self.edges
)
])
self.wait()
class PlanarGraphDefinition(Scene):
def construct(self):
Not, quote, planar, end_quote = TextMobject([
"Not \\\\", "``", "Planar", "''",
# "no matter how \\\\ hard you try"
]).split()
shift_val = Mobject(Not, planar).to_corner().get_center()
Not.set_color("red").shift(shift_val)
graphs = [
Mobject(*GraphScene(g).mobjects)
for g in [
CubeGraph(),
CompleteGraph(5),
OctohedronGraph()
]
]
self.add(quote, planar, end_quote)
self.wait()
self.play(
FadeOut(quote),
FadeOut(end_quote),
ApplyMethod(planar.shift, shift_val),
FadeIn(graphs[0]),
run_time = 1.5
)
self.wait()
self.remove(graphs[0])
self.add(graphs[1])
planar.set_color("red")
self.add(Not)
self.wait(2)
planar.set_color("white")
self.remove(Not)
self.remove(graphs[1])
self.add(graphs[2])
self.wait(2)
class TerminologyFromPolyhedra(GraphScene):
args_list = [(CubeGraph(),)]
def construct(self):
GraphScene.construct(self)
rot_kwargs = {
"radians" : np.pi / 3,
"run_time" : 5.0
}
vertices = [
point / 2 + OUT if abs(point[0]) == 2 else point + IN
for point in self.points
]
cube = Mobject(*[
Line(vertices[edge[0]], vertices[edge[1]])
for edge in self.graph.edges
])
cube.rotate(-np.pi/3, [0, 0, 1])
cube.rotate(-np.pi/3, [0, 1, 0])
dots_to_vertices = TextMobject("Dots $\\to$ Vertices").to_corner()
lines_to_edges = TextMobject("Lines $\\to$ Edges").to_corner()
regions_to_faces = TextMobject("Regions $\\to$ Faces").to_corner()
self.clear()
# self.play(TransformAnimations(
# Rotating(Dodecahedron(), **rot_kwargs),
# Rotating(cube, **rot_kwargs)
# ))
self.play(Rotating(cube, **rot_kwargs))
self.clear()
self.play(*[
Transform(l1, l2)
for l1, l2 in zip(cube.split(), self.edges)
])
self.wait()
self.add(dots_to_vertices)
self.play(*[
ShowCreation(dot, run_time = 1.0)
for dot in self.vertices
])
self.wait(2)
self.remove(dots_to_vertices, *self.vertices)
self.add(lines_to_edges)
self.play(ApplyMethod(
Mobject(*self.edges).set_color, "yellow"
))
self.wait(2)
self.clear()
self.add(*self.edges)
self.add(regions_to_faces)
self.generate_regions()
for region in self.regions:
self.set_color_region(region)
self.wait(3.0)
class ThreePiecesOfTerminology(GraphScene):
def construct(self):
GraphScene.construct(self)
terms = cycles, spanning_trees, dual_graphs = [
TextMobject(phrase).shift(y*UP).to_edge()
for phrase, y in [
("Cycles", 3),
("Spanning Trees", 1),
("Dual Graphs", -1),
]
]
self.generate_spanning_tree()
scale_factor = 1.2
def accent(mobject, color = "yellow"):
return mobject.scale_in_place(scale_factor).set_color(color)
def tone_down(mobject):
return mobject.scale_in_place(1.0/scale_factor).set_color("white")
self.add(accent(cycles))
self.trace_cycle(run_time = 1.0)
self.wait()
tone_down(cycles)
self.remove(self.traced_cycle)
self.add(accent(spanning_trees))
self.play(ShowCreation(self.spanning_tree), run_time = 1.0)
self.wait()
tone_down(spanning_trees)
self.remove(self.spanning_tree)
self.add(accent(dual_graphs, "red"))
self.generate_dual_graph()
for mob in self.mobjects:
mob.fade()
self.play(*[
ShowCreation(mob, run_time = 1.0)
for mob in self.dual_vertices + self.dual_edges
])
self.wait()
self.clear()
self.play(ApplyMethod(
Mobject(*terms).center
))
self.wait()
class WalkingRandolph(GraphScene):
args_list = [
(SampleGraph(), [0, 1, 7, 8]),
]
@staticmethod
def args_to_string(graph, path):
return str(graph) + "".join(map(str, path))
def __init__(self, graph, path, *args, **kwargs):
self.path = path
GraphScene.__init__(self, graph, *args, **kwargs)
def construct(self):
GraphScene.construct(self)
point_path = [self.points[i] for i in self.path]
randy = Randolph()
randy.scale(RANDOLPH_SCALE_FACTOR)
randy.move_to(point_path[0])
for next, last in zip(point_path[1:], point_path):
self.play(
WalkPiCreature(randy, next),
ShowCreation(Line(last, next).set_color("yellow")),
run_time = 2.0
)
self.randy = randy
class PathExamples(GraphScene):
args_list = [(SampleGraph(),)]
def construct(self):
GraphScene.construct(self)
paths = [
(1, 2, 4, 5, 6),
(6, 7, 1, 3),
]
non_paths = [
[(0, 1), (7, 8), (5, 6),],
[(5, 0), (0, 2), (0, 1)],
]
valid_path = TextMobject("Valid \\\\ Path").set_color("green")
not_a_path = TextMobject("Not a \\\\ Path").set_color("red")
for mob in valid_path, not_a_path:
mob.to_edge(UP)
kwargs = {"run_time" : 1.0}
for path, non_path in zip(paths, non_paths):
path_lines = Mobject(*[
Line(
self.points[path[i]],
self.points[path[i+1]]
).set_color("yellow")
for i in range(len(path) - 1)
])
non_path_lines = Mobject(*[
Line(
self.points[pp[0]],
self.points[pp[1]],
).set_color("yellow")
for pp in non_path
])
self.remove(not_a_path)
self.add(valid_path)
self.play(ShowCreation(path_lines, **kwargs))
self.wait(2)
self.remove(path_lines)
self.remove(valid_path)
self.add(not_a_path)
self.play(ShowCreation(non_path_lines, **kwargs))
self.wait(2)
self.remove(non_path_lines)
class IntroduceCycle(WalkingRandolph):
args_list = [
(SampleGraph(), [0, 1, 3, 2, 0])
]
def construct(self):
WalkingRandolph.construct(self)
self.remove(self.randy)
encompassed_cycles = filter(
lambda c : set(c).issubset(self.path),
self.graph.region_cycles
)
regions = [
self.region_from_cycle(cycle)
for cycle in encompassed_cycles
]
for region in regions:
self.set_color_region(region)
self.wait()
class IntroduceRandolph(GraphScene):
def construct(self):
GraphScene.construct(self)
randy = Randolph().move_to((-3, 0, 0))
name = TextMobject("Randolph")
self.play(Transform(
randy,
deepcopy(randy).scale(RANDOLPH_SCALE_FACTOR).move_to(self.points[0]),
))
self.wait()
name.shift((0, 1, 0))
self.add(name)
self.wait()
class DefineSpanningTree(GraphScene):
def construct(self):
GraphScene.construct(self)
randy = Randolph()
randy.scale(RANDOLPH_SCALE_FACTOR).move_to(self.points[0])
dollar_signs = TextMobject("\\$\\$")
dollar_signs.scale(EDGE_ANNOTATION_SCALE_FACTOR)
dollar_signs = Mobject(*[
deepcopy(dollar_signs).shift(edge.get_center())
for edge in self.edges
])
unneeded = TextMobject("unneeded!")
unneeded.scale(EDGE_ANNOTATION_SCALE_FACTOR)
self.generate_spanning_tree()
def green_dot_at_index(index):
return Dot(
self.points[index],
radius = 2*Dot.DEFAULT_RADIUS,
color = "lightgreen",
)
def out_of_spanning_set(point_pair):
stip = self.spanning_tree_index_pairs
return point_pair not in stip and \
tuple(reversed(point_pair)) not in stip
self.add(randy)
self.accent_vertices(run_time = 2.0)
self.add(dollar_signs)
self.wait(2)
self.remove(dollar_signs)
run_time_per_branch = 0.5
self.play(
ShowCreation(green_dot_at_index(0)),
run_time = run_time_per_branch
)
for pair in self.spanning_tree_index_pairs:
self.play(ShowCreation(
Line(
self.points[pair[0]],
self.points[pair[1]]
).set_color("yellow"),
run_time = run_time_per_branch
))
self.play(ShowCreation(
green_dot_at_index(pair[1]),
run_time = run_time_per_branch
))
self.wait(2)
unneeded_edges = filter(out_of_spanning_set, self.graph.edges)
for edge, limit in zip(unneeded_edges, range(5)):
line = Line(self.points[edge[0]], self.points[edge[1]])
line.set_color("red")
self.play(ShowCreation(line, run_time = 1.0))
self.add(unneeded.center().shift(line.get_center() + 0.2*UP))
self.wait()
self.remove(line, unneeded)
class NamingTree(GraphScene):
def construct(self):
GraphScene.construct(self)
self.generate_spanning_tree()
self.generate_treeified_spanning_tree()
branches = self.spanning_tree.split()
branches_copy = deepcopy(branches)
treeified_branches = self.treeified_spanning_tree.split()
tree = TextMobject("``Tree''").to_edge(UP)
spanning_tree = TextMobject("``Spanning Tree''").to_edge(UP)
self.add(*branches)
self.play(
FadeOut(Mobject(*self.edges + self.vertices)),
Animation(Mobject(*branches)),
)
self.clear()
self.add(tree, *branches)
self.wait()
self.play(*[
Transform(b1, b2, run_time = 2)
for b1, b2 in zip(branches, treeified_branches)
])
self.wait()
self.play(*[
FadeIn(mob)
for mob in self.edges + self.vertices
] + [
Transform(b1, b2, run_time = 2)
for b1, b2 in zip(branches, branches_copy)
])
self.accent_vertices(run_time = 2)
self.remove(tree)
self.add(spanning_tree)
self.wait(2)
class DualGraph(GraphScene):
def construct(self):
GraphScene.construct(self)
self.generate_dual_graph()
self.add(TextMobject("Dual Graph").to_edge(UP).shift(2*LEFT))
self.play(*[
ShowCreation(mob)
for mob in self.dual_edges + self.dual_vertices
])
self.wait()
class FacebookLogo(Scene):
def construct(self):
im = ImageMobject("facebook_full_logo", invert = False)
self.add(im.scale(0.7))
class FacebookGraph(GraphScene):
def construct(self):
GraphScene.construct(self)
account = ImageMobject("facebook_silhouette", invert = False)
account.scale(0.05)
logo = ImageMobject("facebook_logo", invert = False)
logo.scale(0.1)
logo.shift(0.2*LEFT + 0.1*UP)
account.add(logo).center()
account.shift(0.2*LEFT + 0.1*UP)
friends = TexMobject(
"\\leftarrow \\text{friends} \\rightarrow"
).scale(0.5*EDGE_ANNOTATION_SCALE_FACTOR)
self.clear()
accounts = [
deepcopy(account).shift(point)
for point in self.points
]
self.add(*accounts)
self.wait()
self.annotate_edges(friends)
self.wait()
self.play(*[
CounterclockwiseTransform(account, vertex)
for account, vertex in zip(accounts, self.vertices)
])
self.wait()
self.play(*[
Transform(ann, edge)
for ann, edge in zip(self.edge_annotations, self.edges)
])
self.wait()
class FacebookGraphAsAbstractSet(Scene):
def construct(self):
names = [
"Louis",
"Randolph",
"Mortimer",
"Billy Ray",
"Penelope",
]
friend_pairs = [
(0, 1),
(0, 2),
(1, 2),
(3, 0),
(4, 0),
(1, 3),
(1, 2),
]
names_string = "\\\\".join(names + ["$\\vdots$"])
friends_string = "\\\\".join([
"\\text{%s}&\\leftrightarrow\\text{%s}"%(names[i],names[j])
for i, j in friend_pairs
] + ["\\vdots"])
names_mob = TextMobject(names_string).shift(3*LEFT)
friends_mob = TexMobject(
friends_string, size = "\\Large"
).shift(3*RIGHT)
accounts = TextMobject("\\textbf{Accounts}")
accounts.shift(3*LEFT).to_edge(UP)
friendships = TextMobject("\\textbf{Friendships}")
friendships.shift(3*RIGHT).to_edge(UP)
lines = Mobject(
Line(UP*FRAME_Y_RADIUS, DOWN*FRAME_Y_RADIUS),
Line(LEFT*FRAME_X_RADIUS + 3*UP, RIGHT*FRAME_X_RADIUS + 3*UP)
).set_color("white")
self.add(accounts, friendships, lines)
self.wait()
for mob in names_mob, friends_mob:
self.play(ShowCreation(
mob, run_time = 1.0
))
self.wait()
class ExamplesOfGraphs(GraphScene):
def construct(self):
buff = 0.5
self.graph.vertices = map(
lambda v : v + DOWN + RIGHT,
self.graph.vertices
)
GraphScene.construct(self)
self.generate_regions()
objects, notions = Mobject(*TextMobject(
["Objects \\quad\\quad ", "Thing that connects objects"]
)).to_corner().shift(0.5*RIGHT).split()
horizontal_line = Line(
(-FRAME_X_RADIUS, FRAME_Y_RADIUS-1, 0),
(max(notions.points[:,0]), FRAME_Y_RADIUS-1, 0)
)
vert_line_x_val = min(notions.points[:,0]) - buff
vertical_line = Line(
(vert_line_x_val, FRAME_Y_RADIUS, 0),
(vert_line_x_val,-FRAME_Y_RADIUS, 0)
)
objects_and_notions = [
("Facebook accounts", "Friendship"),
("English Words", "Differ by One Letter"),
("Mathematicians", "Coauthorship"),
("Neurons", "Synapses"),
(
"Regions our graph \\\\ cuts the plane into",
"Shared edges"
)
]
self.clear()
self.add(objects, notions, horizontal_line, vertical_line)
for (obj, notion), height in zip(objects_and_notions, it.count(2, -1)):
obj_mob = TextMobject(obj, size = "\\small").to_edge(LEFT)
not_mob = TextMobject(notion, size = "\\small").to_edge(LEFT)
not_mob.shift((vert_line_x_val + FRAME_X_RADIUS)*RIGHT)
obj_mob.shift(height*UP)
not_mob.shift(height*UP)
if obj.startswith("Regions"):
self.handle_dual_graph(obj_mob, not_mob)
elif obj.startswith("English"):
self.handle_english_words(obj_mob, not_mob)
else:
self.add(obj_mob)
self.wait()
self.add(not_mob)
self.wait()
def handle_english_words(self, words1, words2):
words = map(TextMobject, ["graph", "grape", "gape", "gripe"])
words[0].shift(RIGHT)
words[1].shift(3*RIGHT)
words[2].shift(3*RIGHT + 2*UP)
words[3].shift(3*RIGHT + 2*DOWN)
lines = [
Line(*pair)
for pair in [
(
words[0].get_center() + RIGHT*words[0].get_width()/2,
words[1].get_center() + LEFT*words[1].get_width()/2
),(
words[1].get_center() + UP*words[1].get_height()/2,
words[2].get_center() + DOWN*words[2].get_height()/2
),(
words[1].get_center() + DOWN*words[1].get_height()/2,
words[3].get_center() + UP*words[3].get_height()/2
)
]
]
comp_words = Mobject(*words)
comp_lines = Mobject(*lines)
self.add(words1)
self.play(ShowCreation(comp_words, run_time = 1.0))
self.wait()
self.add(words2)
self.play(ShowCreation(comp_lines, run_time = 1.0))
self.wait()
self.remove(comp_words, comp_lines)
def handle_dual_graph(self, words1, words2):
words1.set_color("yellow")
words2.set_color("yellow")
connected = TextMobject("Connected")
connected.set_color("lightgreen")
not_connected = TextMobject("Not Connected")
not_connected.set_color("red")
for mob in connected, not_connected:
mob.shift(self.points[3] + UP)
self.play(*[
ShowCreation(mob, run_time = 1.0)
for mob in self.edges + self.vertices
])
self.wait()
for region in self.regions:
self.set_color_region(region)
self.add(words1)
self.wait()
self.reset_background()
self.add(words2)
region_pairs = it.combinations(self.graph.region_cycles, 2)
for x in range(6):
want_matching = (x%2 == 0)
found = False
while True:
try:
                    cycle1, cycle2 = next(region_pairs)
                except StopIteration:
                    return
shared = set(cycle1).intersection(cycle2)
if len(shared) == 2 and want_matching:
break
if len(shared) != 2 and not want_matching:
break
for cycle in cycle1, cycle2:
index = self.graph.region_cycles.index(cycle)
self.set_color_region(self.regions[index])
if want_matching:
self.remove(not_connected)
self.add(connected)
tup = tuple(shared)
if tup not in self.graph.edges:
tup = tuple(reversed(tup))
edge = deepcopy(self.edges[self.graph.edges.index(tup)])
edge.set_color("red")
self.play(ShowCreation(edge), run_time = 1.0)
self.wait()
self.remove(edge)
else:
self.remove(connected)
self.add(not_connected)
self.wait(2)
self.reset_background()
class DrawDualGraph(GraphScene):
def construct(self):
GraphScene.construct(self)
self.generate_regions()
self.generate_dual_graph()
region_mobs = [
ImageMobject(disp.paint_region(reg, self.background), invert = False)
for reg in self.regions
]
for region, mob in zip(self.regions, region_mobs):
self.set_color_region(region, mob.get_color())
outer_region = self.regions.pop()
outer_region_mob = region_mobs.pop()
outer_dual_vertex = self.dual_vertices.pop()
internal_edges = filter(
lambda e : abs(e.start[0]) < FRAME_X_RADIUS and \
abs(e.end[0]) < FRAME_X_RADIUS and \
abs(e.start[1]) < FRAME_Y_RADIUS and \
abs(e.end[1]) < FRAME_Y_RADIUS,
self.dual_edges
)
external_edges = filter(
lambda e : e not in internal_edges,
self.dual_edges
)
self.wait()
self.reset_background()
self.set_color_region(outer_region, outer_region_mob.get_color())
self.play(*[
Transform(reg_mob, dot)
for reg_mob, dot in zip(region_mobs, self.dual_vertices)
])
self.wait()
self.reset_background()
self.play(ApplyFunction(
lambda p : (FRAME_X_RADIUS + FRAME_Y_RADIUS)*p/np.linalg.norm(p),
outer_region_mob
))
self.wait()
for edges in internal_edges, external_edges:
self.play(*[
ShowCreation(edge, run_time = 2.0)
for edge in edges
])
self.wait()
class EdgesAreTheSame(GraphScene):
def construct(self):
GraphScene.construct(self)
self.generate_dual_graph()
self.remove(*self.vertices)
self.add(*self.dual_edges)
self.wait()
self.play(*[
Transform(*pair, run_time = 2.0)
for pair in zip(self.dual_edges, self.edges)
])
self.wait()
self.add(
TextMobject("""
(Or at least I would argue they should \\\\
be thought of as the same thing.)
""", size = "\\small").to_edge(UP)
)
self.wait()
class ListOfCorrespondances(Scene):
def construct(self):
buff = 0.5
correspondances = [
["Regions cut out by", "Vertices of"],
["Edges of", "Edges of"],
["Cycles of", "Connected components of"],
["Connected components of", "Cycles of"],
["Spanning tree in", "Complement of spanning tree in"],
["", "Dual of"],
]
for corr in correspondances:
corr[0] += " original graph"
corr[1] += " dual graph"
arrow = TexMobject("\\leftrightarrow", size = "\\large")
lines = []
for corr, height in zip(correspondances, it.count(3, -1)):
left = TextMobject(corr[0], size = "\\small")
right = TextMobject(corr[1], size = "\\small")
this_arrow = deepcopy(arrow)
for mob in left, right, this_arrow:
mob.shift(height*UP)
arrow_xs = this_arrow.points[:,0]
left.to_edge(RIGHT)
left.shift((min(arrow_xs) - FRAME_X_RADIUS, 0, 0))
right.to_edge(LEFT)
right.shift((max(arrow_xs) + FRAME_X_RADIUS, 0, 0))
lines.append(Mobject(left, right, this_arrow))
last = None
for line in lines:
self.add(line.set_color("yellow"))
if last:
last.set_color("white")
last = line
self.wait(1)
class CyclesCorrespondWithConnectedComponents(GraphScene):
args_list = [(SampleGraph(),)]
def construct(self):
GraphScene.construct(self)
self.generate_regions()
self.generate_dual_graph()
cycle = [4, 2, 1, 5, 4]
enclosed_regions = [0, 2, 3, 4]
dual_cycle = DUAL_CYCLE
enclosed_vertices = [0, 1]
randy = Randolph()
randy.scale(RANDOLPH_SCALE_FACTOR)
randy.move_to(self.points[cycle[0]])
lines_to_remove = []
for last, next in zip(cycle, cycle[1:]):
line = Line(self.points[last], self.points[next])
line.set_color("yellow")
self.play(
ShowCreation(line),
WalkPiCreature(randy, self.points[next]),
run_time = 1.0
)
lines_to_remove.append(line)
self.wait()
self.remove(randy, *lines_to_remove)
for region in np.array(self.regions)[enclosed_regions]:
self.set_color_region(region)
self.wait(2)
self.reset_background()
lines = Mobject(*[
Line(self.dual_points[last], self.dual_points[next])
for last, next in zip(dual_cycle, dual_cycle[1:])
]).set_color("red")
self.play(ShowCreation(lines))
self.play(*[
Transform(v, Dot(
v.get_center(),
radius = 3*Dot.DEFAULT_RADIUS
).set_color("green"))
for v in np.array(self.vertices)[enclosed_vertices]
] + [
ApplyMethod(self.edges[0].set_color, "green")
])
self.wait()
class IntroduceMortimer(GraphScene):
args_list = [(SampleGraph(),)]
def construct(self):
GraphScene.construct(self)
self.generate_dual_graph()
self.generate_regions()
randy = Randolph().shift(LEFT)
morty = Mortimer().shift(RIGHT)
name = TextMobject("Mortimer")
name.shift(morty.get_center() + 1.2*UP)
randy_path = (0, 1, 3)
morty_path = (-2, -3, -4)
morty_crossed_lines = [
Line(self.points[i], self.points[j]).set_color("red")
for i, j in [(7, 1), (1, 5)]
]
kwargs = {"run_time" : 1.0}
self.clear()
self.add(randy)
self.wait()
self.add(morty, name)
self.wait()
self.remove(name)
small_randy = deepcopy(randy).scale(RANDOLPH_SCALE_FACTOR)
small_morty = deepcopy(morty).scale(RANDOLPH_SCALE_FACTOR)
small_randy.move_to(self.points[randy_path[0]])
small_morty.move_to(self.dual_points[morty_path[0]])
self.play(*[
FadeIn(mob)
for mob in self.vertices + self.edges
] + [
Transform(randy, small_randy),
Transform(morty, small_morty),
], **kwargs)
self.wait()
self.set_color_region(self.regions[morty_path[0]])
for last, next in zip(morty_path, morty_path[1:]):
self.play(WalkPiCreature(morty, self.dual_points[next]),**kwargs)
self.set_color_region(self.regions[next])
self.wait()
for last, next in zip(randy_path, randy_path[1:]):
line = Line(self.points[last], self.points[next])
line.set_color("yellow")
self.play(
WalkPiCreature(randy, self.points[next]),
ShowCreation(line),
**kwargs
)
self.wait()
self.play(*[
ApplyMethod(
line.rotate_in_place,
np.pi/10,
rate_func = wiggle)
for line in morty_crossed_lines
], **kwargs)
self.wait()
class RandolphMortimerSpanningTreeGame(GraphScene):
args_list = [(SampleGraph(),)]
def construct(self):
GraphScene.construct(self)
self.generate_spanning_tree()
self.generate_dual_graph()
self.generate_regions()
randy = Randolph().scale(RANDOLPH_SCALE_FACTOR)
morty = Mortimer().scale(RANDOLPH_SCALE_FACTOR)
randy.move_to(self.points[0])
morty.move_to(self.dual_points[0])
attempted_dual_point_index = 2
region_ordering = [0, 1, 7, 2, 3, 5, 4, 6]
dual_edges = [1, 3, 4, 7, 11, 9, 13]
time_per_dual_edge = 0.5
self.add(randy, morty)
self.play(ShowCreation(self.spanning_tree))
self.wait()
self.play(WalkPiCreature(
morty, self.dual_points[attempted_dual_point_index],
rate_func = lambda t : 0.3 * there_and_back(t),
run_time = 2.0,
))
self.wait()
for index in range(len(self.regions)):
# if index > 0:
# edge = self.edges[dual_edges[index-1]]
# midpoint = edge.get_center()
# self.play(*[
# ShowCreation(Line(
# midpoint,
# tip
# ).set_color("red"))
# for tip in edge.start, edge.end
# ], run_time = time_per_dual_edge)
self.set_color_region(self.regions[region_ordering[index]])
self.wait(time_per_dual_edge)
self.wait()
cycle_index = region_ordering[-1]
cycle = self.graph.region_cycles[cycle_index]
self.set_color_region(self.regions[cycle_index], "black")
self.play(ShowCreation(Mobject(*[
Line(self.points[last], self.points[next]).set_color("green")
for last, next in zip(cycle, list(cycle)[1:] + [cycle[0]])
])))
self.wait()
class MortimerCannotTraverseCycle(GraphScene):
args_list = [(SampleGraph(),)]
def construct(self):
GraphScene.construct(self)
self.generate_dual_graph()
dual_cycle = DUAL_CYCLE
trapped_points = [0, 1]
morty = Mortimer().scale(RANDOLPH_SCALE_FACTOR)
morty.move_to(self.dual_points[dual_cycle[0]])
time_per_edge = 0.5
text = TextMobject("""
One of these lines must be included
in the spanning tree if those two inner
vertices are to be reached.
""").scale(0.7).to_edge(UP)
all_lines = []
matching_edges = []
kwargs = {"run_time" : time_per_edge, "rate_func" : None}
for last, next in zip(dual_cycle, dual_cycle[1:]):
line = Line(self.dual_points[last], self.dual_points[next])
line.set_color("red")
self.play(
WalkPiCreature(morty, self.dual_points[next], **kwargs),
ShowCreation(line, **kwargs),
)
all_lines.append(line)
center = line.get_center()
            distances = list(map(  # materialize so .index() works on Python 3
                lambda e : np.linalg.norm(center - e.get_center()),
                self.edges
            ))
matching_edges.append(
self.edges[distances.index(min(distances))]
)
self.play(*[
Transform(v, Dot(
v.get_center(),
radius = 3*Dot.DEFAULT_RADIUS,
color = "green"
))
for v in np.array(self.vertices)[trapped_points]
])
self.add(text)
self.play(*[
Transform(line, deepcopy(edge).set_color(line.get_color()))
for line, edge in zip(all_lines, matching_edges)
])
self.wait()
class TwoPropertiesOfSpanningTree(Scene):
def construct(self):
spanning, tree = TextMobject(
["Spanning ", "Tree"],
size = "\\Huge"
).split()
spanning_explanation = TextMobject("""
Touches every vertex
""").shift(spanning.get_center() + 2*DOWN)
tree_explanation = TextMobject("""
No Cycles
""").shift(tree.get_center() + 2*UP)
self.add(spanning, tree)
self.wait()
for word, explanation, vect in [
(spanning, spanning_explanation, 0.5*UP),
(tree, tree_explanation, 0.5*DOWN)
]:
self.add(explanation)
self.add(Arrow(
explanation.get_center() + vect,
tail = word.get_center() - vect,
))
self.play(ApplyMethod(word.set_color, "yellow"))
self.wait()
class DualSpanningTree(GraphScene):
def construct(self):
GraphScene.construct(self)
self.generate_dual_graph()
self.generate_spanning_tree()
randy = Randolph()
randy.scale(RANDOLPH_SCALE_FACTOR)
randy.move_to(self.points[0])
morty = Mortimer()
morty.scale(RANDOLPH_SCALE_FACTOR)
morty.move_to(self.dual_points[0])
dual_edges = [1, 3, 4, 7, 11, 9, 13]
words = TextMobject("""
The red edges form a spanning tree of the dual graph!
""").to_edge(UP)
self.add(self.spanning_tree, randy, morty)
self.play(ShowCreation(Mobject(
*np.array(self.edges)[dual_edges]
).set_color("red")))
self.add(words)
self.wait()
class TreeCountFormula(Scene):
def construct(self):
time_per_branch = 0.5
text = TextMobject("""
In any tree:
$$E + 1 = V$$
""")
gs = GraphScene(SampleGraph())
gs.generate_treeified_spanning_tree()
branches = gs.treeified_spanning_tree.to_edge(LEFT).split()
all_dots = [Dot(branches[0].points[0])]
self.add(text, all_dots[0])
for branch in branches:
self.play(
ShowCreation(branch),
run_time = time_per_branch
)
dot = Dot(branch.points[-1])
self.add(dot)
all_dots.append(dot)
self.wait()
self.remove(*all_dots)
self.play(
FadeOut(text),
FadeIn(Mobject(*gs.edges + gs.vertices)),
*[
Transform(*pair)
for pair in zip(branches,gs.spanning_tree.split())
]
)
class FinalSum(Scene):
def construct(self):
lines = TexMobject([
"(\\text{Number of Randolph's Edges}) + 1 &= V \\\\ \n",
"(\\text{Number of Mortimer's Edges}) + 1 &= F \\\\ \n",
" \\Downarrow \\\\", "E","+","2","&=","V","+","F",
], size = "\\large").split()
for line in lines[:2] + [Mobject(*lines[2:])]:
self.add(line)
self.wait()
self.wait()
symbols = V, minus, E, plus, F, equals, two = TexMobject(
"V - E + F = 2".split(" ")
)
plus = TexMobject("+")
anims = []
for mob, index in zip(symbols, [-3, -2, -7, -6, -1, -4, -5]):
copy = plus if index == -2 else deepcopy(mob)
copy.center().shift(lines[index].get_center())
copy.scale_in_place(lines[index].get_width()/mob.get_width())
anims.append(CounterclockwiseTransform(copy, mob))
self.clear()
self.play(*anims, run_time = 2.0)
self.wait()
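# Sanity check of the algebra animated above (plain arithmetic, not manim):
# Randolph's edges satisfy E_R + 1 = V and Mortimer's satisfy E_M + 1 = F;
# since every edge ends up claimed by exactly one of the two spanning trees,
# E_R + E_M = E, and adding the two equations gives E + 2 = V + F, i.e.
# Euler's formula V - E + F = 2. For a cube graph drawn in the plane:
# V = 8, E = 12, F = 6 and 8 - 12 + 6 = 2.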
if __name__ == "__main__":
command_line_create_scene(MOVIE_PREFIX)
# ---- setup.py | diogobenica/zoom-zt2 | license: MIT ----
from cx_Freeze import setup, Executable
base = None
executables = [Executable("zoomzt2-gui.py", base=base)]
packages = ["construct", "os", "optparse", "sys", "binascii", "mido", "rtmidi_python"]
options = {
'build_exe': {
'packages':packages,
'excludes':["pygame", "numpy"],
},
}
setup(
name = "zoomzt2-gui.py",
options = options,
version = "0.2.0.0",
description = 'Script for Upload Effects/Configuration to ZOOM G Series Pedals',
executables = executables
)
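# A typical cx_Freeze build for this script (command assumed, run from the
# directory containing this setup.py) is:
#
#     python setup.py build
#
# which freezes zoomzt2-gui.py and its listed packages into a build/ folder.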
# ---- KitronikInterfaceForFischertechnik.py | KitronikLtd/microbit.micropython.modules | license: MIT ----
# microbit-module: kitronikInterfaceForFischertechnik@0.0.1
from microbit import *
import math
class Interface:
def motorOn(self, motor, direction, speed):
if speed > 100:
speed = 100
elif speed < 0:
speed = 0
speed = speed * 10
if motor == "Motor1":
if direction == "forward":
pin8.write_analog(speed)
pin12.write_digital(0)
elif direction == "reverse":
pin12.write_analog(speed)
pin8.write_digital(0)
elif motor == "Motor2":
if direction == "forward":
pin16.write_analog(speed)
pin2.write_digital(0)
elif direction == "reverse":
pin2.write_analog(speed)
pin16.write_digital(0)
def motorOff(self, motor):
if motor == "Motor1":
pin12.write_digital(0)
pin8.write_digital(0)
elif motor == "Motor2":
pin2.write_digital(0)
pin16.write_digital(0)
def led(self, pinSelection, illumination):
if pinSelection == "P0":
if illumination == "on":
pin0.write_digital(1)
elif illumination == "off":
pin0.write_digital(0)
elif pinSelection == "P1":
if illumination == "on":
pin1.write_digital(1)
elif illumination == "off":
pin1.write_digital(0)
def phototransistor(self, pinSelection):
if pinSelection == "P0":
reading = pin0.read_analog()
elif pinSelection == "P1":
reading = pin1.read_analog()
return reading
def ntc(self, pinSelection):
if pinSelection == "P0":
reading = pin0.read_analog()
elif pinSelection == "P1":
reading = pin1.read_analog()
        convertReading = reading * (3.3/1024) # convert reading to voltage: reading x (supply voltage / ADC resolution)
ntcResistor = 3.3/((3.3-convertReading)/4700) # calculate resistance
temperatureC = (3880/math.log(ntcResistor/0.13)) - 273.15
        return temperatureC
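# Worked example for Interface.ntc() above (illustrative numbers, not taken
# from a datasheet): a raw ADC reading of 512 gives
# convertReading = 512 * 3.3 / 1024 = 1.65 V,
# ntcResistor = 3.3 / ((3.3 - 1.65) / 4700) = 9400 ohms, and
# temperatureC = 3880 / ln(9400 / 0.13) - 273.15, roughly 74 degrees Celsius
# by this formula.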
# ---- cantools/subparsers/decode.py | rettenbs/cantools | license: MIT ----
import argparse
import sys
from argparse_addons import Integer
from .. import database
from .. import logreader
from .__utils__ import format_message_by_frame_id
def _do_decode(args):
dbase = database.load_file(args.database,
encoding=args.encoding,
frame_id_mask=args.frame_id_mask,
prune_choices=not args.no_prune,
strict=not args.no_strict)
decode_choices = not args.no_decode_choices
parser = logreader.Parser(sys.stdin)
for line, frame in parser.iterlines(keep_unknowns=True):
if frame is not None:
line += ' ::'
line += format_message_by_frame_id(dbase,
frame.frame_id,
frame.data,
decode_choices,
args.single_line)
print(line)
def add_subparser(subparsers):
decode_parser = subparsers.add_parser(
'decode',
description=('Decode "candump" CAN frames read from standard input '
'and print them in a human readable format.'),
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
decode_parser.add_argument(
'-c', '--no-decode-choices',
action='store_true',
help='Do not convert scaled values to choice strings.')
decode_parser.add_argument(
'-s', '--single-line',
action='store_true',
help='Print the decoded message on a single line.')
decode_parser.add_argument(
'-e', '--encoding',
help='File encoding.')
decode_parser.add_argument(
'--no-prune',
action='store_true',
help='Refrain from shortening the names of named signal values.')
decode_parser.add_argument(
'--no-strict',
action='store_true',
help='Skip database consistency checks.')
decode_parser.add_argument(
'-m', '--frame-id-mask',
type=Integer(0),
help=('Only compare selected frame id bits to find the message in the '
'database. By default the candump and database frame ids must '
'be equal for a match.'))
decode_parser.add_argument(
'database',
help='Database file.')
decode_parser.set_defaults(func=_do_decode)
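# Typical use of the subcommand defined above (invocation assumed; the
# database filename is hypothetical):
#
#     candump can0 | cantools decode motohawk.dbc
#
# which decodes each candump line read on stdin against the given database.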
# ---- external/plex/dist/tests/test4.py | almartin82/bayeslite | license: Apache-2.0 ----
import Test
import sys
from Plex import *
letters = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz_"
wax = Any("(")
wane = Any(")")
letter = Any(letters)
space = Any(" \t\n")
def open_paren(s, t):
s.counter = s.counter + 1
def close_paren(s, t):
s.counter = s.counter - 1
def got_a_letter(s, t):
if s.counter == 0:
return 'letter'
else:
return None
lex = Lexicon([
(wax, open_paren),
(wane, close_paren),
(letter, got_a_letter),
(space, IGNORE)
],
debug = Test.debug,
timings = sys.stderr
)
class MyScanner(Scanner):
counter = 0
trace = 0
Test.run(lex, "test4", scanner_class = MyScanner, trace = 0)
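# What this lexicon does: open_paren/close_paren track bracket depth on the
# scanner's counter, and got_a_letter only emits a 'letter' token at depth
# zero. So for an input like "ab(cd)e" (hypothetical), 'a', 'b' and 'e' come
# back as 'letter' tokens while 'c' and 'd' are consumed silently inside the
# parentheses.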
# ---- src/project2/lr-mnist.py | frillecode/VisualAnalytics2021 | license: MIT ----
#!/usr/bin/python
# Import necessary libraries/modules
import os
import argparse
import numpy as np
import cv2
from sklearn import metrics
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
class LogRegMNIST():
"""This is a class for performing a Logistic Regression classification on the MNIST dataset.
"""
def __init__(self, digits, args):
self.args = args
self.X = digits.data.astype("float") #extracting data
self.y = digits.target #extracting labels
def split(self):
""" Function for splitting MNIST dataset into train and test sets.
"""
# Normalize (MinMax regularization)
self.X = (self.X - self.X.min())/(self.X.max() - self.X.min())
# Split into train and test set
self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(self.X,
self.y,
random_state=self.args['random_state'],
train_size=1-self.args['test_size'],
test_size=self.args['test_size'])
def train_model(self):
"""Function for training the Logistic Regression classifier.
"""
# Initialise model and fit that model to the training data and labels
self.clf = LogisticRegression(penalty='none',
tol=0.1,
solver='saga',
multi_class='multinomial').fit(self.X_train, self.y_train)
def calc_eval_metrics(self):
"""Function for calculating evaluation metrics.
Input:
clf: trained Logistic Regression classifier
"""
        # Take the trained model and use it to predict the test classes
self.y_pred = self.clf.predict(self.X_test)
# Calculate evaluation metrics
cm = metrics.classification_report(self.y_test, self.y_pred)
return cm
def save_eval_metrics(self, cm):
"""Function for saving file with evaluation metrics.
Input:
cm: evaluation metrics
"""
# Specifying output path
outpath = os.path.join("out", f"{self.args['filename']}.txt")
# Writing file
with open(outpath, "w", encoding="utf-8") as file:
file.write(cm)
def run_classifier(self):
"""Function for running all functions within the class in the correct order.
"""
# Splitting data
self.split()
# Train model
self.train_model()
# Calculate evaluation metrics
cm = self.calc_eval_metrics()
# Print evaluation metrics
print(f"\n EVALUATION METRICS: \n {cm}")
# Save evaluation metrics
self.save_eval_metrics(cm)
# Creating a function that checks whether a given value is between 0 and 1 and return an error if it is not. This is used to ensure that only a test_size-argument within the correct range can be parsed in the command-line.
def percentFloat(string):
value = float(string)
if value < 0 or value > 1:
raise argparse.ArgumentTypeError('Value has to be between 0 and 1')
return value
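# For example:
#   percentFloat("0.2")  -> 0.2
#   percentFloat("1.5")  -> raises argparse.ArgumentTypeError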
# Defining main function
def main():
ap = argparse.ArgumentParser(description="[INFO] This script uses the full MNIST data set, trains a Logistic Regression Classifier, and prints and saves the evaluation metrics to the terminal.")
# Argument for specifying a random-state value
ap.add_argument("-rs",
"--random_state",
required=False,
type=int,
default=9,
help="int, value for random state of model")
# Argument for specifying size of test set
ap.add_argument("-ts",
"--test_size",
required=False,
type=percentFloat, #here I use the function I created above
default=0.2,
help="float, proportional size of test set (must be number between 0 and 1)")
# Argument for specifying filename of evaluation metrics
ap.add_argument("-fn",
"--filename",
required=False,
type=str,
default="evaluation_metrics_LR",
help="str, filename for saving the evaluation metrics")
args = vars(ap.parse_args())
# Loading data
digits = datasets.load_digits()
# Turning into LogRegMNIST object (the class I created above)
logreg = LogRegMNIST(digits, args)
# Perform classification
logreg.run_classifier()
# Define behaviour when called from command line
if __name__=="__main__":
main()
    print("[INFO] The evaluation metrics have been saved in 'out/'.")
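# Example invocation from the command line (argument values illustrative; an
# out/ directory is assumed to exist):
#
#     python lr-mnist.py --random_state 9 --test_size 0.2 --filename evaluation_metrics_LR
#
# which trains the classifier, prints the metrics, and writes
# out/evaluation_metrics_LR.txt.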
# ---- tests/python/relay/test_ir_nodes.py | Xuyuanjia2014/tvm | licenses: Zlib, Unlicense, Apache-2.0, BSD-2-Clause, MIT, ECL-2.0, BSD-3-Clause ----
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" test ir"""
import pytest
import tvm
from tvm import te
from tvm import relay
from tvm.tir.expr import *
from tvm.relay import op
import numpy as np
def check_json_roundtrip(node):
json_str = tvm.ir.save_json(node)
back = tvm.ir.load_json(json_str)
assert tvm.ir.structural_equal(back, node, map_free_vars=True)
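# For example (illustrative), check_json_roundtrip(relay.Var("x")) asserts
# that save_json followed by load_json preserves structural equality.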
# Span
def test_span():
span = relay.Span(None, 1, 2, 3, 4)
assert span.source_name == None
assert span.line == 1
assert span.end_line == 2
assert span.column == 3
assert span.end_column == 4
assert span.same_as(span)
assert span == span
assert isinstance(span, relay.base.Span)
str(span)
# span is not a node so we can't use graph_equal
# to test the round trip
back = tvm.ir.load_json(tvm.ir.save_json(span))
assert back.source_name == span.source_name
assert back.line == span.line
assert back.end_line == span.end_line
assert back.column == span.column
assert back.end_column == span.end_column
def test_constant():
arr = tvm.nd.array(10)
const = relay.Constant(arr)
assert const.data == arr
assert const.span == None
str(const)
check_json_roundtrip(const)
def test_tuple():
fields = tvm.runtime.convert([])
tup = relay.Tuple(fields)
assert tup.fields == fields
assert tup.span == None
str(tup)
check_json_roundtrip(tup)
def test_local_var():
name_hint = "s"
lv = relay.Var(name_hint)
assert lv.name_hint == name_hint
assert lv.type_annotation is None
# assert lv.span == None todo(@jroesch): what do we do about spans
str(lv)
check_json_roundtrip(lv)
t1 = relay.ty.TensorType((), "float")
lv = relay.Var(name_hint, t1)
assert lv.name_hint == name_hint
assert lv.type_annotation == t1
def test_global_var():
name_hint = "g"
gv = relay.GlobalVar(name_hint)
    assert gv.name_hint == name_hint
# assert lv.span == None todo(@jroesch): what do we do about spans
str(gv)
check_json_roundtrip(gv)
def test_function():
param_names = ["a", "b", "c", "d"]
params = tvm.runtime.convert([relay.Var(n) for n in param_names])
ret_type = relay.TupleType(tvm.runtime.convert([]))
body = relay.Tuple(tvm.runtime.convert([]))
type_params = tvm.runtime.convert([])
fn = relay.Function(params, body, ret_type, type_params)
fn = fn.with_attr("test_attribute", "value")
fn = fn.with_attr("test_attribute1", "value1")
assert fn.params == params
assert fn.body == body
assert fn.type_params == type_params
assert fn.span == None
assert fn.attrs["test_attribute"] == "value"
assert fn.attrs["test_attribute1"] == "value1"
str(fn)
check_json_roundtrip(fn)
def test_function_attrs():
param_names = ["a", "b", "c", "d"]
params = tvm.runtime.convert([relay.var(n, shape=(5, 2)) for n in param_names])
ret_type = relay.TupleType(tvm.runtime.convert([]))
body = relay.Tuple(tvm.runtime.convert([]))
type_params = tvm.runtime.convert([])
fn = relay.Function(params, body, ret_type, type_params)
model_params = {}
for param in params[:1]:
cty = param.type_annotation
tensor = np.random.rand(*[int(sh) for sh in cty.shape]).astype(cty.dtype)
model_params[param] = relay.Constant(tvm.nd.array(tensor))
fn = fn.with_attr("__params__", model_params)
assert fn.params == params
assert fn.body == body
assert fn.type_params == type_params
assert fn.span == None
str(fn)
check_json_roundtrip(fn)
json_str = tvm.ir.save_json(fn)
fn_after = tvm.ir.load_json(json_str)
model_params_after = fn_after.attrs["__params__"]
after_keys = [item[0] for item in model_params_after.items()]
for key1, key2 in zip(model_params, after_keys):
assert key1.name_hint == key2.name_hint
p1 = model_params[key1]
p2 = model_params_after[key2]
np.testing.assert_allclose(p1.data.numpy(), p2.data.numpy())
def test_call():
op = relay.Var("f")
arg_names = ["a", "b", "c", "d"]
args = tvm.runtime.convert([relay.Var(n) for n in arg_names])
call = relay.Call(op, args, None, None)
assert call.op == op
assert call.args == args
assert call.span == None
str(call)
check_json_roundtrip(call)
def test_let():
lv = relay.Var("x")
ty = None
arr = tvm.nd.array(10)
value = relay.Constant(arr)
# I would prefer that the order of arguments
# matches syntax let x: t = v in b
let = relay.Let(lv, value, lv)
assert let.var == lv
assert let.value == value
assert let.body == lv
assert let.span == None
str(let)
check_json_roundtrip(let)
def test_if():
cond = relay.Var("cond")
left = relay.Var("left")
right = relay.Var("right")
ife = relay.If(cond, left, right)
assert ife.cond == cond
assert ife.true_branch == left
assert ife.false_branch == right
assert ife.span == None
str(ife)
check_json_roundtrip(ife)
def test_tuple_get_item():
tup = relay.Var("tuple")
get = relay.TupleGetItem(tup, 1)
assert get.tuple_value == tup
assert get.index == 1
str(get)
check_json_roundtrip(get)
def test_op():
add = op.op.get("add")
check_json_roundtrip(add)
def test_conv2d_attrs():
data = relay.var("data", shape=(1, 3, 224, 224))
param = relay.var("param", shape=(64, 3, 7, 7))
out = op.nn.conv2d(data, param, strides=(2, 2), padding=(3, 3), channels=64, kernel_size=(7, 7))
check_json_roundtrip(out)
def test_large_graph():
# Test large graphs to avoid stack overflow in serialize/deserialize
size = int(1e5)
var = [relay.var("var_" + str(i), shape=(2, 3)) for i in range(size)]
body = var[-1]
for i in range(size, 1, -1):
body = relay.Let(var[i - 1], op.add(var[i - 2], var[i - 2]), body)
func = relay.Function([var[0]], body)
check_json_roundtrip(func)
if __name__ == "__main__":
test_span()
test_constant()
test_tuple()
test_local_var()
test_global_var()
test_function()
test_function_attrs()
test_call()
test_let()
test_if()
test_tuple_get_item()
test_op()
test_conv2d_attrs()
    test_large_graph()
# ---- src/Chapter12/Exercise2.py | djeada/Python-For-Informatics | license: MIT ----
"""
Exercise 2
Change your socket program so that it counts the number of characters it has
received and stops displaying any text after it has shown 3000 characters.
The program should retrieve the entire document and count the total number
of characters and display the count of the number of characters at the end
of the document.
"""
import socket
import sys
url = input("Enter url: ")
try:
    words = url.split("/")
    if words[0] != "http:":
        host_url = words[0]
        url = "http://" + url
    else:
        host_url = words[2]
    mysock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    mysock.connect((host_url, 80))
    # HTTP requests must be sent as bytes, with CRLF line endings
    mysock.send(("GET " + url + " HTTP/1.0\r\n\r\n").encode())
except Exception:
    print("Please enter a valid URL")
    sys.exit(1)
count = 0
while True:
    data = mysock.recv(512)
    if len(data) < 1:
        break
    count += len(data)
    # Keep retrieving the whole document, but stop displaying text
    # once 3000 characters have been shown
    if count <= 3000:
        print(data.decode(), end="")
print("\nTotal characters received:", count)
mysock.close()
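# Example session (URL and character count hypothetical):
#   Enter url: http://data.pr4e.org/romeo.txt
#   ...document text, display truncated after 3000 characters...
#   Total characters received: 167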
# ---- absort/__init__.py | MapleCCC/ABSort | license: MIT ----
from .__main__ import (
CommentStrategy,
FileAction,
FormatOption,
SortOrder,
NameRedefinition,
BypassPromptLevel,
absort_file,
absort_files,
absort_str,
)
from .__version__ import __version__
__all__ = [
"absort_str",
"absort_file",
"absort_files",
"CommentStrategy",
"FormatOption",
"FileAction",
"SortOrder",
"NameRedefinition",
"BypassPromptLevel",
"__version__",
]
# ---- whoisenum/whois/whois.py | bbhunter/WhoEnum | license: MIT ----
# -*- coding: utf-8 -*-
"""
Whois client for python
transliteration of:
http://www.opensource.apple.com/source/adv_cmds/adv_cmds-138.1/whois/whois.c
Copyright (c) 2010 Chris Wolf
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from future import standard_library
import os
import optparse
import socket
import sys
import re
from builtins import object
from builtins import *
import logging
standard_library.install_aliases()
logger = logging.getLogger(__name__)
class NICClient(object):
ABUSEHOST = "whois.abuse.net"
NICHOST = "whois.crsnic.net"
INICHOST = "whois.networksolutions.com"
DNICHOST = "whois.nic.mil"
GNICHOST = "whois.nic.gov"
ANICHOST = "whois.arin.net"
LNICHOST = "whois.lacnic.net"
RNICHOST = "whois.ripe.net"
PNICHOST = "whois.apnic.net"
MNICHOST = "whois.ra.net"
QNICHOST_TAIL = ".whois-servers.net"
SNICHOST = "whois.6bone.net"
BNICHOST = "whois.registro.br"
NORIDHOST = "whois.norid.no"
IANAHOST = "whois.iana.org"
PANDIHOST = "whois.pandi.or.id"
DENICHOST = "whois.denic.de"
AI_HOST = "whois.nic.ai"
AR_HOST = "whois.nic.ar"
BY_HOST = "whois.cctld.by"
HR_HOST = "whois.dns.hr"
APP_HOST = "whois.nic.google"
DEV_HOST = "whois.nic.google"
GAMES_HOST = "whois.nic.games"
PAGE_HOST = "whois.nic.page"
CL_HOST = "whois.nic.cl"
CR_HOST = "whois.nic.cr"
DE_HOST = "whois.denic.de"
DK_HOST = "whois.dk-hostmaster.dk"
DO_HOST = "whois.nic.do"
CA_HOST = "whois.ca.fury.ca"
HK_HOST = "whois.hkirc.hk"
HN_HOST = "whois.nic.hn"
KZ_HOST = "whois.nic.kz"
DEFAULT_PORT = "nicname"
MONEY_HOST = "whois.nic.money"
JOBS_HOST = "whois.nic.jobs"
LAT_HOST = "whois.nic.lat"
LI_HOST = "whois.nic.li"
MX_HOST = "whois.mx"
PE_HOST = "kero.yachay.pe"
ONLINE_HOST = "whois.nic.online"
IST_HOST = "whois.afilias-srs.net"
CHAT_HOST = "whois.nic.chat"
WEBSITE_HOST = "whois.nic.website"
OOO_HOST = "whois.nic.ooo"
MARKET_HOST = "whois.nic.market"
NL_HOST = 'whois.domain-registry.nl'
WHOIS_RECURSE = 0x01
WHOIS_QUICK = 0x02
ip_whois = [LNICHOST, RNICHOST, PNICHOST, BNICHOST, PANDIHOST]
def __init__(self):
self.use_qnichost = False
    def findwhois_server(self, buf, hostname, query):
        """Search the initial TLD lookup results for the region-specific
        whois server for getting contact details.
"""
nhost = None
match = re.compile(r'Domain Name: {}\s*.*?Whois Server: (.*?)\s'.format(query), flags=re.IGNORECASE | re.DOTALL).search(buf)
if match:
nhost = match.groups()[0]
# if the whois address is domain.tld/something then
# s.connect((hostname, 43)) does not work
if nhost.count('/') > 0:
nhost = None
elif hostname == NICClient.ANICHOST:
for nichost in NICClient.ip_whois:
if buf.find(nichost) != -1:
nhost = nichost
break
return nhost
def whois(self, query, hostname, flags, many_results=False, quiet=True):
"""Perform initial lookup with TLD whois server
then, if the quick flag is false, search that result
        for the region-specific whois server and do a lookup
there for contact details. If `quiet` is `True`, will
not print a message to STDOUT when a socket error
is encountered.
"""
response = b''
if "SOCKS" in os.environ:
try:
import socks
except ImportError as e:
logger.error("You need to install the Python socks module. Install PIP (https://bootstrap.pypa.io/get-pip.py) and then 'pip install PySocks'")
raise e
socks_user, socks_password = None, None
if "@" in os.environ["SOCKS"]:
creds, proxy = os.environ["SOCKS"].split("@")
socks_user, socks_password = creds.split(":")
else:
proxy = os.environ["SOCKS"]
socksproxy, port = proxy.split(":")
socks_proto = socket.AF_INET
if socket.AF_INET6 in [sock[0] for sock in socket.getaddrinfo(socksproxy, port)]:
socks_proto=socket.AF_INET6
s = socks.socksocket(socks_proto)
s.set_proxy(socks.SOCKS5, socksproxy, int(port), True, socks_user, socks_password)
else:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(10)
try: # socket.connect in a try, in order to allow things like looping whois on different domains without stopping on timeouts: https://stackoverflow.com/questions/25447803/python-socket-connection-exception
s.connect((hostname, 43))
try:
query = query.decode('utf-8')
except UnicodeEncodeError:
pass # Already Unicode (python2's error)
except AttributeError:
pass # Already Unicode (python3's error)
if hostname == NICClient.DENICHOST:
query_bytes = "-T dn,ace -C UTF-8 " + query
elif hostname == NICClient.DK_HOST:
query_bytes = " --show-handles " + query
elif hostname.endswith(NICClient.QNICHOST_TAIL) and many_results:
query_bytes = '=' + query
else:
query_bytes = query
s.send(bytes(query_bytes, 'utf-8') + b"\r\n")
# recv returns bytes
while True:
d = s.recv(4096)
response += d
if not d:
break
s.close()
nhost = None
response = response.decode('utf-8', 'replace')
if 'with "=xxx"' in response:
return self.whois(query, hostname, flags, True)
if flags & NICClient.WHOIS_RECURSE and nhost is None:
nhost = self.findwhois_server(response, hostname, query)
if nhost is not None:
response += self.whois(query, nhost, 0, quiet=True)
except socket.error as exc: # 'response' is assigned a value (also a str) even on socket timeout
if not quiet:
print("Error trying to connect to socket: closing socket - {}".format(exc))
s.close()
response = "Socket not responding: {}".format(exc)
return response
def choose_server(self, domain):
"""Choose initial lookup NIC host"""
try:
domain = domain.encode('idna').decode('utf-8')
except TypeError:
domain = domain.decode('utf-8').encode('idna').decode('utf-8')
except AttributeError:
domain = domain.decode('utf-8').encode('idna').decode('utf-8')
if domain.endswith("-NORID"):
return NICClient.NORIDHOST
if domain.endswith("id"):
return NICClient.PANDIHOST
if domain.endswith("hr"):
return NICClient.HR_HOST
domain = domain.split('.')
if len(domain) < 2:
return None
tld = domain[-1]
if tld[0].isdigit():
return NICClient.ANICHOST
elif tld == 'ai':
return NICClient.AI_HOST
elif tld == 'app':
return NICClient.APP_HOST
elif tld == 'dev':
return NICClient.DEV_HOST
elif tld == 'games':
return NICClient.GAMES_HOST
elif tld == 'page':
return NICClient.PAGE_HOST
elif tld == 'money':
return NICClient.MONEY_HOST
elif tld == 'online':
return NICClient.ONLINE_HOST
elif tld == 'cl':
return NICClient.CL_HOST
elif tld == 'ar':
return NICClient.AR_HOST
elif tld == 'by':
return NICClient.BY_HOST
elif tld == 'cr':
return NICClient.CR_HOST
elif tld == 'ca':
return NICClient.CA_HOST
elif tld == 'do':
return NICClient.DO_HOST
elif tld == 'de':
return NICClient.DE_HOST
elif tld == 'hk':
return NICClient.HK_HOST
elif tld == 'hn':
return NICClient.HN_HOST
elif tld == 'jobs':
return NICClient.JOBS_HOST
elif tld == 'lat':
return NICClient.LAT_HOST
elif tld == 'li':
return NICClient.LI_HOST
elif tld == 'mx':
return NICClient.MX_HOST
elif tld == 'pe':
return NICClient.PE_HOST
elif tld == 'ist':
return NICClient.IST_HOST
elif tld == 'kz':
return NICClient.KZ_HOST
elif tld == 'chat':
return NICClient.CHAT_HOST
elif tld == 'website':
return NICClient.WEBSITE_HOST
elif tld == 'ooo':
return NICClient.OOO_HOST
elif tld == 'market':
return NICClient.MARKET_HOST
elif tld == 'nl':
return NICClient.NL_HOST
else:
return tld + NICClient.QNICHOST_TAIL
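        # Illustrative mappings implied by the branches above (inputs
        # assumed): choose_server("example.de") -> "whois.denic.de",
        # choose_server("example.co.uk") -> "uk.whois-servers.net", and an
        # address like "8.8.8.8" hits the digit branch -> whois.arin.net.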
def whois_lookup(self, options, query_arg, flags):
"""Main entry point: Perform initial lookup on TLD whois server,
or other server to get region-specific whois server, then if quick
flag is false, perform a second lookup on the region-specific
server for contact records"""
nichost = None
        # would happen when this function is called by something other than main
if options is None:
options = {}
if ('whoishost' not in options or options['whoishost'] is None) \
and ('country' not in options or options['country'] is None):
self.use_qnichost = True
options['whoishost'] = NICClient.NICHOST
if not (flags & NICClient.WHOIS_QUICK):
flags |= NICClient.WHOIS_RECURSE
if 'country' in options and options['country'] is not None:
result = self.whois(
query_arg,
options['country'] + NICClient.QNICHOST_TAIL,
flags
)
elif self.use_qnichost:
nichost = self.choose_server(query_arg)
if nichost is not None:
result = self.whois(query_arg, nichost, flags)
else:
result = ''
else:
result = self.whois(query_arg, options['whoishost'], flags)
return result
def parse_command_line(argv):
"""Options handling mostly follows the UNIX whois(1) man page, except
long-form options can also be used.
"""
flags = 0
usage = "usage: %prog [options] name"
parser = optparse.OptionParser(add_help_option=False, usage=usage)
parser.add_option("-a", "--arin", action="store_const",
const=NICClient.ANICHOST, dest="whoishost",
help="Lookup using host " + NICClient.ANICHOST)
parser.add_option("-A", "--apnic", action="store_const",
const=NICClient.PNICHOST, dest="whoishost",
help="Lookup using host " + NICClient.PNICHOST)
parser.add_option("-b", "--abuse", action="store_const",
const=NICClient.ABUSEHOST, dest="whoishost",
help="Lookup using host " + NICClient.ABUSEHOST)
parser.add_option("-c", "--country", action="store",
type="string", dest="country",
help="Lookup using country-specific NIC")
parser.add_option("-d", "--mil", action="store_const",
const=NICClient.DNICHOST, dest="whoishost",
help="Lookup using host " + NICClient.DNICHOST)
parser.add_option("-g", "--gov", action="store_const",
const=NICClient.GNICHOST, dest="whoishost",
help="Lookup using host " + NICClient.GNICHOST)
parser.add_option("-h", "--host", action="store",
type="string", dest="whoishost",
help="Lookup using specified whois host")
parser.add_option("-i", "--nws", action="store_const",
const=NICClient.INICHOST, dest="whoishost",
help="Lookup using host " + NICClient.INICHOST)
parser.add_option("-I", "--iana", action="store_const",
const=NICClient.IANAHOST, dest="whoishost",
help="Lookup using host " + NICClient.IANAHOST)
parser.add_option("-l", "--lcanic", action="store_const",
const=NICClient.LNICHOST, dest="whoishost",
help="Lookup using host " + NICClient.LNICHOST)
parser.add_option("-m", "--ra", action="store_const",
const=NICClient.MNICHOST, dest="whoishost",
help="Lookup using host " + NICClient.MNICHOST)
parser.add_option("-p", "--port", action="store",
type="int", dest="port",
help="Lookup using specified tcp port")
parser.add_option("-Q", "--quick", action="store_true",
dest="b_quicklookup",
help="Perform quick lookup")
parser.add_option("-r", "--ripe", action="store_const",
const=NICClient.RNICHOST, dest="whoishost",
help="Lookup using host " + NICClient.RNICHOST)
parser.add_option("-R", "--ru", action="store_const",
const="ru", dest="country",
help="Lookup Russian NIC")
parser.add_option("-6", "--6bone", action="store_const",
const=NICClient.SNICHOST, dest="whoishost",
help="Lookup using host " + NICClient.SNICHOST)
parser.add_option("-n", "--ina", action="store_const",
const=NICClient.PANDIHOST, dest="whoishost",
help="Lookup using host " + NICClient.PANDIHOST)
parser.add_option("-?", "--help", action="help")
return parser.parse_args(argv)
if __name__ == "__main__":
flags = 0
nic_client = NICClient()
options, args = parse_command_line(sys.argv)
if options.b_quicklookup:
flags = flags | NICClient.WHOIS_QUICK
logger.debug(nic_client.whois_lookup(options.__dict__, args[1], flags))
# ---- apu/encoding/json/np.py | afeldman/apu | license: MIT ----
"""
handling numpy data for json
author: anton feldmann <anton.feldmann@gmail.com>
"""
from json.encoder import JSONEncoder
from json.decoder import JSONDecoder
import numpy as np
# pylint: disable=C0103,R0201,R1705,R0911
class NumpyEncoder(JSONEncoder):
""" Custom encoder for numpy data types
The NumpyEncoder is a JSONEncoder
Examples:
.. example_code::
>>> import numpy as np
>>> import json
>>> from apu.encoding.json.np import NumpyEncoder
        >>> arr = np.array([0, 239, 479, 717, 952, 1192, 1432, 1667],
        ...                dtype=np.int64)
        >>> json.dumps(arr, cls=NumpyEncoder)
"""
def np_list(self, obj):
""" numpy array object to json list """
return obj.tolist()
def np_float(self, obj):
""" numpy float type to float """
return float(obj)
def np_int(self, obj):
""" numpy int to int """
return int(obj)
def np_complex(self, obj):
""" numpy complex to dict.
            a dict of real/imag parts is used so the
            decoder can rebuild the complex value """
return {"real": obj.real, "imag": obj.imag}
def np_bool(self, obj):
""" numpy boolean to boolean """
return bool(obj)
def np_null(self, obj):
""" numpy null no None or json null """
return None
def np(self, obj):
""" np function to check for all numpy objects """
if isinstance(obj, np.integer):
return self.np_int(obj)
elif isinstance(obj, np.floating):
return self.np_float(obj)
        elif isinstance(obj, (np.complex64, np.complex128)):  # np.complex_ is a deprecated alias of complex128
return self.np_complex(obj)
elif isinstance(obj, (np.ndarray, )):
return self.np_list(obj)
elif isinstance(obj, (np.bool_)):
return self.np_bool(obj)
elif isinstance(obj, (np.void)):
return self.np_null(obj)
return JSONEncoder.default(self, obj)
# pylint: disable=E0202,W0237
def default(self, obj):
""" default Encoder entrypoint to encode """
return self.np(obj)
class NumpyDecoder(JSONDecoder):
""" Custom decode for numpy data types
Examples:
.. example_code::
>>> import numpy as np
>>> import json
>>> from apu.encoding.json.np import NumpyDecoder
>>> arr = '[[2.468031, 0.0, 0.0],
[-1.234015, 2.137377, 0.0],
[0.0, 0.0, 19.998293]]'
>>> json.loads(arr,cls=NumpyDecoder)
[[ 2.468031 0. 0. ]
[-1.234015 2.137377 0. ]
[ 0. 0. 19.998293]]
"""
_recursable_types = [str, list, dict]
    def _is_recursive(self, obj) -> bool:
        """ check whether the object can be recursed into
        Returns:
            (bool): True if the object is a recursable type
"""
return isinstance(obj, tuple(NumpyDecoder._recursable_types))
# pylint: disable=R1710, R0912
def decode(self, obj, *args, **kwargs):
""" decode the json string """
if not kwargs.get('recurse', False):
obj = super().decode(obj, *args, **kwargs)
if isinstance(obj, list):
try:
return np.array(obj)
            except (ValueError, TypeError):
for item in obj:
if self._is_recursive(item):
obj[item] = self.decode(item, recurse=True)
elif isinstance(obj, dict):
for key, value in obj.items():
if str(key) in "real":
                    return complex(obj['real'], obj['imag'])  # np.complex is deprecated; use the builtin
elif self._is_recursive(value):
obj[key] = self.decode(value, recurse=True)
elif isinstance(obj, bool):
            return np.bool_(obj)  # np.bool is deprecated
elif isinstance(obj, float):
            return np.float64(obj)  # np.float is deprecated
elif obj is None:
return np.void
else:
return obj
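# Round-trip sketch for the encoder/decoder pair above (values illustrative):
#   json.dumps(np.complex64(1 + 2j), cls=NumpyEncoder)
#       -> '{"real": 1.0, "imag": 2.0}'
#   json.loads('{"real": 1.0, "imag": 2.0}', cls=NumpyDecoder)
#       -> (1+2j)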
# ---- classy/utils/rich_config.py | sunglasses-ai/classy | license: Apache-2.0 ----
import os
try:
import rich
from rich.console import Console
from rich.style import Style
from rich.text import Text
from rich.tree import Tree
except ImportError:
print("classy train [...] --print requires `pip install rich`")
exit()
from typing import Iterable, List, Optional, Tuple, Union
from omegaconf import DictConfig, ListConfig, OmegaConf
from classy.utils.config import ExplainableConfig, NodeInfo
from classy.utils.hydra_patch import ConfigBlame, NormalConfigBlame
class RichNodeInfo:
_CLASSY_GITHUB_CONFIG_URL = (
"https://github.com/sunglasses-ai/classy/tree/main/configurations"
)
def __init__(self, info: NodeInfo):
self.info = info
def render_value(self) -> Text:
value = self.info.value
if value is None:
return Text.from_markup(
"None",
style=Style(
bold=True,
color="orange1",
),
)
if value is True:
return Text.from_markup(
"True",
style=Style(
bold=True,
color="green",
),
)
if value is False:
return Text.from_markup(
"False",
style=Style(
bold=True,
color="red",
),
)
if isinstance(value, (int, float)):
return Text(str(value), style=Style(color="cyan"))
return Text(value, style=Style(color="hot_pink"))
def __rich__(self):
key_name = self.info.key.split(".")[-1]
parts = [
key_name,
]
if self.info.is_leaf:
value = self.render_value()
parts.append(": ")
parts.append(value)
else:
value = self.info.value
if len(value) == 0:
if OmegaConf.is_list(value):
v = "[]"
elif OmegaConf.is_dict(value):
v = "{}"
else:
raise ValueError(
f"key {self.info.key} is neither a dict nor a list. {value}"
)
parts.append(": ")
parts.append(Text(v, style=Style(bold=True, color="yellow3")))
interp = self.info.interpolation
if interp:
parts.append(Text(f" [interp: {interp}]", style=Style(color="magenta")))
blame = self.info.blame
if blame:
if isinstance(blame, NormalConfigBlame):
# TODO: maybe we can improve this?
blame = str(blame)
assert blame.startswith("[source: ") and blame.endswith(
"]"
), f"Unknown blame: {blame}"
blame_val = blame[len("[source: ") : -1]
provider, config = blame_val.split("/", 1)
if provider == "main":
config_url = (
f"{RichNodeInfo._CLASSY_GITHUB_CONFIG_URL}/{config}.yaml"
)
parts.append(
Text.assemble(
" [source: ",
Text.from_markup(
f"[link={config_url}][blue]classy/{config}[/blue][/link]",
style="blue",
),
"]",
style="blue",
)
)
else:
parts.append(Text(f" {blame}", style=Style(color="blue")))
return Text.assemble(*parts)
class ConfigPrinter:
def __init__(
self,
cfg: Union[dict, DictConfig],
fields_order: Iterable[str] = (
"training",
"model",
"data",
"prediction",
"callbacks",
"logging",
),
skip_remaining: bool = False,
additional_blames: Optional[List[Tuple[List[str], "ConfigBlame"]]] = None,
label: str = "<root>",
):
self.expl = ExplainableConfig(cfg, additional_blames)
self.fields_order = fields_order
self.skip_remaining = skip_remaining
self.label = label
def get_rich_tree(self) -> Tree:
style = "dim"
tree = Tree(self.label, guide_style=style)
ordered_keys = list(self.fields_order)
if not self.skip_remaining:
ordered_keys += sorted(
set(self.expl.cfg.keys()).difference(self.fields_order)
)
for key in ordered_keys:
if key not in self.expl.cfg:
continue
for branch in self.walk_config(key, sort=False):
tree.add(branch)
return tree
@staticmethod
def join_keys(parent: Optional[str], key: str):
if parent is None:
return key
return f"{parent}.{key}"
def walk_config(self, key, sort: bool = True) -> List[Tree]:
sort_fn = sorted if sort else lambda item: item
info = self.expl.get_node_info(key)
value = info.value
t = Tree(RichNodeInfo(info))
if not info.is_leaf:
iterator = None
if isinstance(value, (DictConfig, dict)):
iterator = sort_fn(value.keys())
if isinstance(value, (ListConfig, list)):
iterator = map(str, range(len(value)))
assert iterator is not None, f"{key}: {value} is neither a List nor a Dict"
for k in iterator:
for child in self.walk_config(self.join_keys(key, k)):
t.add(child)
return [t]
def get_rich_tree_config(
cfg: DictConfig,
blames: Optional[List[Tuple[List[str], ConfigBlame]]] = None,
tree_label: str = "<root>",
):
return ConfigPrinter(
cfg, additional_blames=blames, label=tree_label
).get_rich_tree()
def print_config(
cfg: DictConfig,
blames: Optional[List[Tuple[List[str], ConfigBlame]]] = None,
tree_label: str = "<root>",
):
rich.print(get_rich_tree_config(cfg, blames, tree_label))
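# Minimal usage sketch (config contents assumed):
#
#     from omegaconf import OmegaConf
#     cfg = OmegaConf.create({"model": {"lr": 1e-3}, "training": {"epochs": 10}})
#     print_config(cfg)   # renders the config as a rich tree on the console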
RICH_ST_CODE_FORMAT = (
"<pre style=\"font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace; "
'line-height: 1.1; background-color: rgb(248, 249, 251); ">'
"<code>{code}</code>"
"</pre>"
)
def rich_to_html(renderable, print_to_console: bool = False, width: int = 230):
with open(os.devnull, "w") as f:
console = Console(
record=True, file=None if print_to_console else f, width=width
)
console.print(renderable)
html = console.export_html(inline_styles=True, code_format=RICH_ST_CODE_FORMAT)
# adjust links to open in new windows
html = html.replace("<a ", '<a target="_blank" ')
return html
# ---- splash/tests/mockserver.py | janmechtel/splash | license: BSD-3-Clause ----
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
import os
import optparse
import base64
import random
from functools import wraps
from urllib.parse import unquote
from twisted.web.server import Site, NOT_DONE_YET
from twisted.web.resource import Resource
from twisted.internet import reactor, ssl
from twisted.internet.task import deferLater
from .proxies import ProxyFactory, AuthProxyFactory
_REQUIRED = object()
def getarg(request, name, default=_REQUIRED, type=str):
if not isinstance(name, bytes):
name = name.encode('utf8')
value = request.args.get(name, [None])[0]
if value is not None:
if type is not None:
value = type(value.decode('utf-8'))
return value
elif default is _REQUIRED:
raise Exception("Missing argument: %s" % name)
else:
return default
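# For example, a request to /delay?n=2.5 (the route name is assumed) makes
# getarg(request, "n", 1, type=float) return 2.5, while a request without
# the parameter falls back to the default of 1.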
def use_chunked_encoding(func):
"""
A workaround for Twisted issue.
See https://github.com/scrapinghub/splash/issues/52#issuecomment-73488224.
"""
@wraps(func)
def wrapper(self, request):
request.write(func(self, request))
request.finish()
return NOT_DONE_YET
return wrapper
def _html_resource(html):
class HtmlResource(Resource):
isLeaf = True
template = html
def __init__(self, http_port=None, https_port=None):
Resource.__init__(self)
self.http_port = http_port
self.https_port = https_port
@use_chunked_encoding
def render(self, request):
response = self.template % dict(
http_port=self.http_port,
https_port=self.https_port
)
# Twisted wants response to be bytes
return response.encode('utf-8')
return HtmlResource
JsRender = _html_resource("""
<html>
<body>
<p id="p1">Before</p>
<script>
document.getElementById("p1").innerHTML="After";
</script>
</body>
</html>
""")
JsAlert = _html_resource("""
<html>
<body>
<p id="p1">Before</p>
<script>
alert("hello");
document.getElementById("p1").innerHTML="After";
</script>
</body>
</html>
""")
JsConfirm = _html_resource("""
<html>
<body>
<p id="p1">Before</p>
<script>
confirm("are you sure?");
document.getElementById("p1").innerHTML="After";
</script>
</body>
</html>
""")
JsPrompt = _html_resource("""
<html>
<body>
<p id="p1">Before</p>
<script>
var result = prompt("are you sure?");
document.getElementById("p1").innerHTML = "After " + result;
</script>
</body>
</html>
""")
JsInterval = _html_resource("""
<html><body>
<div id='num'>not started</div>
<script>
var num=0;
setInterval(function(){
document.getElementById('num').innerHTML = num;
num += 1;
}, 1);
</script>
</body></html>
""")
JsViewport = _html_resource("""
<html><body>
<script>
document.write(window.innerWidth);
document.write('x');
document.write(window.innerHeight);
</script>
</body></html>
""")
TallPage = _html_resource("""
<html style='height:2000px'>
<body>Hello</body>
</html>
""")
RedGreenPage = _html_resource("""
<html>
<style type="text/css" media="screen">
* { padding: 0px; margin: 0px }
#left { float:left; width: 50%%; height: 100%%; background-color: #ff0000 }
#right { float:left; width: 50%%; height: 100%%; background-color: #00ff00 }
</style>
<body>
<div id="left"> </div><div id="right"> </div>
</body>
</html>
""")
BadRelatedResource = _html_resource("""
<html>
<body>
<img src="http://non-existing">
</body>
</html>
""")
EggSpamScript = _html_resource("function egg(){return 'spam';}")
class BaseUrl(Resource):
@use_chunked_encoding
def render_GET(self, request):
return b"""
<html>
<body>
<p id="p1">Before</p>
<script src="script.js"></script>
</body>
</html>
"""
def getChild(self, name, request):
if name == b"script.js":
return self.ScriptJs()
return self
class ScriptJs(Resource):
isLeaf = True
@use_chunked_encoding
def render_GET(self, request):
request.setHeader(b"Content-Type", b"application/javascript")
return b'document.getElementById("p1").innerHTML="After";'
class SetCookie(Resource):
"""
Set a cookie with key=key and value=value.
If "next" GET argument is passed, do a JS redirect to this "next" URL.
If "js" is True, set the cookie using JavaScript instead of an http header.
"""
isLeaf = True
@use_chunked_encoding
def render_GET(self, request):
key = getarg(request, b"key")
value = getarg(request, b"value")
js = getarg(request, b"use_js", False, type=bool)
next_url = unquote(getarg(request, b"next", ""))
script = ""
if js:
script += 'document.cookie = "%s=%s";\n' % (key, value)
else:
request.addCookie(key, value)
if next_url:
script += '''/* Redirecting now.. */
location.href = "%s";''' % next_url
if script:
return (u"""
<html><body>
<script>%s</script>
</body></html>
""" % script).encode('utf-8')
else:
return b"ok"
class GetCookie(Resource):
""" Return a cookie with key=key """
isLeaf = False
@use_chunked_encoding
def render_GET(self, request):
key = getarg(request, b"key").encode('utf-8')
value = request.getCookie(key) or b""
return value
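# Illustrative requests for the two cookie endpoints above (the paths match the
# routes registered in Root below; the key/value pairs are made up):
#
#     /set-cookie?key=foo&value=bar
#     /set-cookie?key=foo&value=bar&use_js=1&next=%2Fget-cookie%3Fkey%3Dfoo
#     /get-cookie?key=foo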
class Delay(Resource):
""" Accept the connection; write the response after ``n`` seconds. """
isLeaf = True
def render_GET(self, request):
n = getarg(request, "n", 1, type=float)
d = deferLater(reactor, n, lambda: (request, n))
d.addCallback(self._delayedRender)
return NOT_DONE_YET
def _delayedRender(self, request_info):
request, n = request_info
request.write(("Response delayed for %0.3f seconds\n" % n).encode('utf-8'))
if not request._disconnected:
request.finish()
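# Illustrative request (path matches the "delay" route registered in Root):
# GET /delay?n=0.5 holds the connection open and responds after ~0.5 seconds.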
class SlowGif(Resource):
""" 1x1 black gif that loads n seconds """
isLeaf = True
def render_GET(self, request):
request.setHeader(b"Content-Type", b"image/gif")
request.setHeader(b"Cache-Control", b"no-cache, must-revalidate")
request.setHeader(b"Pragma", b"no-cache")
request.write(b"GIF89a")
n = getarg(request, "n", 1, type=float)
d = deferLater(reactor, n, lambda: (request, n))
d.addCallback(self._delayedRender)
return NOT_DONE_YET
def _delayedRender(self, request_info):
request, n = request_info
# write 1px black gif
gif_data = b'AQABAIAAAAAAAAAAACH5BAAAAAAALAAAAAABAAEAAAICTAEAOw=='
request.write(base64.decodebytes(gif_data))
if not request._disconnected:
request.finish()
class ShowImage(Resource):
"""
Show a 50x50 black image.
GET arguments:
* n - emulate slow image; it will take `n` seconds to load the image;
* js - inject image using JS only after `js` seconds.
"""
isLeaf = True
@use_chunked_encoding
def render_GET(self, request):
token = random.random() # prevent caching
n = getarg(request, "n", 0, type=float)
js = getarg(request, "js", 0, type=float)
img = (
"<img id='foo' width=50 heigth=50 "
" src='/slow.gif?n=%s&rnd=%s'>" % (n, token)
)
if not js:
res = "<html><body>%s</body></html>" % img
else:
res = """
<html><body id="body">
<script>
setTimeout(function(){
document.getElementById('body').innerHTML="%s";
}, %s);
</script>
</body></html>
""" % (img, js * 1000)
return res.encode('utf-8')
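# Illustrative requests for ShowImage above (paths match the routes registered
# in Root):
#
#     /show-image?n=2      image takes ~2 seconds to load
#     /show-image?js=0.5   the <img> tag is injected by JS after ~500ms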
class IframeResource(Resource):
def __init__(self, http_port):
Resource.__init__(self)
self.putChild(b"1.html", self.IframeContent1())
self.putChild(b"2.html", self.IframeContent2())
self.putChild(b"3.html", self.IframeContent3())
self.putChild(b"4.html", self.IframeContent4())
self.putChild(b"5.html", self.IframeContent5())
self.putChild(b"6.html", self.IframeContent6())
self.putChild(b"script.js", self.ScriptJs())
self.putChild(b"script2.js", self.OtherDomainScript())
self.putChild(b"nested.html", self.NestedIframeContent())
self.http_port = http_port
@use_chunked_encoding
def render(self, request):
return ("""
<html>
<head>
<script src="/iframes/script.js"></script>
<script src="http://0.0.0.0:%s/iframes/script2.js"></script>
</head>
<body>
<iframe src="/iframes/1.html">
<p>no iframe 1</p>
</iframe>
<iframe src="/iframes/2.html">
<p>no iframe 2</p>
</iframe>
<p id="js-iframe">no js iframes</p>
<p id="js-iframe2">no delayed js iframes</p>
<p id="js-iframe3">no js iframes created in window.onload</p>
<script type="text/javascript">
document.getElementById('js-iframe').innerHTML="<iframe src='/iframes/3.html'>js iframes don't work</iframe>"
</script>
<script type="text/javascript">
window.setTimeout(function(){
document.getElementById('js-iframe2').innerHTML="<iframe src='/iframes/4.html'>delayed js iframes don't work</iframe>";
}, 100);
</script>
<script type="text/javascript">
window.onload = function(){
document.getElementById('js-iframe3').innerHTML="<iframe src='/iframes/5.html'>js iframes created in window.onload don't work</iframe>";
};
</script>
</body>
</html>
""" % self.http_port).encode('utf-8')
IframeContent1 = _html_resource("<html><body>iframes work IFRAME_1_OK</body></html>")
IframeContent2 = _html_resource("""
<html><body>
<iframe src="/iframes/nested.html" width=200 height=200>
<p>nested iframes don't work</p>
</iframe>
</body></html>
""")
IframeContent3 = _html_resource("<html><body>js iframes work IFRAME_2_OK</body></html>")
IframeContent4 = _html_resource("<html><body>delayed js iframes work IFRAME_3_OK</body></html>")
    IframeContent5 = _html_resource("<html><body>js iframes created in window.onload work IFRAME_4_OK</body></html>")
IframeContent6 = _html_resource("<html><body>js iframes created by document.write in external script work IFRAME_5_OK</body></html>")
NestedIframeContent = _html_resource("<html><body><p>nested iframes work IFRAME_6_OK</p></body></html>")
class ScriptJs(Resource):
isLeaf = True
@use_chunked_encoding
def render(self, request):
request.setHeader(b"Content-Type", b"application/javascript")
iframe_html = " SAME_DOMAIN <iframe src='/iframes/6.html'>js iframe created by document.write in external script doesn't work</iframe>"
return ('''document.write("%s");''' % iframe_html).encode('utf-8')
class OtherDomainScript(Resource):
isLeaf = True
@use_chunked_encoding
def render(self, request):
request.setHeader(b"Content-Type", b"application/javascript")
return "document.write(' OTHER_DOMAIN ');".encode('utf-8')
class PostResource(Resource):
""" Return a HTML file with all HTTP headers and the POST data """
@use_chunked_encoding
def render_POST(self, request):
        code = request.args.get(b'code', [200])[0]
request.setResponseCode(int(code))
request.setHeader(b"Content-Type", b"text/plain; charset=utf-8")
headers = request.getAllHeaders()
payload = request.content.getvalue() if request.content is not None else b''
return ("""
<html>
<body>
<p id="p1">From POST</p>
<p id="headers">
%s
</p>
<p id="payload">
%s
</p>
</body>
</html>
""" % (headers, repr(payload))).encode('utf-8')
class GetResource(Resource):
""" Return a HTML file with all HTTP headers and all GET arguments """
@use_chunked_encoding
def render_GET(self, request):
code = request.args.get(b'code', [200])[0]
request.setResponseCode(int(code))
empty_body = bool(request.args.get(b'empty', [b''])[0])
if empty_body:
return b""
headers = request.getAllHeaders()
payload = request.args
return ("""
<html>
<body>
<p id="p1">GET request</p>
<p id="headers">
%s
</p>
<p id="arguments">
%s
</p>
</body>
</html>
""" % (headers, payload)).encode('utf-8')
class EchoUrl(Resource):
def render_GET(self, request):
return request.uri
def getChild(self, name, request):
return self
JsPostResource = _html_resource("""
<html>
<body>
<form action="/postrequest" method="POST">
<input type="hidden" name="hidden-field" value="i-am-hidden"/>
<input type="text" name="a-field" value="field value"/>
<input type="submit" value="go"/>
</form>
<script>document.querySelector('form').submit();</script>
</body>
</html>
""")
class XHRPostPage(Resource):
isLeaf = True
def render_GET(self, request):
content_type = getarg(request, "content_type",
"application/octet-stream")
body = getarg(request, "body", "Hello world!")
# Used to test large requests.
body_repeat = int(getarg(request, "body_repeat", 1))
body *= body_repeat
res = """
<html>
<body>
<script>
var xhr = new XMLHttpRequest();
xhr.open("POST", "/postrequest");
xhr.setRequestHeader("Content-Type", %s);
xhr.send(%s);
</script>
</body>
</html>
""" % (json.dumps(content_type), json.dumps(body))
return res.encode('utf-8')
ExternalIFrameResource = _html_resource("""
<html>
<body>
<iframe id='external' src="https://localhost:%(https_port)s/external">
</iframe>
</body>
</html>
""")
ExternalResource = _html_resource("""
<html>
<body>EXTERNAL</body>
</html>
""")
JsRedirect = _html_resource("""
<html><body>
Redirecting now..
<script> window.location = '/jsredirect-target'; </script>
</body></html>
""")
JsRedirectSlowImage = _html_resource("""
<html><body>
Redirecting now..
<img width=10 height=10 src="/slow.gif?n=2">
<script> window.location = '/jsredirect-target'; </script>
</body></html>
""")
JsRedirectOnload = _html_resource("""
<html>
<head>
<script>
window.onload = function(){
window.location = '/jsredirect-target';
}
</script>
</head>
<body>Redirecting on window.load...</body>
</html>
""")
JsRedirectTimer = _html_resource("""
<html>
<head>
<script>
window.setTimeout(function(){
window.location = '/jsredirect-target';
}, 100);
</script>
</head>
<body>Redirecting on setTimeout callback...</body>
</html>
""")
JsRedirectInfinite = _html_resource("""
<html>
<head><script> window.location = '/jsredirect-infinite2'; </script></head>
<body>Redirecting infinitely, step #1</body>
</html>
""")
JsRedirectInfinite2 = _html_resource("""
<html>
<head><script> window.location = '/jsredirect-infinite'; </script></head>
<body>Redirecting infinitely, step #2</body>
</html>
""")
JsRedirectToJsRedirect = _html_resource("""
<html><body>
Redirecting to an another redirecting page..
<script>
window.location = '/jsredirect';
</script>
</body></html>
""")
JsRedirectToNonExisting = _html_resource("""
<html><body>
Redirecting to non-existing domain..
<script>
window.location = 'http://non-existing';
</script>
</body></html>
""")
JsRedirectTarget = _html_resource("""
<html><body> JS REDIRECT TARGET </body></html>
""")
MetaRedirect0 = _html_resource("""
<html><head>
<meta http-equiv="REFRESH" content="0; URL=/meta-redirect-target/">
</head>
<body></body></html>
""")
MetaRedirectSlowLoad = _html_resource("""
<html><head>
<meta http-equiv="REFRESH" content="0; URL=/meta-redirect-target/">
</head>
<body><img src="/delay?n=0.2"></body></html>
""")
MetaRedirectSlowLoad2 = _html_resource("""
<html><head>
<meta http-equiv="REFRESH" content="0; URL=/meta-redirect-target/">
</head>
<body><img width=10 height=10 src="/slow.gif?n=2"></body></html>
""")
MetaRedirect1 = _html_resource("""
<html><head>
<meta http-equiv="REFRESH" content="0.2; URL=/meta-redirect-target/">
</head>
<body>
""")
MetaRedirectTarget = _html_resource("""
<html><body> META REDIRECT TARGET </body></html>
""")
VeryLongGreenPage = _html_resource("""
<html>
<style>
* { margin: 0px; padding: 0px }
</style>
<body style="border: 1px solid #00FF77; height:59998px; background-color: #00FF77">
Hello, I am a loooooong green page
</body></html>
""")
RgbStripesPage = _html_resource("""
<html>
<style>
* { margin: 0px; padding: 0px; }
body {
background: -webkit-repeating-linear-gradient(
-90deg,
#ff0000, #ff0000 1px,
#00ff00 1px, #00ff00 2px,
#0000ff 2px, #0000ff 3px);
width: 10px;
height: 10px
}
</style>
<body>
 
</body>
</html>
""")
InputsPage = _html_resource("""
<html>
<body>
<input type="text" id="input1"/>
<input type="text" id="input2"/>
<input type="text" id="input3"/>
</body>
</html>
""")
FocusedTextareaPage = _html_resource("""
<html>
<body onload="load();">
<textarea id="text"></textarea>
<script type="text/javascript">
var load = function() {
document.getElementById('text').focus();
}
</script>
</body>
</html>
""")
FormInputsEventPage = _html_resource("""
<html>
<body onload="load();">
<h1 id="result"></h1>
<form>
<input name="username" type="text" />
<input name="password" type="text" />
<input type="submit"/>
</form>
<script type="text/javascript">
var load = function() {
document.querySelector('form').onsubmit = function(ev) {
var inputs = document.querySelectorAll('input[type="text"]');
var values = [];
for (var i = 0; i < inputs.length; i++)
values.push(inputs[i].value);
document.getElementById('result').innerHTML = values.join('|');
return false;
};
};
</script>
</body>
</html>
""")
KeyPressEventLoggerPage = _html_resource("""
<html>
<body onload="load();">
<script type="text/javascript">
var load = function() {
document.onkeypress = function(ev) {
ev.preventDefault();
var out = '<li>' + ev.keyCode + '</li>'
document.getElementById('output').innerHTML += out;
return false;
};
}
</script>
<ul id="output"></ul>
</body>
</html>
""")
KeyUpDownEventLoggerPage = _html_resource("""
<html>
<body onload="load();">
<script type="text/javascript">
var load = function() {
var handle_key = function(prefix) {
return function(ev) {
ev.preventDefault();
var out = '<li>' + prefix + ev.keyCode + '</li>'
document.getElementById('output').innerHTML += out;
return false;
}
};
document.onkeydown = handle_key('+');
document.onkeyup = handle_key('-');
}
</script>
<ul id="output"></ul>
</body>
</html>
""")
VariousElementsPage = _html_resource("""
<html>
<head>
<style>
html, body { margin: 0; padding: 0; }
</style>
</head>
<body>
<button id="showTitleBtn" type="button" onclick="document.querySelector('.title').style.display = 'block'">Click me</button>
<h1 id="title" class="title" style="display: none ">Title</h1>
<form id="login" action="/submitted">
<input type="text" name="username" value="admin" />
<input type="password" name="password" value="pass123" />
<input type="checkbox" name="remember" />
</form>
<form id="form">
<input type="text" name="foo[]" value="coffee" />
<input type="text" name="foo[]" value="milk" />
<input type="text" name="foo[]" value="eggs" />
<input type="text" name="baz" value="foo" />
<input type="radio" name="choice" value="yes" />
<input type="radio" name="choice" value="no" checked />
<input type="checkbox" name="check" value="checked" checked />
<select multiple name="selection">
<option value="1" selected>1</option>
<option value="2">2</option>
<option value="3" selected>2</option>
</select>
</form>
<div id="editable" contenteditable style="width: 100px; height: 100px;"></div>
<p class="p" id="multiline-inline" style="width: 5px"><span>Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.</span></p>
<div id="block" onclick="this.parentNode.remove()" style="position: relative">
<div id="nestedBlock" style="position: absolute; top: 10px; left: 10px;">nested</div>
</div>
<div class="test" id="clickMe" onclick="this.innerText = (+this.innerText) + 1">0</div>
<div id="hoverMe" onmousemove="this.innerText = (+this.innerText) + 1">0</div>
<div id="parent"><div id="child">click</div></div>
</body>
</html>
""")
class HttpRedirectResource(Resource):
def render_GET(self, request):
code = request.args[b'code'][0].decode('utf-8')
url = '/getrequest?http_code=%s' % code
request.setResponseCode(int(code))
request.setHeader(b"location", url.encode('latin1'))
return ("%s redirect to %s" % (code, url)).encode('latin1')
def render_POST(self, request):
request.setResponseCode(301)
        payload = request.content.getvalue().decode('latin1') if request.content is not None else ''
        url = ('/getrequest?%s' % payload).encode('latin1')
request.setHeader(b"location", url)
return b"redirect to " + url
class JsRedirectTo(Resource):
""" Do a JS redirect to an URL passed in "url" GET argument. """
isLeaf = True
@use_chunked_encoding
def render_GET(self, request):
url = getarg(request, b"url")
next_url = unquote(url)
return ("""
<html><body>
Redirecting now..
<script> window.location = '%s'; </script>
</body></html>
""" % next_url).encode('utf-8')
class JsEventResource(Resource):
isLeaf = True
def render_GET(self, request):
"""
        :param request: must contain an "event_type" argument naming the JS event
            to bind, e.g. "mouseover" or "click".
        :return: the response contains the event data as "property:value" strings;
            the h1 element disappears if the event fires successfully.
"""
event_type = request.args[b"event_type"][0].decode("utf8")
js_code = u"""
function modify_h1(e) {
var h1 = document.getElementById("h1");
h1.remove();
msg = ""
node = document.createElement("p")
for (k in e) {
msg += k + ":" + e[k] + ";"
}
node.textContent = msg;
document.getElementById("container").appendChild(node);
}
var element = document.getElementById("button");
element.addEventListener("%s", modify_h1, false);
var element_outside_viewport = document.getElementById("must_scroll_to_see");
element_outside_viewport.addEventListener("%s", modify_h1, false);
""" % (event_type, event_type)
html_with_js = u"""
<html>
<head></head>
<body>
<div id="container">
<h1 id="h1"> this must be removed after {0}</h1>
<button id="button">{0} here</button>
</div>
<button id="must_scroll_to_see" style="margin-top:1900px">button below sight level</button>
<script>
{1}
</script>
</body>
</html>
""".format(event_type, js_code)
return html_with_js.encode("utf8")
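# Illustrative request: GET /jsevent?event_type=click serves a page whose h1
# is removed once a "click" is dispatched on either button, and the event's
# properties are written back as "property:value" pairs.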
class CP1251Resource(Resource):
@use_chunked_encoding
def render_GET(self, request):
request.setHeader(b"Content-Type", b"text/html; charset=windows-1251")
return u'''
<html>
<head>
<meta http-equiv="Content-Type" content="text/html;charset=windows-1251">
</head>
<body>проверка</body>
</html>
'''.strip().encode('cp1251')
class Subresources(Resource):
""" Embedded css and image """
@use_chunked_encoding
def render_GET(self, request):
return ("""<html><head>
<link rel="stylesheet" href="style.css?_rnd={0}" />
</head>
<body>
<img id="image" src="img.gif?_rnd={0}"
onload="window.imageLoaded = true;"
onerror="window.imageLoaded = false;"/>
</body>
</html>""".format(random.randint(0, 1<<31))).encode('utf-8')
def getChild(self, name, request):
if name == b"style.css":
return self.StyleSheet()
if name == b"img.gif":
return self.Image()
return self
class StyleSheet(Resource):
@use_chunked_encoding
def render_GET(self, request):
request.setHeader(b"Content-Type", b"text/css; charset=utf-8")
print("Request Style!")
return b"body { background-color: red; }"
class Image(Resource):
@use_chunked_encoding
def render_GET(self, request):
request.setHeader(b"Content-Type", b"image/gif")
return base64.decodebytes(b'R0lGODlhAQABAAD/ACwAAAAAAQABAAACADs=')
class SubresourcesWithCaching(Resource):
"""
    Embedded image.
Allows caching of the image by setting the Cache-Control header.
Very similar to the /subresources/ endpoint.
"""
def getChild(self, name, request):
if name == b"img.gif":
return self.Image()
return self
class Image(Resource):
@use_chunked_encoding
def render_GET(self, request):
request.setHeader(b"Content-Type", b"image/gif")
request.setHeader(b"Cache-Control", b"public, max-age=999999, s-maxage=999999")
return base64.decodebytes(b'R0lGODlhAQABAAD/ACwAAAAAAQABAAACADs=')
@use_chunked_encoding
def render_GET(self, request):
return ("""<html>
<body>
<img id="image" src="subresources-with-caching/img.gif"
onload="window.imageLoaded = true;"
onerror="window.imageLoaded = false;"/>
</body>
</html>""").encode('utf-8')
class SetHeadersResource(Resource):
@use_chunked_encoding
def render_GET(self, request):
for k, values in request.args.items():
for v in values:
request.setHeader(k, v)
return b""
class InvalidContentTypeResource(Resource):
@use_chunked_encoding
def render_GET(self, request):
request.setHeader(b"Content-Type", b"ABRACADABRA: text/html; charset=windows-1251")
return u'''проверка'''.encode('cp1251')
class InvalidContentTypeResource2(Resource):
@use_chunked_encoding
def render_GET(self, request):
request.setHeader(b"Content-Type", b"text-html; charset=utf-8")
return b"ok"
class InvalidStatusMessageResource(Resource):
def render_GET(self, request):
request.setResponseCode(200, u"успех".encode('cp1251'))
return b'ok'
class Index(Resource):
isLeaf = True
def __init__(self, rootChildren):
self.rootChildren = rootChildren
@use_chunked_encoding
def render(self, request):
paths = [path.decode('ascii')
for (path, child) in self.rootChildren.items() if path]
links = "\n".join([
"<li><a href='%s'>%s</a></li>" % (path, path)
for path in paths
])
return ("""
<html>
<body><ul>%s</ul></body>
</html>
""" % links).encode('utf-8')
class GzipRoot(Resource):
def __init__(self, original_children):
Resource.__init__(self)
try:
from twisted.web.server import GzipEncoderFactory
from twisted.web.resource import EncodingResourceWrapper
for path, child in original_children.items():
self.putChild(
path,
EncodingResourceWrapper(child, [GzipEncoderFactory()])
)
except ImportError:
pass
class Root(Resource):
def __init__(self, http_port, https_port, proxy_port):
Resource.__init__(self)
self.log = []
self.putChild(b"postrequest", PostResource())
self.putChild(b"getrequest", GetResource())
self.putChild(b"jsrender", JsRender())
self.putChild(b"jsalert", JsAlert())
self.putChild(b"jsconfirm", JsConfirm())
self.putChild(b"jsprompt", JsPrompt())
self.putChild(b"jsinterval", JsInterval())
self.putChild(b"jsviewport", JsViewport())
self.putChild(b"jspost", JsPostResource())
self.putChild(b"tall", TallPage())
self.putChild(b"red-green", RedGreenPage())
self.putChild(b"baseurl", BaseUrl())
self.putChild(b"delay", Delay())
self.putChild(b"slow.gif", SlowGif())
self.putChild(b"show-image", ShowImage())
self.putChild(b"iframes", IframeResource(http_port))
self.putChild(b"externaliframe", ExternalIFrameResource(https_port=https_port))
self.putChild(b"external", ExternalResource())
self.putChild(b"cp1251", CP1251Resource())
self.putChild(b"cp1251-invalid", InvalidContentTypeResource())
self.putChild(b"bad-status-code-message", InvalidStatusMessageResource())
self.putChild(b"bad-related", BadRelatedResource())
self.putChild(b"set-cookie", SetCookie()),
self.putChild(b"get-cookie", GetCookie()),
self.putChild(b"eggspam.js", EggSpamScript()),
self.putChild(b"very-long-green-page", VeryLongGreenPage())
self.putChild(b"rgb-stripes", RgbStripesPage())
self.putChild(b"subresources", Subresources())
self.putChild(b"subresources-with-caching", SubresourcesWithCaching())
self.putChild(b"set-header", SetHeadersResource())
self.putChild(b"echourl", EchoUrl())
self.putChild(b"bad-content-type", InvalidContentTypeResource())
self.putChild(b"bad-content-type2", InvalidContentTypeResource2())
self.putChild(b"jsevent", JsEventResource())
self.putChild(b"inputs-page", InputsPage())
self.putChild(b"focused-input", FocusedTextareaPage())
self.putChild(b"form-inputs-event-page", FormInputsEventPage())
self.putChild(b"key-press-event-logger-page", KeyPressEventLoggerPage())
self.putChild(b"key-up-down-event-logger-page", KeyUpDownEventLoggerPage())
self.putChild(b"various-elements", VariousElementsPage())
# self.putChild(b"flashpage", FlashPage())
self.putChild(b"jsredirect", JsRedirect())
self.putChild(b"jsredirect-to", JsRedirectTo())
self.putChild(b"jsredirect-slowimage", JsRedirectSlowImage())
self.putChild(b"jsredirect-onload", JsRedirectOnload())
self.putChild(b"jsredirect-timer", JsRedirectTimer())
self.putChild(b"jsredirect-chain", JsRedirectToJsRedirect())
self.putChild(b"jsredirect-target", JsRedirectTarget())
self.putChild(b"jsredirect-infinite", JsRedirectInfinite())
self.putChild(b"jsredirect-infinite2", JsRedirectInfinite2())
self.putChild(b"jsredirect-non-existing", JsRedirectToNonExisting())
self.putChild(b"meta-redirect0", MetaRedirect0())
self.putChild(b"meta-redirect-slowload", MetaRedirectSlowLoad())
self.putChild(b"meta-redirect-slowload2", MetaRedirectSlowLoad2())
self.putChild(b"meta-redirect1", MetaRedirect1())
self.putChild(b"meta-redirect-target", MetaRedirectTarget())
self.putChild(b"http-redirect", HttpRedirectResource())
self.putChild(b"do-post", XHRPostPage())
self.putChild(b"", Index(self.children))
self.putChild(b"gzip", GzipRoot(self.children))
def cert_path():
return os.path.join(os.path.dirname(__file__), "server.pem")
def ssl_factory():
pem = cert_path()
return ssl.DefaultOpenSSLContextFactory(pem, pem)
def run(port_num, sslport_num, proxyport_num, authproxyport_num,
authproxy_user, verbose=True):
root = Root(port_num, sslport_num, proxyport_num)
factory = Site(root)
port = reactor.listenTCP(port_num, factory)
sslport = reactor.listenSSL(sslport_num, factory, ssl_factory())
proxyport = reactor.listenTCP(proxyport_num, ProxyFactory())
authproxyport = reactor.listenTCP(authproxyport_num,
AuthProxyFactory(authproxy_user))
def print_listening():
h = port.getHost()
s = sslport.getHost()
p = proxyport.getHost()
ap = authproxyport.getHost()
print("Mock server running at http://%s:%d (http), "
"https://%s:%d (https) and http://%s:%d (proxy) "
"and http://%s:%d (proxy with auth, user: %s)" %
(h.host, h.port, s.host, s.port, p.host, p.port,
ap.host, ap.port, authproxy_user))
if verbose:
import sys
from twisted.python import log
log.startLogging(sys.stdout)
reactor.callWhenRunning(print_listening)
reactor.run()
if __name__ == "__main__":
op = optparse.OptionParser()
op.add_option("--http-port", type=int, default=8998)
op.add_option("--https-port", type=int, default=8999)
op.add_option("--proxy-port", type=int, default=8990)
op.add_option("--auth-proxy-port", type=int, default=8995)
op.add_option("--auth-proxy-user", type=str, default="test")
op.add_option("-q", "--quiet", action="store_true", dest="quiet", default=False)
opts, _ = op.parse_args()
run(
port_num=opts.http_port,
sslport_num=opts.https_port,
proxyport_num=opts.proxy_port,
authproxyport_num=opts.auth_proxy_port,
authproxy_user=opts.auth_proxy_user,
verbose=not opts.quiet
)
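# Hedged CLI sketch (module path inferred from this file's repository
# location; the flags are the optparse options defined above):
#
#     python -m splash.tests.mockserver --http-port 8998 --https-port 8999 \
#         --proxy-port 8990 --auth-proxy-port 8995 --auth-proxy-user test -q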
| 29.014407 | 194 | 0.612349 |
0ebfc9121054fd9490dcb9eefdc56665903c0561 | 842 | py | Python | doodledashboard/filters/message_from_source.py | FlyingTopHat/Slack-Dashboard | 28ed478d8927b8e71db48a521493919a9c0b5797 | ["MIT"] | 1 | 2019-09-21T09:03:54.000Z | 2019-09-21T09:03:54.000Z | doodledashboard/filters/message_from_source.py | FlyingTopHat/Slack-Dashboard | 28ed478d8927b8e71db48a521493919a9c0b5797 | ["MIT"] | 46 | 2018-01-01T12:56:30.000Z | 2020-02-11T08:30:42.000Z | doodledashboard/filters/message_from_source.py | FlyingTopHat/Slack-Dashboard | 28ed478d8927b8e71db48a521493919a9c0b5797 | ["MIT"] | 2 | 2018-08-26T08:25:49.000Z | 2020-11-04T03:47:08.000Z |
from doodledashboard.component import FilterCreator, MissingRequiredOptionException
from doodledashboard.filters.filter import MessageFilter
class MessageFromSourceFilter(MessageFilter):
def __init__(self, source_name):
MessageFilter.__init__(self)
self._source_name = source_name
def filter(self, message):
return message.source_name == self._source_name
@property
def source_name(self):
return self._source_name
class MessageFromSourceFilterCreator(FilterCreator):
@staticmethod
def get_id():
return "message-from-source"
def create(self, options, secret_store):
if "source-name" not in options:
raise MissingRequiredOptionException("Expected 'source-name' option to exist")
return MessageFromSourceFilter(str(options["source-name"]))
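# Minimal usage sketch (hedged: the message object is faked here with a
# SimpleNamespace carrying the one attribute the filter reads):
#
#     from types import SimpleNamespace
#     creator = MessageFromSourceFilterCreator()
#     msg_filter = creator.create({"source-name": "slack"}, secret_store=None)
#     assert msg_filter.filter(SimpleNamespace(source_name="slack"))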
| 29.034483 | 90 | 0.733967 |
2024d5bbe405ba6f84428a6c0eb40a29c301a867 | 72,690 | py | Python | test/unit/test_contracts_graph_parsed.py | jwills/dbt-core | aeee1c23a6826aa1d254a939655cb514f38db649 | ["Apache-2.0"] | null | null | null | test/unit/test_contracts_graph_parsed.py | jwills/dbt-core | aeee1c23a6826aa1d254a939655cb514f38db649 | ["Apache-2.0"] | null | null | null | test/unit/test_contracts_graph_parsed.py | jwills/dbt-core | aeee1c23a6826aa1d254a939655cb514f38db649 | ["Apache-2.0"] | null | null | null |
import pickle
import pytest
from dbt.node_types import NodeType
from dbt.contracts.files import FileHash
from dbt.contracts.graph.model_config import (
NodeConfig,
SeedConfig,
TestConfig,
SnapshotConfig,
SourceConfig,
EmptySnapshotConfig,
Hook,
)
from dbt.contracts.graph.parsed import (
ParsedModelNode,
DependsOn,
ColumnInfo,
ParsedGenericTestNode,
ParsedSnapshotNode,
IntermediateSnapshotNode,
ParsedNodePatch,
ParsedMacro,
ParsedExposure,
ParsedMetric,
ParsedSeedNode,
Docs,
MacroDependsOn,
ParsedSourceDefinition,
ParsedDocumentation,
ParsedHookNode,
ExposureOwner,
TestMetadata,
)
from dbt.contracts.graph.unparsed import (
ExposureType,
MetricFilter,
FreshnessThreshold,
MaturityType,
Quoting,
Time,
TimePeriod,
)
from dbt import flags
from dbt.dataclass_schema import ValidationError
from .utils import ContractTestCase, assert_symmetric, assert_from_dict, compare_dicts, assert_fails_validation, dict_replace, replace_config
@pytest.fixture
def populated_node_config_object():
result = NodeConfig(
column_types={'a': 'text'},
materialized='table',
post_hook=[Hook(sql='insert into blah(a, b) select "1", 1')]
)
result._extra['extra'] = 'even more'
return result
@pytest.fixture
def populated_node_config_dict():
return {
'column_types': {'a': 'text'},
'enabled': True,
'materialized': 'table',
'persist_docs': {},
'post-hook': [{'sql': 'insert into blah(a, b) select "1", 1', 'transaction': True}],
'pre-hook': [],
'quoting': {},
'tags': [],
'extra': 'even more',
'on_schema_change': 'ignore',
'meta': {},
'grants': {},
}
def test_config_populated(populated_node_config_object, populated_node_config_dict):
assert_symmetric(populated_node_config_object, populated_node_config_dict, NodeConfig)
pickle.loads(pickle.dumps(populated_node_config_object))
@pytest.fixture
def unrendered_node_config_dict():
return {
'column_types': {'a': 'text'},
'materialized': 'table',
'post_hook': 'insert into blah(a, b) select "1", 1',
}
different_node_configs = [
lambda c: dict_replace(c, post_hook=[]),
lambda c: dict_replace(c, materialized='view'),
lambda c: dict_replace(c, quoting={'database': True}),
lambda c: dict_replace(c, extra='different extra'),
lambda c: dict_replace(c, column_types={'a': 'varchar(256)'}),
]
same_node_configs = [
lambda c: dict_replace(c, tags=['mytag']),
lambda c: dict_replace(c, alias='changed'),
lambda c: dict_replace(c, schema='changed'),
lambda c: dict_replace(c, database='changed'),
]
@pytest.mark.parametrize('func', different_node_configs)
def test_config_different(unrendered_node_config_dict, func):
value = func(unrendered_node_config_dict)
assert not NodeConfig.same_contents(unrendered_node_config_dict, value)
@pytest.mark.parametrize('func', same_node_configs)
def test_config_same(unrendered_node_config_dict, func):
value = func(unrendered_node_config_dict)
assert unrendered_node_config_dict != value
assert NodeConfig.same_contents(unrendered_node_config_dict, value)
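# Worked illustration of the same_contents semantics exercised above (the dict
# values are hypothetical; the API calls mirror the two tests):
#
#     base = {'materialized': 'table', 'post_hook': 'insert into blah ...'}
#     assert NodeConfig.same_contents(base, {**base, 'tags': ['mytag']})
#     assert not NodeConfig.same_contents(base, {**base, 'materialized': 'view'})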
@pytest.fixture
def base_parsed_model_dict():
return {
'name': 'foo',
'root_path': '/root/',
'created_at': 1.0,
'resource_type': str(NodeType.Model),
'path': '/root/x/path.sql',
'original_file_path': '/root/path.sql',
'package_name': 'test',
'raw_sql': 'select * from wherever',
'unique_id': 'model.test.foo',
'fqn': ['test', 'models', 'foo'],
'refs': [],
'sources': [],
'depends_on': {'macros': [], 'nodes': []},
'database': 'test_db',
'description': '',
'schema': 'test_schema',
'alias': 'bar',
'tags': [],
'config': {
'column_types': {},
'enabled': True,
'materialized': 'view',
'persist_docs': {},
'post-hook': [],
'pre-hook': [],
'quoting': {},
'tags': [],
'on_schema_change': 'ignore',
'meta': {},
'grants': {},
},
'deferred': False,
'docs': {'show': True},
'columns': {},
'meta': {},
'checksum': {'name': 'sha256', 'checksum': 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'},
'unrendered_config': {},
'config_call_dict': {},
}
@pytest.fixture
def basic_parsed_model_object():
return ParsedModelNode(
package_name='test',
root_path='/root/',
path='/root/x/path.sql',
original_file_path='/root/path.sql',
raw_sql='select * from wherever',
name='foo',
resource_type=NodeType.Model,
unique_id='model.test.foo',
fqn=['test', 'models', 'foo'],
refs=[],
sources=[],
depends_on=DependsOn(),
description='',
database='test_db',
schema='test_schema',
alias='bar',
tags=[],
config=NodeConfig(),
meta={},
checksum=FileHash.from_contents(''),
created_at=1.0,
)
@pytest.fixture
def minimal_parsed_model_dict():
return {
'name': 'foo',
'root_path': '/root/',
'created_at': 1.0,
'resource_type': str(NodeType.Model),
'path': '/root/x/path.sql',
'original_file_path': '/root/path.sql',
'package_name': 'test',
'raw_sql': 'select * from wherever',
'unique_id': 'model.test.foo',
'fqn': ['test', 'models', 'foo'],
'database': 'test_db',
'schema': 'test_schema',
'alias': 'bar',
'checksum': {'name': 'sha256', 'checksum': 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'},
'unrendered_config': {},
}
@pytest.fixture
def complex_parsed_model_dict():
return {
'name': 'foo',
'root_path': '/root/',
'created_at': 1.0,
'resource_type': str(NodeType.Model),
'path': '/root/x/path.sql',
'original_file_path': '/root/path.sql',
'package_name': 'test',
'raw_sql': 'select * from {{ ref("bar") }}',
'unique_id': 'model.test.foo',
'fqn': ['test', 'models', 'foo'],
'refs': [],
'sources': [],
'depends_on': {'macros': [], 'nodes': ['model.test.bar']},
'database': 'test_db',
'deferred': True,
'description': 'My parsed node',
'schema': 'test_schema',
'alias': 'bar',
'tags': ['tag'],
'meta': {},
'config': {
'column_types': {'a': 'text'},
'enabled': True,
'materialized': 'ephemeral',
'persist_docs': {},
'post-hook': [{'sql': 'insert into blah(a, b) select "1", 1', 'transaction': True}],
'pre-hook': [],
'quoting': {},
'tags': [],
'on_schema_change': 'ignore',
'meta': {},
'grants': {},
},
'docs': {'show': True},
'columns': {
'a': {
'name': 'a',
'description': 'a text field',
'meta': {},
'tags': [],
},
},
'checksum': {'name': 'sha256', 'checksum': 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'},
'unrendered_config': {
'column_types': {'a': 'text'},
'materialized': 'ephemeral',
'post_hook': ['insert into blah(a, b) select "1", 1'],
},
'config_call_dict': {},
}
@pytest.fixture
def complex_parsed_model_object():
return ParsedModelNode(
package_name='test',
root_path='/root/',
path='/root/x/path.sql',
original_file_path='/root/path.sql',
raw_sql='select * from {{ ref("bar") }}',
name='foo',
resource_type=NodeType.Model,
unique_id='model.test.foo',
fqn=['test', 'models', 'foo'],
refs=[],
sources=[],
depends_on=DependsOn(nodes=['model.test.bar']),
deferred=True,
description='My parsed node',
database='test_db',
schema='test_schema',
alias='bar',
tags=['tag'],
meta={},
config=NodeConfig(
column_types={'a': 'text'},
materialized='ephemeral',
post_hook=[Hook(sql='insert into blah(a, b) select "1", 1')],
),
columns={'a': ColumnInfo('a', 'a text field', {})},
checksum=FileHash.from_contents(''),
unrendered_config={
'column_types': {'a': 'text'},
'materialized': 'ephemeral',
'post_hook': ['insert into blah(a, b) select "1", 1'],
},
)
def test_model_basic(basic_parsed_model_object, base_parsed_model_dict, minimal_parsed_model_dict):
node = basic_parsed_model_object
node_dict = base_parsed_model_dict
compare_dicts(node.to_dict(), node_dict)
assert_symmetric(node, node_dict)
assert node.empty is False
assert node.is_refable is True
assert node.is_ephemeral is False
minimum = minimal_parsed_model_dict
assert_from_dict(node, minimum)
pickle.loads(pickle.dumps(node))
def test_model_complex(complex_parsed_model_object, complex_parsed_model_dict):
node = complex_parsed_model_object
node_dict = complex_parsed_model_dict
assert_symmetric(node, node_dict)
assert node.empty is False
assert node.is_refable is True
assert node.is_ephemeral is True
def test_invalid_bad_tags(base_parsed_model_dict):
# bad top-level field
bad_tags = base_parsed_model_dict
bad_tags['tags'] = 100
assert_fails_validation(bad_tags, ParsedModelNode)
def test_invalid_bad_materialized(base_parsed_model_dict):
# bad nested field
bad_materialized = base_parsed_model_dict
bad_materialized['config']['materialized'] = None
assert_fails_validation(bad_materialized, ParsedModelNode)
unchanged_nodes = [
lambda u: (u, u.replace(tags=['mytag'])),
lambda u: (u, u.replace(meta={'something': 1000})),
# True -> True
lambda u: (
replace_config(u, persist_docs={'relation': True}),
replace_config(u, persist_docs={'relation': True}),
),
lambda u: (
replace_config(u, persist_docs={'columns': True}),
replace_config(u, persist_docs={'columns': True}),
),
# only columns docs enabled, but description changed
lambda u: (
replace_config(u, persist_docs={'columns': True}),
replace_config(u, persist_docs={'columns': True}).replace(description='a model description'),
),
    # only relation docs enabled, but columns changed
lambda u: (
replace_config(u, persist_docs={'relation': True}),
replace_config(u, persist_docs={'relation': True}).replace(columns={'a': ColumnInfo(name='a', description='a column description')}),
),
# not tracked, we track config.alias/config.schema/config.database
lambda u: (u, u.replace(alias='other')),
lambda u: (u, u.replace(schema='other')),
lambda u: (u, u.replace(database='other')),
]
changed_nodes = [
lambda u: (u, u.replace(fqn=['test', 'models', 'subdir', 'foo'], original_file_path='models/subdir/foo.sql', path='/root/models/subdir/foo.sql')),
# None -> False is a config change even though it's pretty much the same
lambda u: (u, replace_config(u, persist_docs={'relation': False})),
lambda u: (u, replace_config(u, persist_docs={'columns': False})),
# persist docs was true for the relation and we changed the model description
lambda u: (
replace_config(u, persist_docs={'relation': True}),
replace_config(u, persist_docs={'relation': True}).replace(description='a model description'),
),
# persist docs was true for columns and we changed the model description
lambda u: (
replace_config(u, persist_docs={'columns': True}),
replace_config(u, persist_docs={'columns': True}).replace(columns={'a': ColumnInfo(name='a', description='a column description')}),
),
# not tracked, we track config.alias/config.schema/config.database
lambda u: (u, replace_config(u, alias='other')),
lambda u: (u, replace_config(u, schema='other')),
lambda u: (u, replace_config(u, database='other')),
]
@pytest.mark.parametrize('func', unchanged_nodes)
def test_compare_unchanged_parsed_model(func, basic_parsed_model_object):
node, compare = func(basic_parsed_model_object)
assert node.same_contents(compare)
@pytest.mark.parametrize('func', changed_nodes)
def test_compare_changed_model(func, basic_parsed_model_object):
node, compare = func(basic_parsed_model_object)
assert not node.same_contents(compare)
@pytest.fixture
def basic_parsed_seed_dict():
return {
'name': 'foo',
'root_path': '/root/',
'created_at': 1.0,
'resource_type': str(NodeType.Seed),
'path': '/root/seeds/seed.csv',
'original_file_path': 'seeds/seed.csv',
'package_name': 'test',
'raw_sql': '',
'unique_id': 'seed.test.foo',
'fqn': ['test', 'seeds', 'foo'],
'refs': [],
'sources': [],
'depends_on': {'macros': [], 'nodes': []},
'database': 'test_db',
'description': '',
'schema': 'test_schema',
'tags': [],
'alias': 'foo',
'config': {
'column_types': {},
'enabled': True,
'materialized': 'seed',
'persist_docs': {},
'post-hook': [],
'pre-hook': [],
'quoting': {},
'tags': [],
'on_schema_change': 'ignore',
'meta': {},
'grants': {},
},
'deferred': False,
'docs': {'show': True},
'columns': {},
'meta': {},
'checksum': {'name': 'path', 'checksum': 'seeds/seed.csv'},
'unrendered_config': {},
'config_call_dict': {},
}
@pytest.fixture
def basic_parsed_seed_object():
return ParsedSeedNode(
name='foo',
root_path='/root/',
resource_type=NodeType.Seed,
path='/root/seeds/seed.csv',
original_file_path='seeds/seed.csv',
package_name='test',
raw_sql='',
unique_id='seed.test.foo',
fqn=['test', 'seeds', 'foo'],
refs=[],
sources=[],
depends_on=DependsOn(),
database='test_db',
description='',
schema='test_schema',
tags=[],
alias='foo',
config=SeedConfig(),
# config=SeedConfig(quote_columns=True),
deferred=False,
docs=Docs(show=True),
columns={},
meta={},
checksum=FileHash(name='path', checksum='seeds/seed.csv'),
unrendered_config={},
)
@pytest.fixture
def minimal_parsed_seed_dict():
return {
'name': 'foo',
'root_path': '/root/',
'created_at': 1.0,
'resource_type': str(NodeType.Seed),
'path': '/root/seeds/seed.csv',
'original_file_path': 'seeds/seed.csv',
'package_name': 'test',
'raw_sql': '',
'unique_id': 'seed.test.foo',
'fqn': ['test', 'seeds', 'foo'],
'database': 'test_db',
'schema': 'test_schema',
'alias': 'foo',
'checksum': {'name': 'path', 'checksum': 'seeds/seed.csv'},
}
@pytest.fixture
def complex_parsed_seed_dict():
return {
'name': 'foo',
'root_path': '/root/',
'created_at': 1.0,
'resource_type': str(NodeType.Seed),
'path': '/root/seeds/seed.csv',
'original_file_path': 'seeds/seed.csv',
'package_name': 'test',
'raw_sql': '',
'unique_id': 'seed.test.foo',
'fqn': ['test', 'seeds', 'foo'],
'refs': [],
'sources': [],
'depends_on': {'macros': [], 'nodes': []},
'database': 'test_db',
'description': 'a description',
'schema': 'test_schema',
'tags': ['mytag'],
'alias': 'foo',
'config': {
'column_types': {},
'enabled': True,
'materialized': 'seed',
'persist_docs': {'relation': True, 'columns': True},
'post-hook': [],
'pre-hook': [],
'quoting': {},
'tags': [],
'quote_columns': True,
'on_schema_change': 'ignore',
'meta': {},
'grants': {},
},
'deferred': False,
'docs': {'show': True},
'columns': {'a': {'name': 'a', 'description': 'a column description', 'meta': {}, 'tags': []}},
'meta': {'foo': 1000},
'checksum': {'name': 'sha256', 'checksum': 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'},
'unrendered_config': {
'persist_docs': {'relation': True, 'columns': True},
},
'config_call_dict': {},
}
@pytest.fixture
def complex_parsed_seed_object():
return ParsedSeedNode(
name='foo',
root_path='/root/',
resource_type=NodeType.Seed,
path='/root/seeds/seed.csv',
original_file_path='seeds/seed.csv',
package_name='test',
raw_sql='',
unique_id='seed.test.foo',
fqn=['test', 'seeds', 'foo'],
refs=[],
sources=[],
depends_on=DependsOn(),
database='test_db',
description='a description',
schema='test_schema',
tags=['mytag'],
alias='foo',
config=SeedConfig(
quote_columns=True,
persist_docs={'relation': True, 'columns': True},
),
deferred=False,
docs=Docs(show=True),
columns={'a': ColumnInfo(name='a', description='a column description')},
meta={'foo': 1000},
checksum=FileHash.from_contents(''),
unrendered_config={
'persist_docs': {'relation': True, 'columns': True},
},
)
def test_seed_basic(basic_parsed_seed_dict, basic_parsed_seed_object, minimal_parsed_seed_dict):
assert_symmetric(basic_parsed_seed_object, basic_parsed_seed_dict)
assert basic_parsed_seed_object.get_materialization() == 'seed'
assert_from_dict(basic_parsed_seed_object, minimal_parsed_seed_dict, ParsedSeedNode)
def test_seed_complex(complex_parsed_seed_dict, complex_parsed_seed_object):
assert_symmetric(complex_parsed_seed_object, complex_parsed_seed_dict)
assert complex_parsed_seed_object.get_materialization() == 'seed'
unchanged_seeds = [
lambda u: (u, u.replace(tags=['mytag'])),
lambda u: (u, u.replace(meta={'something': 1000})),
# True -> True
lambda u: (
replace_config(u, persist_docs={'relation': True}),
replace_config(u, persist_docs={'relation': True}),
),
lambda u: (
replace_config(u, persist_docs={'columns': True}),
replace_config(u, persist_docs={'columns': True}),
),
# only columns docs enabled, but description changed
lambda u: (
replace_config(u, persist_docs={'columns': True}),
replace_config(u, persist_docs={'columns': True}).replace(description='a model description'),
),
    # only relation docs enabled, but columns changed
lambda u: (
replace_config(u, persist_docs={'relation': True}),
replace_config(u, persist_docs={'relation': True}).replace(columns={'a': ColumnInfo(name='a', description='a column description')}),
),
lambda u: (u, u.replace(alias='other')),
lambda u: (u, u.replace(schema='other')),
lambda u: (u, u.replace(database='other')),
]
changed_seeds = [
lambda u: (u, u.replace(fqn=['test', 'models', 'subdir', 'foo'], original_file_path='models/subdir/foo.sql', path='/root/models/subdir/foo.sql')),
# None -> False is a config change even though it's pretty much the same
lambda u: (u, replace_config(u, persist_docs={'relation': False})),
lambda u: (u, replace_config(u, persist_docs={'columns': False})),
# persist docs was true for the relation and we changed the model description
lambda u: (
replace_config(u, persist_docs={'relation': True}),
replace_config(u, persist_docs={'relation': True}).replace(description='a model description'),
),
# persist docs was true for columns and we changed the model description
lambda u: (
replace_config(u, persist_docs={'columns': True}),
replace_config(u, persist_docs={'columns': True}).replace(columns={'a': ColumnInfo(name='a', description='a column description')}),
),
lambda u: (u, replace_config(u, alias='other')),
lambda u: (u, replace_config(u, schema='other')),
lambda u: (u, replace_config(u, database='other')),
]
@pytest.mark.parametrize('func', unchanged_seeds)
def test_compare_unchanged_parsed_seed(func, basic_parsed_seed_object):
node, compare = func(basic_parsed_seed_object)
assert node.same_contents(compare)
@pytest.mark.parametrize('func', changed_seeds)
def test_compare_changed_seed(func, basic_parsed_seed_object):
node, compare = func(basic_parsed_seed_object)
assert not node.same_contents(compare)
@pytest.fixture
def basic_parsed_model_patch_dict():
return {
'name': 'foo',
'description': 'The foo model',
'original_file_path': 'path/to/schema.yml',
'docs': {'show': True},
'meta': {},
'yaml_key': 'models',
'package_name': 'test',
'columns': {
'a': {
'name': 'a',
'description': 'a text field',
'meta': {},
'tags': [],
},
},
'config': {},
}
@pytest.fixture
def basic_parsed_model_patch_object():
return ParsedNodePatch(
name='foo',
yaml_key='models',
package_name='test',
description='The foo model',
original_file_path='path/to/schema.yml',
columns={'a': ColumnInfo(name='a', description='a text field', meta={})},
docs=Docs(),
meta={},
config={},
)
@pytest.fixture
def patched_model_object():
return ParsedModelNode(
package_name='test',
root_path='/root/',
path='/root/x/path.sql',
original_file_path='/root/path.sql',
raw_sql='select * from wherever',
name='foo',
resource_type=NodeType.Model,
unique_id='model.test.foo',
fqn=['test', 'models', 'foo'],
refs=[],
sources=[],
depends_on=DependsOn(),
description='The foo model',
database='test_db',
schema='test_schema',
alias='bar',
tags=[],
meta={},
config=NodeConfig(),
patch_path='test://path/to/schema.yml',
columns={'a': ColumnInfo(name='a', description='a text field', meta={})},
docs=Docs(),
checksum=FileHash.from_contents(''),
unrendered_config={},
)
def test_patch_parsed_model(basic_parsed_model_object, basic_parsed_model_patch_object, patched_model_object):
pre_patch = basic_parsed_model_object
pre_patch.patch(basic_parsed_model_patch_object)
pre_patch.created_at = 1.0
patched_model_object.created_at = 1.0
assert patched_model_object == pre_patch
@pytest.fixture
def minimal_parsed_hook_dict():
return {
'name': 'foo',
'root_path': '/root/',
'resource_type': str(NodeType.Operation),
'path': '/root/x/path.sql',
'original_file_path': '/root/path.sql',
'package_name': 'test',
'raw_sql': 'select * from wherever',
'unique_id': 'model.test.foo',
'fqn': ['test', 'models', 'foo'],
'database': 'test_db',
'schema': 'test_schema',
'alias': 'bar',
'checksum': {'name': 'sha256', 'checksum': 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'},
}
@pytest.fixture
def base_parsed_hook_dict():
return {
'name': 'foo',
'root_path': '/root/',
'created_at': 1.0,
'resource_type': str(NodeType.Operation),
'path': '/root/x/path.sql',
'original_file_path': '/root/path.sql',
'package_name': 'test',
'raw_sql': 'select * from wherever',
'unique_id': 'model.test.foo',
'fqn': ['test', 'models', 'foo'],
'refs': [],
'sources': [],
'depends_on': {'macros': [], 'nodes': []},
'database': 'test_db',
'deferred': False,
'description': '',
'schema': 'test_schema',
'alias': 'bar',
'tags': [],
'config': {
'column_types': {},
'enabled': True,
'materialized': 'view',
'persist_docs': {},
'post-hook': [],
'pre-hook': [],
'quoting': {},
'tags': [],
'on_schema_change': 'ignore',
'meta': {},
'grants': {},
},
'docs': {'show': True},
'columns': {},
'meta': {},
'checksum': {'name': 'sha256', 'checksum': 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'},
'unrendered_config': {},
'config_call_dict': {},
}
@pytest.fixture
def base_parsed_hook_object():
return ParsedHookNode(
package_name='test',
root_path='/root/',
path='/root/x/path.sql',
original_file_path='/root/path.sql',
raw_sql='select * from wherever',
name='foo',
resource_type=NodeType.Operation,
unique_id='model.test.foo',
fqn=['test', 'models', 'foo'],
refs=[],
sources=[],
depends_on=DependsOn(),
description='',
deferred=False,
database='test_db',
schema='test_schema',
alias='bar',
tags=[],
config=NodeConfig(),
index=None,
checksum=FileHash.from_contents(''),
unrendered_config={},
)
@pytest.fixture
def complex_parsed_hook_dict():
return {
'name': 'foo',
'root_path': '/root/',
'created_at': 1.0,
'resource_type': str(NodeType.Operation),
'path': '/root/x/path.sql',
'original_file_path': '/root/path.sql',
'package_name': 'test',
'raw_sql': 'select * from {{ ref("bar") }}',
'unique_id': 'model.test.foo',
'fqn': ['test', 'models', 'foo'],
'refs': [],
'sources': [],
'depends_on': {'macros': [], 'nodes': ['model.test.bar']},
'deferred': False,
'database': 'test_db',
'description': 'My parsed node',
'schema': 'test_schema',
'alias': 'bar',
'tags': ['tag'],
'meta': {},
'config': {
'column_types': {'a': 'text'},
'enabled': True,
'materialized': 'table',
'persist_docs': {},
'post-hook': [],
'pre-hook': [],
'quoting': {},
'tags': [],
'on_schema_change': 'ignore',
'meta': {},
'grants': {},
},
'docs': {'show': True},
'columns': {
'a': {
'name': 'a',
'description': 'a text field',
'meta': {},
'tags': [],
},
},
'index': 13,
'checksum': {'name': 'sha256', 'checksum': 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'},
'unrendered_config': {
'column_types': {'a': 'text'},
'materialized': 'table',
},
'config_call_dict': {},
}
@pytest.fixture
def complex_parsed_hook_object():
return ParsedHookNode(
package_name='test',
root_path='/root/',
path='/root/x/path.sql',
original_file_path='/root/path.sql',
raw_sql='select * from {{ ref("bar") }}',
name='foo',
resource_type=NodeType.Operation,
unique_id='model.test.foo',
fqn=['test', 'models', 'foo'],
refs=[],
sources=[],
depends_on=DependsOn(nodes=['model.test.bar']),
description='My parsed node',
deferred=False,
database='test_db',
schema='test_schema',
alias='bar',
tags=['tag'],
meta={},
config=NodeConfig(
column_types={'a': 'text'},
materialized='table',
post_hook=[]
),
columns={'a': ColumnInfo('a', 'a text field', {})},
index=13,
checksum=FileHash.from_contents(''),
unrendered_config={
'column_types': {'a': 'text'},
'materialized': 'table',
},
)
def test_basic_parsed_hook(minimal_parsed_hook_dict, base_parsed_hook_dict, base_parsed_hook_object):
node = base_parsed_hook_object
node_dict = base_parsed_hook_dict
minimum = minimal_parsed_hook_dict
assert_symmetric(node, node_dict, ParsedHookNode)
assert node.empty is False
assert node.is_refable is False
assert node.get_materialization() == 'view'
assert_from_dict(node, minimum, ParsedHookNode)
pickle.loads(pickle.dumps(node))
def test_complex_parsed_hook(complex_parsed_hook_dict, complex_parsed_hook_object):
node = complex_parsed_hook_object
node_dict = complex_parsed_hook_dict
# what's different?
assert_symmetric(node, node_dict)
assert node.empty is False
assert node.is_refable is False
assert node.get_materialization() == 'table'
def test_invalid_hook_index_type(base_parsed_hook_dict):
bad_index = base_parsed_hook_dict
bad_index['index'] = 'a string!?'
assert_fails_validation(bad_index, ParsedHookNode)
@pytest.fixture
def minimal_parsed_schema_test_dict():
return {
'name': 'foo',
'root_path': '/root/',
'created_at': 1.0,
'resource_type': str(NodeType.Test),
'path': '/root/x/path.sql',
'original_file_path': '/root/path.sql',
'package_name': 'test',
'raw_sql': 'select * from wherever',
'unique_id': 'test.test.foo',
'fqn': ['test', 'models', 'foo'],
'database': 'test_db',
'schema': 'test_schema',
'alias': 'bar',
'meta': {},
'test_metadata': {
'name': 'foo',
'kwargs': {},
},
'checksum': {'name': 'sha256', 'checksum': 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'},
'config_call_dict': {},
}
@pytest.fixture
def basic_parsed_schema_test_dict():
return {
'name': 'foo',
'root_path': '/root/',
'created_at': 1.0,
'resource_type': str(NodeType.Test),
'path': '/root/x/path.sql',
'original_file_path': '/root/path.sql',
'package_name': 'test',
'raw_sql': 'select * from wherever',
'unique_id': 'test.test.foo',
'fqn': ['test', 'models', 'foo'],
'refs': [],
'sources': [],
'depends_on': {'macros': [], 'nodes': []},
'deferred': False,
'database': 'test_db',
'description': '',
'schema': 'test_schema',
'alias': 'bar',
'tags': [],
'meta': {},
'config': {
'enabled': True,
'materialized': 'test',
'tags': [],
'severity': 'ERROR',
'warn_if': '!= 0',
'error_if': '!= 0',
'fail_calc': 'count(*)',
'meta': {},
'schema': 'dbt_test__audit',
},
'docs': {'show': True},
'columns': {},
'test_metadata': {
'name': 'foo',
'kwargs': {},
},
'checksum': {'name': 'sha256', 'checksum': 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'},
'unrendered_config': {},
'config_call_dict': {},
}
@pytest.fixture
def basic_parsed_schema_test_object():
return ParsedGenericTestNode(
package_name='test',
root_path='/root/',
path='/root/x/path.sql',
original_file_path='/root/path.sql',
raw_sql='select * from wherever',
name='foo',
resource_type=NodeType.Test,
unique_id='test.test.foo',
fqn=['test', 'models', 'foo'],
refs=[],
sources=[],
depends_on=DependsOn(),
description='',
database='test_db',
schema='test_schema',
alias='bar',
tags=[],
meta={},
config=TestConfig(),
test_metadata=TestMetadata(namespace=None, name='foo', kwargs={}),
checksum=FileHash.from_contents(''),
)
@pytest.fixture
def complex_parsed_schema_test_dict():
return {
'name': 'foo',
'root_path': '/root/',
'created_at': 1.0,
'resource_type': str(NodeType.Test),
'path': '/root/x/path.sql',
'original_file_path': '/root/path.sql',
'package_name': 'test',
'raw_sql': 'select * from {{ ref("bar") }}',
'unique_id': 'test.test.foo',
'fqn': ['test', 'models', 'foo'],
'refs': [],
'sources': [],
'depends_on': {'macros': [], 'nodes': ['model.test.bar']},
'database': 'test_db',
'deferred': False,
'description': 'My parsed node',
'schema': 'test_schema',
'alias': 'bar',
'tags': ['tag'],
'meta': {},
'config': {
'enabled': True,
'materialized': 'table',
'tags': [],
'severity': 'WARN',
'warn_if': '!= 0',
'error_if': '!= 0',
'fail_calc': 'count(*)',
'extra_key': 'extra value',
'meta': {},
'schema': 'dbt_test__audit',
},
'docs': {'show': False},
'columns': {
'a': {
'name': 'a',
'description': 'a text field',
'meta': {},
'tags': [],
},
},
'column_name': 'id',
'test_metadata': {
'name': 'foo',
'kwargs': {},
},
'checksum': {'name': 'sha256', 'checksum': 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'},
'unrendered_config': {
'materialized': 'table',
'severity': 'WARN'
},
'config_call_dict': {},
}
@pytest.fixture
def complex_parsed_schema_test_object():
cfg = TestConfig(
materialized='table',
severity='WARN'
)
cfg._extra.update({'extra_key': 'extra value'})
return ParsedGenericTestNode(
package_name='test',
root_path='/root/',
path='/root/x/path.sql',
original_file_path='/root/path.sql',
raw_sql='select * from {{ ref("bar") }}',
name='foo',
resource_type=NodeType.Test,
unique_id='test.test.foo',
fqn=['test', 'models', 'foo'],
refs=[],
sources=[],
depends_on=DependsOn(nodes=['model.test.bar']),
description='My parsed node',
database='test_db',
schema='test_schema',
alias='bar',
tags=['tag'],
meta={},
config=cfg,
        columns={'a': ColumnInfo('a', 'a text field', {})},
column_name='id',
docs=Docs(show=False),
test_metadata=TestMetadata(namespace=None, name='foo', kwargs={}),
checksum=FileHash.from_contents(''),
unrendered_config={
'materialized': 'table',
'severity': 'WARN'
},
)
def test_basic_schema_test_node(minimal_parsed_schema_test_dict, basic_parsed_schema_test_dict, basic_parsed_schema_test_object):
node = basic_parsed_schema_test_object
node_dict = basic_parsed_schema_test_dict
minimum = minimal_parsed_schema_test_dict
assert_symmetric(node, node_dict, ParsedGenericTestNode)
assert node.empty is False
assert node.is_ephemeral is False
assert node.is_refable is False
assert node.get_materialization() == 'test'
assert_from_dict(node, minimum, ParsedGenericTestNode)
pickle.loads(pickle.dumps(node))
def test_complex_schema_test_node(complex_parsed_schema_test_dict, complex_parsed_schema_test_object):
# this tests for the presence of _extra keys
node = complex_parsed_schema_test_object # ParsedGenericTestNode
    assert node.config._extra['extra_key']
node_dict = complex_parsed_schema_test_dict
assert_symmetric(node, node_dict)
assert node.empty is False
def test_invalid_column_name_type(complex_parsed_schema_test_dict):
# bad top-level field
bad_column_name = complex_parsed_schema_test_dict
bad_column_name['column_name'] = {}
assert_fails_validation(bad_column_name, ParsedGenericTestNode)
def test_invalid_severity(complex_parsed_schema_test_dict):
invalid_config_value = complex_parsed_schema_test_dict
invalid_config_value['config']['severity'] = 'WERROR'
assert_fails_validation(invalid_config_value, ParsedGenericTestNode)
@pytest.fixture
def basic_timestamp_snapshot_config_dict():
return {
'column_types': {},
'enabled': True,
'materialized': 'snapshot',
'persist_docs': {},
'post-hook': [],
'pre-hook': [],
'quoting': {},
'tags': [],
'unique_key': 'id',
'strategy': 'timestamp',
'updated_at': 'last_update',
'target_database': 'some_snapshot_db',
'target_schema': 'some_snapshot_schema',
'on_schema_change': 'ignore',
'meta': {},
'grants': {},
}
@pytest.fixture
def basic_timestamp_snapshot_config_object():
return SnapshotConfig(
strategy='timestamp',
updated_at='last_update',
unique_key='id',
target_database='some_snapshot_db',
target_schema='some_snapshot_schema',
)
@pytest.fixture
def complex_timestamp_snapshot_config_dict():
return {
'column_types': {'a': 'text'},
'enabled': True,
'materialized': 'snapshot',
'persist_docs': {},
'post-hook': [{'sql': 'insert into blah(a, b) select "1", 1', 'transaction': True}],
'pre-hook': [],
'quoting': {},
'tags': [],
'target_database': 'some_snapshot_db',
'target_schema': 'some_snapshot_schema',
'unique_key': 'id',
'extra': 'even more',
'strategy': 'timestamp',
'updated_at': 'last_update',
'on_schema_change': 'ignore',
'meta': {},
'grants': {},
}
@pytest.fixture
def complex_timestamp_snapshot_config_object():
cfg = SnapshotConfig(
column_types={'a': 'text'},
materialized='snapshot',
post_hook=[Hook(sql='insert into blah(a, b) select "1", 1')],
strategy='timestamp',
target_database='some_snapshot_db',
target_schema='some_snapshot_schema',
updated_at='last_update',
unique_key='id',
)
cfg._extra['extra'] = 'even more'
return cfg
def test_basic_timestamp_snapshot_config(basic_timestamp_snapshot_config_dict, basic_timestamp_snapshot_config_object):
cfg = basic_timestamp_snapshot_config_object
cfg_dict = basic_timestamp_snapshot_config_dict
assert_symmetric(cfg, cfg_dict)
pickle.loads(pickle.dumps(cfg))
def test_complex_timestamp_snapshot_config(complex_timestamp_snapshot_config_dict, complex_timestamp_snapshot_config_object):
cfg = complex_timestamp_snapshot_config_object
cfg_dict = complex_timestamp_snapshot_config_dict
assert_symmetric(cfg, cfg_dict, SnapshotConfig)
def test_invalid_missing_updated_at(basic_timestamp_snapshot_config_dict):
bad_fields = basic_timestamp_snapshot_config_dict
del bad_fields['updated_at']
bad_fields['check_cols'] = 'all'
assert_fails_validation(bad_fields, SnapshotConfig)
@pytest.fixture
def basic_check_snapshot_config_dict():
return {
'column_types': {},
'enabled': True,
'materialized': 'snapshot',
'persist_docs': {},
'post-hook': [],
'pre-hook': [],
'quoting': {},
'tags': [],
'target_database': 'some_snapshot_db',
'target_schema': 'some_snapshot_schema',
'unique_key': 'id',
'strategy': 'check',
'check_cols': 'all',
'on_schema_change': 'ignore',
'meta': {},
'grants': {},
}
@pytest.fixture
def basic_check_snapshot_config_object():
return SnapshotConfig(
strategy='check',
check_cols='all',
unique_key='id',
target_database='some_snapshot_db',
target_schema='some_snapshot_schema',
)
@pytest.fixture
def complex_set_snapshot_config_dict():
return {
'column_types': {'a': 'text'},
'enabled': True,
'materialized': 'snapshot',
'persist_docs': {},
'post-hook': [{'sql': 'insert into blah(a, b) select "1", 1', 'transaction': True}],
'pre-hook': [],
'quoting': {},
'tags': [],
'target_database': 'some_snapshot_db',
'target_schema': 'some_snapshot_schema',
'unique_key': 'id',
'extra': 'even more',
'strategy': 'check',
'check_cols': ['a', 'b'],
'on_schema_change': 'ignore',
'meta': {},
'grants': {},
}
@pytest.fixture
def complex_set_snapshot_config_object():
cfg = SnapshotConfig(
column_types={'a': 'text'},
materialized='snapshot',
post_hook=[Hook(sql='insert into blah(a, b) select "1", 1')],
strategy='check',
check_cols=['a', 'b'],
target_database='some_snapshot_db',
target_schema='some_snapshot_schema',
unique_key='id',
)
cfg._extra['extra'] = 'even more'
return cfg
def test_basic_snapshot_config(basic_check_snapshot_config_dict, basic_check_snapshot_config_object):
cfg_dict = basic_check_snapshot_config_dict
cfg = basic_check_snapshot_config_object
assert_symmetric(cfg, cfg_dict, SnapshotConfig)
pickle.loads(pickle.dumps(cfg))
def test_complex_snapshot_config(complex_set_snapshot_config_dict, complex_set_snapshot_config_object):
cfg_dict = complex_set_snapshot_config_dict
cfg = complex_set_snapshot_config_object
assert_symmetric(cfg, cfg_dict)
pickle.loads(pickle.dumps(cfg))
def test_invalid_check_wrong_strategy(basic_check_snapshot_config_dict):
wrong_strategy = basic_check_snapshot_config_dict
wrong_strategy['strategy'] = 'timestamp'
assert_fails_validation(wrong_strategy, SnapshotConfig)
def test_invalid_missing_check_cols(basic_check_snapshot_config_dict):
wrong_fields = basic_check_snapshot_config_dict
del wrong_fields['check_cols']
with pytest.raises(ValidationError, match=r"A snapshot configured with the check strategy"):
SnapshotConfig.validate(wrong_fields)
def test_missing_snapshot_configs(basic_check_snapshot_config_dict):
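    # The expected ValidationError carries one combined message covering the
    # required snapshot configs, so the same match pattern is reused as each
    # of 'strategy', 'unique_key', and 'target_schema' is removed in turn.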
wrong_fields = basic_check_snapshot_config_dict
del wrong_fields['strategy']
with pytest.raises(ValidationError, match=r"Snapshots must be configured with a 'strategy'"):
SnapshotConfig.validate(wrong_fields)
wrong_fields['strategy'] = 'timestamp'
del wrong_fields['unique_key']
with pytest.raises(ValidationError, match=r"Snapshots must be configured with a 'strategy'"):
SnapshotConfig.validate(wrong_fields)
wrong_fields['unique_key'] = 'id'
del wrong_fields['target_schema']
with pytest.raises(ValidationError, match=r"Snapshots must be configured with a 'strategy'"):
SnapshotConfig.validate(wrong_fields)
def test_invalid_check_value(basic_check_snapshot_config_dict):
invalid_check_type = basic_check_snapshot_config_dict
invalid_check_type['check_cols'] = 'some'
assert_fails_validation(invalid_check_type, SnapshotConfig)
@pytest.fixture
def basic_timestamp_snapshot_dict():
return {
'name': 'foo',
'root_path': '/root/',
'created_at': 1.0,
'resource_type': str(NodeType.Snapshot),
'path': '/root/x/path.sql',
'original_file_path': '/root/path.sql',
'package_name': 'test',
'raw_sql': 'select * from wherever',
'unique_id': 'model.test.foo',
'fqn': ['test', 'models', 'foo'],
'refs': [],
'sources': [],
'depends_on': {'macros': [], 'nodes': []},
'deferred': False,
'database': 'test_db',
'description': '',
'schema': 'test_schema',
'alias': 'bar',
'tags': [],
'config': {
'column_types': {},
'enabled': True,
'materialized': 'snapshot',
'persist_docs': {},
'post-hook': [],
'pre-hook': [],
'quoting': {},
'tags': [],
'target_database': 'some_snapshot_db',
'target_schema': 'some_snapshot_schema',
'unique_key': 'id',
'strategy': 'timestamp',
'updated_at': 'last_update',
'on_schema_change': 'ignore',
'meta': {},
'grants': {},
},
'docs': {'show': True},
'columns': {},
'meta': {},
'checksum': {'name': 'sha256', 'checksum': 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'},
'unrendered_config': {
'strategy': 'timestamp',
'unique_key': 'id',
'updated_at': 'last_update',
'target_database': 'some_snapshot_db',
'target_schema': 'some_snapshot_schema',
},
'config_call_dict': {},
}
@pytest.fixture
def basic_timestamp_snapshot_object():
return ParsedSnapshotNode(
package_name='test',
root_path='/root/',
path='/root/x/path.sql',
original_file_path='/root/path.sql',
raw_sql='select * from wherever',
name='foo',
resource_type=NodeType.Snapshot,
unique_id='model.test.foo',
fqn=['test', 'models', 'foo'],
refs=[],
sources=[],
depends_on=DependsOn(),
description='',
database='test_db',
schema='test_schema',
alias='bar',
tags=[],
config=SnapshotConfig(
strategy='timestamp',
unique_key='id',
updated_at='last_update',
target_database='some_snapshot_db',
target_schema='some_snapshot_schema',
),
checksum=FileHash.from_contents(''),
unrendered_config={
'strategy': 'timestamp',
'unique_key': 'id',
'updated_at': 'last_update',
'target_database': 'some_snapshot_db',
'target_schema': 'some_snapshot_schema',
},
)
@pytest.fixture
def basic_intermediate_timestamp_snapshot_object():
cfg = EmptySnapshotConfig()
cfg._extra.update({
'strategy': 'timestamp',
'unique_key': 'id',
'updated_at': 'last_update',
'target_database': 'some_snapshot_db',
'target_schema': 'some_snapshot_schema',
})
return IntermediateSnapshotNode(
package_name='test',
root_path='/root/',
path='/root/x/path.sql',
original_file_path='/root/path.sql',
raw_sql='select * from wherever',
name='foo',
resource_type=NodeType.Snapshot,
unique_id='model.test.foo',
fqn=['test', 'models', 'foo'],
refs=[],
sources=[],
depends_on=DependsOn(),
description='',
database='test_db',
schema='test_schema',
alias='bar',
tags=[],
config=cfg,
checksum=FileHash.from_contents(''),
        created_at=1,
unrendered_config={
'strategy': 'timestamp',
'unique_key': 'id',
'updated_at': 'last_update',
'target_database': 'some_snapshot_db',
'target_schema': 'some_snapshot_schema',
},
)
@pytest.fixture
def basic_check_snapshot_dict():
return {
'name': 'foo',
'root_path': '/root/',
'created_at': 1.0,
'resource_type': str(NodeType.Snapshot),
'path': '/root/x/path.sql',
'original_file_path': '/root/path.sql',
'package_name': 'test',
'raw_sql': 'select * from wherever',
'unique_id': 'model.test.foo',
'fqn': ['test', 'models', 'foo'],
'refs': [],
'sources': [],
'depends_on': {'macros': [], 'nodes': []},
'database': 'test_db',
'deferred': False,
'description': '',
'schema': 'test_schema',
'alias': 'bar',
'tags': [],
'config': {
'column_types': {},
'enabled': True,
'materialized': 'snapshot',
'persist_docs': {},
'post-hook': [],
'pre-hook': [],
'quoting': {},
'tags': [],
'target_database': 'some_snapshot_db',
'target_schema': 'some_snapshot_schema',
'unique_key': 'id',
'strategy': 'check',
'check_cols': 'all',
'on_schema_change': 'ignore',
'meta': {},
'grants': {},
},
'docs': {'show': True},
'columns': {},
'meta': {},
'checksum': {'name': 'sha256', 'checksum': 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'},
'unrendered_config': {
'target_database': 'some_snapshot_db',
'target_schema': 'some_snapshot_schema',
'unique_key': 'id',
'strategy': 'check',
'check_cols': 'all',
},
'config_call_dict': {},
}
@pytest.fixture
def basic_check_snapshot_object():
return ParsedSnapshotNode(
package_name='test',
root_path='/root/',
path='/root/x/path.sql',
original_file_path='/root/path.sql',
raw_sql='select * from wherever',
name='foo',
resource_type=NodeType.Snapshot,
unique_id='model.test.foo',
fqn=['test', 'models', 'foo'],
refs=[],
sources=[],
depends_on=DependsOn(),
description='',
database='test_db',
schema='test_schema',
alias='bar',
tags=[],
config=SnapshotConfig(
strategy='check',
unique_key='id',
check_cols='all',
target_database='some_snapshot_db',
target_schema='some_snapshot_schema',
),
checksum=FileHash.from_contents(''),
unrendered_config={
'target_database': 'some_snapshot_db',
'target_schema': 'some_snapshot_schema',
'unique_key': 'id',
'strategy': 'check',
'check_cols': 'all',
},
)
@pytest.fixture
def basic_intermediate_check_snapshot_object():
cfg = EmptySnapshotConfig()
cfg._extra.update({
'unique_key': 'id',
'strategy': 'check',
'check_cols': 'all',
'target_database': 'some_snapshot_db',
'target_schema': 'some_snapshot_schema',
})
return IntermediateSnapshotNode(
package_name='test',
root_path='/root/',
path='/root/x/path.sql',
original_file_path='/root/path.sql',
raw_sql='select * from wherever',
name='foo',
resource_type=NodeType.Snapshot,
unique_id='model.test.foo',
fqn=['test', 'models', 'foo'],
refs=[],
sources=[],
depends_on=DependsOn(),
description='',
database='test_db',
schema='test_schema',
alias='bar',
tags=[],
config=cfg,
checksum=FileHash.from_contents(''),
        created_at=1.0,
unrendered_config={
'target_database': 'some_snapshot_db',
'target_schema': 'some_snapshot_schema',
'unique_key': 'id',
'strategy': 'check',
'check_cols': 'all',
},
)
def test_timestamp_snapshot_ok(basic_timestamp_snapshot_dict, basic_timestamp_snapshot_object, basic_intermediate_timestamp_snapshot_object):
node_dict = basic_timestamp_snapshot_dict
node = basic_timestamp_snapshot_object
inter = basic_intermediate_timestamp_snapshot_object
assert_symmetric(node, node_dict, ParsedSnapshotNode)
# node_from_dict = ParsedSnapshotNode.from_dict(inter.to_dict(omit_none=True))
# node_from_dict.created_at = 1
assert ParsedSnapshotNode.from_dict(inter.to_dict(omit_none=True)) == node
assert node.is_refable is True
assert node.is_ephemeral is False
pickle.loads(pickle.dumps(node))
def test_check_snapshot_ok(basic_check_snapshot_dict, basic_check_snapshot_object, basic_intermediate_check_snapshot_object):
node_dict = basic_check_snapshot_dict
node = basic_check_snapshot_object
inter = basic_intermediate_check_snapshot_object
assert_symmetric(node, node_dict, ParsedSnapshotNode)
assert ParsedSnapshotNode.from_dict(inter.to_dict(omit_none=True)) == node
assert node.is_refable is True
assert node.is_ephemeral is False
pickle.loads(pickle.dumps(node))
def test_invalid_snapshot_bad_resource_type(basic_timestamp_snapshot_dict):
bad_resource_type = basic_timestamp_snapshot_dict
bad_resource_type['resource_type'] = str(NodeType.Model)
assert_fails_validation(bad_resource_type, ParsedSnapshotNode)
def test_basic_parsed_node_patch(basic_parsed_model_patch_object, basic_parsed_model_patch_dict):
assert_symmetric(basic_parsed_model_patch_object, basic_parsed_model_patch_dict)
@pytest.fixture
def populated_parsed_node_patch_dict():
return {
'name': 'foo',
'description': 'The foo model',
'original_file_path': 'path/to/schema.yml',
'columns': {
'a': {
'name': 'a',
'description': 'a text field',
'meta': {},
'tags': [],
},
},
'docs': {'show': False},
'meta': {'key': ['value']},
'yaml_key': 'models',
'package_name': 'test',
'config': {},
}
@pytest.fixture
def populated_parsed_node_patch_object():
return ParsedNodePatch(
name='foo',
description='The foo model',
original_file_path='path/to/schema.yml',
columns={'a': ColumnInfo(name='a', description='a text field', meta={})},
meta={'key': ['value']},
yaml_key='models',
package_name='test',
docs=Docs(show=False),
config={},
)
def test_populated_parsed_node_patch(populated_parsed_node_patch_dict, populated_parsed_node_patch_object):
assert_symmetric(populated_parsed_node_patch_object, populated_parsed_node_patch_dict)
class TestParsedMacro(ContractTestCase):
ContractType = ParsedMacro
def _ok_dict(self):
return {
'name': 'foo',
'path': '/root/path.sql',
'original_file_path': '/root/path.sql',
'created_at': 1.0,
'package_name': 'test',
'macro_sql': '{% macro foo() %}select 1 as id{% endmacro %}',
'root_path': '/root/',
'resource_type': 'macro',
'unique_id': 'macro.test.foo',
'tags': [],
'depends_on': {'macros': []},
'meta': {},
'description': 'my macro description',
'docs': {'show': True},
'arguments': [],
}
def test_ok(self):
macro_dict = self._ok_dict()
macro = self.ContractType(
name='foo',
path='/root/path.sql',
original_file_path='/root/path.sql',
package_name='test',
macro_sql='{% macro foo() %}select 1 as id{% endmacro %}',
root_path='/root/',
resource_type=NodeType.Macro,
unique_id='macro.test.foo',
tags=[],
depends_on=MacroDependsOn(),
meta={},
description='my macro description',
arguments=[],
)
assert_symmetric(macro, macro_dict)
pickle.loads(pickle.dumps(macro))
def test_invalid_missing_unique_id(self):
bad_missing_uid = self._ok_dict()
del bad_missing_uid['unique_id']
self.assert_fails_validation(bad_missing_uid)
def test_invalid_extra_field(self):
bad_extra_field = self._ok_dict()
bad_extra_field['extra'] = 'too many fields'
self.assert_fails_validation(bad_extra_field)
class TestParsedDocumentation(ContractTestCase):
ContractType = ParsedDocumentation
def _ok_dict(self):
return {
'block_contents': 'some doc contents',
'name': 'foo',
'original_file_path': '/root/docs/doc.md',
'package_name': 'test',
'path': '/root/docs',
'root_path': '/root',
'unique_id': 'test.foo',
}
def test_ok(self):
doc_dict = self._ok_dict()
doc = self.ContractType(
package_name='test',
root_path='/root',
path='/root/docs',
original_file_path='/root/docs/doc.md',
name='foo',
unique_id='test.foo',
block_contents='some doc contents'
)
self.assert_symmetric(doc, doc_dict)
pickle.loads(pickle.dumps(doc))
def test_invalid_missing(self):
bad_missing_contents = self._ok_dict()
del bad_missing_contents['block_contents']
self.assert_fails_validation(bad_missing_contents)
def test_invalid_extra(self):
bad_extra_field = self._ok_dict()
bad_extra_field['extra'] = 'more'
self.assert_fails_validation(bad_extra_field)
@pytest.fixture
def minimum_parsed_source_definition_dict():
return {
'package_name': 'test',
'root_path': '/root',
'path': '/root/models/sources.yml',
'original_file_path': '/root/models/sources.yml',
'created_at': 1.0,
'database': 'some_db',
'schema': 'some_schema',
'fqn': ['test', 'source', 'my_source', 'my_source_table'],
'source_name': 'my_source',
'name': 'my_source_table',
'source_description': 'my source description',
'loader': 'stitch',
'identifier': 'my_source_table',
'resource_type': str(NodeType.Source),
'unique_id': 'test.source.my_source.my_source_table',
}
@pytest.fixture
def basic_parsed_source_definition_dict():
return {
'package_name': 'test',
'root_path': '/root',
'path': '/root/models/sources.yml',
'original_file_path': '/root/models/sources.yml',
'created_at': 1.0,
'database': 'some_db',
'schema': 'some_schema',
'fqn': ['test', 'source', 'my_source', 'my_source_table'],
'source_name': 'my_source',
'name': 'my_source_table',
'source_description': 'my source description',
'loader': 'stitch',
'identifier': 'my_source_table',
'resource_type': str(NodeType.Source),
'description': '',
'columns': {},
'quoting': {},
'unique_id': 'test.source.my_source.my_source_table',
'meta': {},
'source_meta': {},
'tags': [],
'config': {
'enabled': True,
},
'unrendered_config': {},
}
@pytest.fixture
def basic_parsed_source_definition_object():
return ParsedSourceDefinition(
columns={},
database='some_db',
description='',
fqn=['test', 'source', 'my_source', 'my_source_table'],
identifier='my_source_table',
loader='stitch',
name='my_source_table',
original_file_path='/root/models/sources.yml',
package_name='test',
path='/root/models/sources.yml',
quoting=Quoting(),
resource_type=NodeType.Source,
root_path='/root',
schema='some_schema',
source_description='my source description',
source_name='my_source',
unique_id='test.source.my_source.my_source_table',
tags=[],
config=SourceConfig(),
)
@pytest.fixture
def complex_parsed_source_definition_dict():
return {
'package_name': 'test',
'root_path': '/root',
'path': '/root/models/sources.yml',
'original_file_path': '/root/models/sources.yml',
'created_at': 1.0,
'database': 'some_db',
'schema': 'some_schema',
'fqn': ['test', 'source', 'my_source', 'my_source_table'],
'source_name': 'my_source',
'name': 'my_source_table',
'source_description': 'my source description',
'loader': 'stitch',
'identifier': 'my_source_table',
'resource_type': str(NodeType.Source),
'description': '',
'columns': {},
'quoting': {},
'unique_id': 'test.source.my_source.my_source_table',
'meta': {},
'source_meta': {},
'tags': ['my_tag'],
'config': {
'enabled': True,
},
'freshness': {
'warn_after': {'period': 'hour', 'count': 1},
'error_after': {}
},
'loaded_at_field': 'loaded_at',
'unrendered_config': {},
}
@pytest.fixture
def complex_parsed_source_definition_object():
return ParsedSourceDefinition(
columns={},
database='some_db',
description='',
fqn=['test', 'source', 'my_source', 'my_source_table'],
identifier='my_source_table',
loader='stitch',
name='my_source_table',
original_file_path='/root/models/sources.yml',
package_name='test',
path='/root/models/sources.yml',
quoting=Quoting(),
resource_type=NodeType.Source,
root_path='/root',
schema='some_schema',
source_description='my source description',
source_name='my_source',
unique_id='test.source.my_source.my_source_table',
tags=['my_tag'],
config=SourceConfig(),
freshness=FreshnessThreshold(warn_after=Time(period=TimePeriod.hour, count=1)),
loaded_at_field='loaded_at',
)
def test_basic_source_definition(minimum_parsed_source_definition_dict, basic_parsed_source_definition_dict, basic_parsed_source_definition_object):
node = basic_parsed_source_definition_object
node_dict = basic_parsed_source_definition_dict
minimum = minimum_parsed_source_definition_dict
assert_symmetric(node, node_dict, ParsedSourceDefinition)
assert node.is_ephemeral is False
assert node.is_refable is False
assert node.has_freshness is False
assert_from_dict(node, minimum, ParsedSourceDefinition)
pickle.loads(pickle.dumps(node))
def test_invalid_missing(minimum_parsed_source_definition_dict):
bad_missing_name = minimum_parsed_source_definition_dict
del bad_missing_name['name']
assert_fails_validation(bad_missing_name, ParsedSourceDefinition)
def test_invalid_bad_resource_type(minimum_parsed_source_definition_dict):
bad_resource_type = minimum_parsed_source_definition_dict
bad_resource_type['resource_type'] = str(NodeType.Model)
assert_fails_validation(bad_resource_type, ParsedSourceDefinition)
def test_complex_source_definition(complex_parsed_source_definition_dict, complex_parsed_source_definition_object):
node = complex_parsed_source_definition_object
node_dict = complex_parsed_source_definition_dict
assert_symmetric(node, node_dict, ParsedSourceDefinition)
assert node.is_ephemeral is False
assert node.is_refable is False
assert node.has_freshness is True
pickle.loads(pickle.dumps(node))
def test_source_no_loaded_at(complex_parsed_source_definition_object):
node = complex_parsed_source_definition_object
assert node.has_freshness is True
# no loaded_at_field -> does not have freshness
node.loaded_at_field = None
assert node.has_freshness is False
def test_source_no_freshness(complex_parsed_source_definition_object):
node = complex_parsed_source_definition_object
assert node.has_freshness is True
node.freshness = None
assert node.has_freshness is False
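# Each lambda below maps a base source definition to an (original, modified)
# pair: edits to tags or meta should still count as the "same contents",
# while changes to freshness, quoting, or the relation identifiers should not.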
unchanged_source_definitions = [
lambda u: (u, u.replace(tags=['mytag'])),
lambda u: (u, u.replace(meta={'a': 1000})),
]
changed_source_definitions = [
lambda u: (u, u.replace(freshness=FreshnessThreshold(warn_after=Time(period=TimePeriod.hour, count=1)), loaded_at_field='loaded_at')),
lambda u: (u, u.replace(loaded_at_field='loaded_at')),
lambda u: (u, u.replace(freshness=FreshnessThreshold(error_after=Time(period=TimePeriod.hour, count=1)))),
lambda u: (u, u.replace(quoting=Quoting(identifier=True))),
lambda u: (u, u.replace(database='other_database')),
lambda u: (u, u.replace(schema='other_schema')),
lambda u: (u, u.replace(identifier='identifier')),
]
@pytest.mark.parametrize('func', unchanged_source_definitions)
def test_compare_unchanged_parsed_source_definition(func, basic_parsed_source_definition_object):
node, compare = func(basic_parsed_source_definition_object)
assert node.same_contents(compare)
@pytest.mark.parametrize('func', changed_source_definitions)
def test_compare_changed_source_definition(func, basic_parsed_source_definition_object):
node, compare = func(basic_parsed_source_definition_object)
assert not node.same_contents(compare)
@pytest.fixture
def minimal_parsed_exposure_dict():
return {
'name': 'my_exposure',
'type': 'notebook',
'owner': {
'email': 'test@example.com',
},
'fqn': ['test', 'exposures', 'my_exposure'],
'unique_id': 'exposure.test.my_exposure',
'package_name': 'test',
'meta': {},
'tags': [],
'path': 'models/something.yml',
'root_path': '/usr/src/app',
'original_file_path': 'models/something.yml',
'description': '',
'created_at': 1.0,
}
@pytest.fixture
def basic_parsed_exposure_dict():
return {
'name': 'my_exposure',
'type': 'notebook',
'owner': {
'email': 'test@example.com',
},
'resource_type': 'exposure',
'depends_on': {
'nodes': [],
'macros': [],
},
'refs': [],
'sources': [],
'fqn': ['test', 'exposures', 'my_exposure'],
'unique_id': 'exposure.test.my_exposure',
'package_name': 'test',
'path': 'models/something.yml',
'root_path': '/usr/src/app',
'original_file_path': 'models/something.yml',
'description': '',
'meta': {},
'tags': [],
'created_at': 1.0,
}
@pytest.fixture
def basic_parsed_exposure_object():
return ParsedExposure(
name='my_exposure',
type=ExposureType.Notebook,
fqn=['test', 'exposures', 'my_exposure'],
unique_id='exposure.test.my_exposure',
package_name='test',
path='models/something.yml',
root_path='/usr/src/app',
original_file_path='models/something.yml',
owner=ExposureOwner(email='test@example.com'),
description='',
meta={},
tags=[]
)
@pytest.fixture
def complex_parsed_exposure_dict():
return {
'name': 'my_exposure',
'type': 'analysis',
'created_at': 1.0,
'owner': {
'email': 'test@example.com',
'name': 'A Name',
},
'resource_type': 'exposure',
'maturity': 'low',
'url': 'https://example.com/analyses/1',
'description': 'my description',
'meta': {
'tool': 'my_tool',
'is_something': False
},
'tags': ['my_department'],
'depends_on': {
'nodes': ['models.test.my_model'],
'macros': [],
},
'refs': [],
'sources': [],
'fqn': ['test', 'exposures', 'my_exposure'],
'unique_id': 'exposure.test.my_exposure',
'package_name': 'test',
'path': 'models/something.yml',
'root_path': '/usr/src/app',
'original_file_path': 'models/something.yml',
}
@pytest.fixture
def complex_parsed_exposure_object():
return ParsedExposure(
name='my_exposure',
type=ExposureType.Analysis,
owner=ExposureOwner(email='test@example.com', name='A Name'),
maturity=MaturityType.Low,
url='https://example.com/analyses/1',
description='my description',
meta={'tool': 'my_tool', 'is_something': False},
tags=['my_department'],
depends_on=DependsOn(nodes=['models.test.my_model']),
fqn=['test', 'exposures', 'my_exposure'],
unique_id='exposure.test.my_exposure',
package_name='test',
path='models/something.yml',
root_path='/usr/src/app',
original_file_path='models/something.yml',
)
def test_basic_parsed_exposure(minimal_parsed_exposure_dict, basic_parsed_exposure_dict, basic_parsed_exposure_object):
assert_symmetric(basic_parsed_exposure_object, basic_parsed_exposure_dict, ParsedExposure)
assert_from_dict(basic_parsed_exposure_object, minimal_parsed_exposure_dict, ParsedExposure)
pickle.loads(pickle.dumps(basic_parsed_exposure_object))
def test_complex_parsed_exposure(complex_parsed_exposure_dict, complex_parsed_exposure_object):
assert_symmetric(complex_parsed_exposure_object, complex_parsed_exposure_dict, ParsedExposure)
unchanged_parsed_exposures = [
lambda u: (u, u),
]
changed_parsed_exposures = [
lambda u: (u, u.replace(fqn=u.fqn[:-1]+['something', u.fqn[-1]])),
lambda u: (u, u.replace(type=ExposureType.ML)),
lambda u: (u, u.replace(owner=u.owner.replace(name='My Name'))),
lambda u: (u, u.replace(maturity=MaturityType.Medium)),
lambda u: (u, u.replace(url='https://example.com/dashboard/1')),
lambda u: (u, u.replace(description='My description')),
lambda u: (u, u.replace(depends_on=DependsOn(nodes=['model.test.blah']))),
]
@pytest.mark.parametrize('func', unchanged_parsed_exposures)
def test_compare_unchanged_parsed_exposure(func, basic_parsed_exposure_object):
node, compare = func(basic_parsed_exposure_object)
assert node.same_contents(compare)
@pytest.mark.parametrize('func', changed_parsed_exposures)
def test_compare_changed_exposure(func, basic_parsed_exposure_object):
node, compare = func(basic_parsed_exposure_object)
assert not node.same_contents(compare)
# METRICS
@pytest.fixture
def minimal_parsed_metric_dict():
return {
'name': 'my_metric',
'type': 'count',
'timestamp': 'created_at',
'time_grains': ['day'],
'fqn': ['test', 'metrics', 'my_metric'],
'unique_id': 'metric.test.my_metric',
'package_name': 'test',
'meta': {},
'tags': [],
'path': 'models/something.yml',
'root_path': '/usr/src/app',
'original_file_path': 'models/something.yml',
'description': '',
'created_at': 1.0,
}
@pytest.fixture
def basic_parsed_metric_dict():
return {
'name': 'new_customers',
'label': 'New Customers',
'model': 'ref("dim_customers")',
'type': 'count',
'sql': 'user_id',
'timestamp': 'signup_date',
'time_grains': ['day', 'week', 'month'],
'dimensions': ['plan', 'country'],
'filters': [
{
"field": "is_paying",
"value": "true",
"operator": "=",
}
],
'resource_type': 'metric',
'refs': [['dim_customers']],
'sources': [],
'fqn': ['test', 'metrics', 'my_metric'],
'unique_id': 'metric.test.my_metric',
'package_name': 'test',
'path': 'models/something.yml',
'root_path': '/usr/src/app',
'original_file_path': 'models/something.yml',
'description': '',
'meta': {},
'tags': [],
'created_at': 1.0,
'depends_on': {
'nodes': [],
'macros': [],
},
}
@pytest.fixture
def basic_parsed_metric_object():
return ParsedMetric(
name='my_metric',
type='count',
fqn=['test', 'metrics', 'my_metric'],
unique_id='metric.test.my_metric',
package_name='test',
path='models/something.yml',
root_path='/usr/src/app',
original_file_path='models/something.yml',
description='',
meta={},
tags=[]
)
| 31.71466 | 150 | 0.588582 |
7d1ecb6269c2635317ded2408ccaa32503769c55 | 1,688 | py | Python | setup.py | t2y/openapi-ext-tools | 1253053af4f9a90f85b611e79a8f39c7d226a002 | [
"Apache-2.0"
] | 1 | 2020-08-30T07:47:57.000Z | 2020-08-30T07:47:57.000Z | setup.py | t2y/openapi-ext-tools | 1253053af4f9a90f85b611e79a8f39c7d226a002 | [
"Apache-2.0"
] | null | null | null | setup.py | t2y/openapi-ext-tools | 1253053af4f9a90f85b611e79a8f39c7d226a002 | [
"Apache-2.0"
] | null | null | null | import re
from os import path
from setuptools import setup
version_py = open('openapi/spec/ext/__init__.py').read()
metadata = dict(re.findall("__([a-z]+)__ = '([^']+)'", version_py))
desc = 'Extended tools for openapi spec'
cur_dir = path.abspath(path.dirname(__file__))
with open(path.join(cur_dir, 'README.md')) as f:
long_description = f.read()
setup(
name='openapi-ext-tools',
version=metadata['version'],
description=desc,
long_description=long_description,
long_description_content_type='text/markdown',
classifiers=[
'License :: OSI Approved :: Apache Software License',
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: Implementation :: CPython',
'Environment :: Console',
'Topic :: Software Development :: Libraries',
'Topic :: Utilities',
],
url='https://github.com/t2y/openapi-ext-tools',
license='Apache License 2.0',
author='Tetsuya Morimoto',
author_email='tetsuya.morimoto@gmail.com',
zip_safe=False,
platforms='any',
packages=['openapi'],
namespace_packages=['openapi'],
include_package_data=True,
install_requires=[
'PyYAML',
'openapi-spec-validator',
],
tests_require=[
'tox', 'pytest', 'pytest-pep8', 'pytest-flakes',
],
    entry_points={
'console_scripts': [
'openapi-spec-cli=openapi.spec.ext.cli.main:main',
],
},
)
| 31.259259 | 70 | 0.626185 |
7b38e5ea93000b33f5bd4e009e49aec1421f5b71 | 29,871 | py | Python | venv/Lib/site-packages/networkx/algorithms/dag.py | amelliaaas/tugastkc4 | f442382c72379e911f3780543b95345a3b1c9407 | [
"Apache-2.0"
] | 5 | 2022-01-05T00:41:46.000Z | 2022-03-21T07:22:58.000Z | venv/Lib/site-packages/networkx/algorithms/dag.py | amelliaaas/tugastkc4 | f442382c72379e911f3780543b95345a3b1c9407 | [
"Apache-2.0"
] | 25 | 2021-04-17T09:26:47.000Z | 2022-01-02T20:06:55.000Z | venv/Lib/site-packages/networkx/algorithms/dag.py | amelliaaas/tugastkc4 | f442382c72379e911f3780543b95345a3b1c9407 | [
"Apache-2.0"
] | 20 | 2021-11-07T13:55:56.000Z | 2021-12-02T10:54:01.000Z | """Algorithms for directed acyclic graphs (DAGs).
Note that most of these functions are only guaranteed to work for DAGs.
In general, these functions do not check for acyclic-ness, so it is up
to the user to check for that.
"""
from collections import deque
from math import gcd
from functools import partial
from itertools import chain, product, starmap
import heapq
import networkx as nx
from networkx.utils import arbitrary_element, pairwise, not_implemented_for
__all__ = [
"descendants",
"ancestors",
"topological_sort",
"lexicographical_topological_sort",
"all_topological_sorts",
"topological_generations",
"is_directed_acyclic_graph",
"is_aperiodic",
"transitive_closure",
"transitive_closure_dag",
"transitive_reduction",
"antichains",
"dag_longest_path",
"dag_longest_path_length",
"dag_to_branching",
]
chaini = chain.from_iterable
def descendants(G, source):
"""Returns all nodes reachable from `source` in `G`.
Parameters
----------
G : NetworkX DiGraph
A directed graph
source : node in `G`
Returns
-------
set()
The descendants of `source` in `G`
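    Examples
    --------
    A small sketch on a three-node chain; every node reachable from
    `source` is returned, but `source` itself is not:
    >>> DG = nx.DiGraph([(1, 2), (2, 3)])
    >>> sorted(nx.descendants(DG, 1))
    [2, 3]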
"""
if not G.has_node(source):
raise nx.NetworkXError(f"The node {source} is not in the graph.")
des = {n for n, d in nx.shortest_path_length(G, source=source).items()}
return des - {source}
def ancestors(G, source):
"""Returns all nodes having a path to `source` in `G`.
Parameters
----------
G : NetworkX DiGraph
A directed graph
source : node in `G`
Returns
-------
set()
The ancestors of source in G
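    Examples
    --------
    On a three-node chain, everything with a path *to* `source` is
    returned:
    >>> DG = nx.DiGraph([(1, 2), (2, 3)])
    >>> sorted(nx.ancestors(DG, 3))
    [1, 2]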
"""
if not G.has_node(source):
raise nx.NetworkXError(f"The node {source} is not in the graph.")
anc = {n for n, d in nx.shortest_path_length(G, target=source).items()}
return anc - {source}
def has_cycle(G):
"""Decides whether the directed graph has a cycle."""
try:
# Feed the entire iterator into a zero-length deque.
deque(topological_sort(G), maxlen=0)
except nx.NetworkXUnfeasible:
return True
else:
return False
def is_directed_acyclic_graph(G):
"""Returns True if the graph `G` is a directed acyclic graph (DAG) or
False if not.
Parameters
----------
G : NetworkX graph
Returns
-------
bool
True if `G` is a DAG, False otherwise
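    Examples
    --------
    Directed graphs with a cycle, and undirected graphs of any kind,
    are not DAGs:
    >>> nx.is_directed_acyclic_graph(nx.DiGraph([(1, 2), (2, 3)]))
    True
    >>> nx.is_directed_acyclic_graph(nx.DiGraph([(1, 2), (2, 1)]))
    False
    >>> nx.is_directed_acyclic_graph(nx.Graph([(1, 2)]))
    False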
"""
return G.is_directed() and not has_cycle(G)
def topological_generations(G):
"""Stratifies a DAG into generations.
    A topological generation is a node collection in which ancestors of a node in each
generation are guaranteed to be in a previous generation, and any descendants of
a node are guaranteed to be in a following generation. Nodes are guaranteed to
be in the earliest possible generation that they can belong to.
Parameters
----------
G : NetworkX digraph
A directed acyclic graph (DAG)
Yields
------
sets of nodes
Yields sets of nodes representing each generation.
Raises
------
NetworkXError
Generations are defined for directed graphs only. If the graph
`G` is undirected, a :exc:`NetworkXError` is raised.
NetworkXUnfeasible
If `G` is not a directed acyclic graph (DAG) no topological generations
exist and a :exc:`NetworkXUnfeasible` exception is raised. This can also
be raised if `G` is changed while the returned iterator is being processed
RuntimeError
If `G` is changed while the returned iterator is being processed.
Examples
--------
>>> DG = nx.DiGraph([(2, 1), (3, 1)])
>>> [sorted(generation) for generation in nx.topological_generations(DG)]
[[2, 3], [1]]
Notes
-----
The generation in which a node resides can also be determined by taking the
max-path-distance from the node to the farthest leaf node. That value can
be obtained with this function using `enumerate(topological_generations(G))`.
See also
--------
topological_sort
"""
if not G.is_directed():
raise nx.NetworkXError("Topological sort not defined on undirected graphs.")
multigraph = G.is_multigraph()
indegree_map = {v: d for v, d in G.in_degree() if d > 0}
zero_indegree = [v for v, d in G.in_degree() if d == 0]
while zero_indegree:
this_generation = zero_indegree
zero_indegree = []
for node in this_generation:
if node not in G:
raise RuntimeError("Graph changed during iteration")
for child in G.neighbors(node):
try:
indegree_map[child] -= len(G[node][child]) if multigraph else 1
except KeyError as e:
raise RuntimeError("Graph changed during iteration") from e
if indegree_map[child] == 0:
zero_indegree.append(child)
del indegree_map[child]
yield this_generation
if indegree_map:
raise nx.NetworkXUnfeasible(
"Graph contains a cycle or graph changed during iteration"
)
def topological_sort(G):
"""Returns a generator of nodes in topologically sorted order.
A topological sort is a nonunique permutation of the nodes of a
directed graph such that an edge from u to v implies that u
appears before v in the topological sort order. This ordering is
valid only if the graph has no directed cycles.
Parameters
----------
G : NetworkX digraph
A directed acyclic graph (DAG)
Yields
------
nodes
Yields the nodes in topological sorted order.
Raises
------
NetworkXError
Topological sort is defined for directed graphs only. If the graph `G`
is undirected, a :exc:`NetworkXError` is raised.
NetworkXUnfeasible
If `G` is not a directed acyclic graph (DAG) no topological sort exists
and a :exc:`NetworkXUnfeasible` exception is raised. This can also be
raised if `G` is changed while the returned iterator is being processed
RuntimeError
If `G` is changed while the returned iterator is being processed.
Examples
--------
To get the reverse order of the topological sort:
>>> DG = nx.DiGraph([(1, 2), (2, 3)])
>>> list(reversed(list(nx.topological_sort(DG))))
[3, 2, 1]
If your DiGraph naturally has the edges representing tasks/inputs
and nodes representing people/processes that initiate tasks, then
topological_sort is not quite what you need. You will have to change
the tasks to nodes with dependence reflected by edges. The result is
a kind of topological sort of the edges. This can be done
with :func:`networkx.line_graph` as follows:
>>> list(nx.topological_sort(nx.line_graph(DG)))
[(1, 2), (2, 3)]
Notes
-----
This algorithm is based on a description and proof in
"Introduction to Algorithms: A Creative Approach" [1]_ .
See also
--------
is_directed_acyclic_graph, lexicographical_topological_sort
References
----------
.. [1] Manber, U. (1989).
*Introduction to Algorithms - A Creative Approach.* Addison-Wesley.
"""
for generation in nx.topological_generations(G):
yield from generation
def lexicographical_topological_sort(G, key=None):
"""Returns a generator of nodes in lexicographically topologically sorted
order.
A topological sort is a nonunique permutation of the nodes such that an
edge from u to v implies that u appears before v in the topological sort
order.
Parameters
----------
G : NetworkX digraph
A directed acyclic graph (DAG)
key : function, optional
This function maps nodes to keys with which to resolve ambiguities in
the sort order. Defaults to the identity function.
Returns
-------
iterable
An iterable of node names in lexicographical topological sort order.
Raises
------
NetworkXError
Topological sort is defined for directed graphs only. If the graph `G`
is undirected, a :exc:`NetworkXError` is raised.
NetworkXUnfeasible
If `G` is not a directed acyclic graph (DAG) no topological sort exists
and a :exc:`NetworkXUnfeasible` exception is raised. This can also be
raised if `G` is changed while the returned iterator is being processed
RuntimeError
If `G` is changed while the returned iterator is being processed.
Notes
-----
This algorithm is based on a description and proof in
"Introduction to Algorithms: A Creative Approach" [1]_ .
See also
--------
topological_sort
References
----------
.. [1] Manber, U. (1989).
*Introduction to Algorithms - A Creative Approach.* Addison-Wesley.
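    Examples
    --------
    With the default identity key, ties between nodes that could occupy
    the same position are broken in favor of the smaller node:
    >>> DG = nx.DiGraph([(2, 1), (2, 5), (1, 3), (1, 4), (5, 4)])
    >>> list(nx.lexicographical_topological_sort(DG))
    [2, 1, 3, 5, 4]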
"""
if not G.is_directed():
msg = "Topological sort not defined on undirected graphs."
raise nx.NetworkXError(msg)
if key is None:
def key(node):
return node
nodeid_map = {n: i for i, n in enumerate(G)}
def create_tuple(node):
return key(node), nodeid_map[node], node
indegree_map = {v: d for v, d in G.in_degree() if d > 0}
# These nodes have zero indegree and ready to be returned.
zero_indegree = [create_tuple(v) for v, d in G.in_degree() if d == 0]
heapq.heapify(zero_indegree)
while zero_indegree:
_, _, node = heapq.heappop(zero_indegree)
if node not in G:
raise RuntimeError("Graph changed during iteration")
for _, child in G.edges(node):
try:
indegree_map[child] -= 1
except KeyError as e:
raise RuntimeError("Graph changed during iteration") from e
if indegree_map[child] == 0:
heapq.heappush(zero_indegree, create_tuple(child))
del indegree_map[child]
yield node
if indegree_map:
msg = "Graph contains a cycle or graph changed during iteration"
raise nx.NetworkXUnfeasible(msg)
@not_implemented_for("undirected")
def all_topological_sorts(G):
"""Returns a generator of _all_ topological sorts of the directed graph G.
A topological sort is a nonunique permutation of the nodes such that an
edge from u to v implies that u appears before v in the topological sort
order.
Parameters
----------
G : NetworkX DiGraph
A directed graph
Returns
-------
generator
All topological sorts of the digraph G
Raises
------
NetworkXNotImplemented
If `G` is not directed
NetworkXUnfeasible
If `G` is not acyclic
Examples
--------
To enumerate all topological sorts of directed graph:
>>> DG = nx.DiGraph([(1, 2), (2, 3), (2, 4)])
>>> list(nx.all_topological_sorts(DG))
[[1, 2, 4, 3], [1, 2, 3, 4]]
Notes
-----
    Implements an iterative version of the algorithm given in [1]_.
References
----------
.. [1] Knuth, Donald E., Szwarcfiter, Jayme L. (1974).
"A Structured Program to Generate All Topological Sorting Arrangements"
Information Processing Letters, Volume 2, Issue 6, 1974, Pages 153-157,
ISSN 0020-0190,
https://doi.org/10.1016/0020-0190(74)90001-5.
Elsevier (North-Holland), Amsterdam
"""
if not G.is_directed():
raise nx.NetworkXError("Topological sort not defined on undirected graphs.")
# the names of count and D are chosen to match the global variables in [1]
# number of edges originating in a vertex v
count = dict(G.in_degree())
# vertices with indegree 0
D = deque([v for v, d in G.in_degree() if d == 0])
# stack of first value chosen at a position k in the topological sort
bases = []
current_sort = []
# do-while construct
while True:
assert all([count[v] == 0 for v in D])
if len(current_sort) == len(G):
yield list(current_sort)
# clean-up stack
while len(current_sort) > 0:
assert len(bases) == len(current_sort)
q = current_sort.pop()
# "restores" all edges (q, x)
# NOTE: it is important to iterate over edges instead
# of successors, so count is updated correctly in multigraphs
for _, j in G.out_edges(q):
count[j] += 1
assert count[j] >= 0
# remove entries from D
while len(D) > 0 and count[D[-1]] > 0:
D.pop()
# corresponds to a circular shift of the values in D
# if the first value chosen (the base) is in the first
# position of D again, we are done and need to consider the
# previous condition
D.appendleft(q)
if D[-1] == bases[-1]:
# all possible values have been chosen at current position
# remove corresponding marker
bases.pop()
else:
# there are still elements that have not been fixed
# at the current position in the topological sort
# stop removing elements, escape inner loop
break
else:
if len(D) == 0:
raise nx.NetworkXUnfeasible("Graph contains a cycle.")
# choose next node
q = D.pop()
# "erase" all edges (q, x)
# NOTE: it is important to iterate over edges instead
# of successors, so count is updated correctly in multigraphs
for _, j in G.out_edges(q):
count[j] -= 1
assert count[j] >= 0
if count[j] == 0:
D.append(j)
current_sort.append(q)
# base for current position might _not_ be fixed yet
if len(bases) < len(current_sort):
bases.append(q)
if len(bases) == 0:
break
def is_aperiodic(G):
"""Returns True if `G` is aperiodic.
A directed graph is aperiodic if there is no integer k > 1 that
divides the length of every cycle in the graph.
Parameters
----------
G : NetworkX DiGraph
A directed graph
Returns
-------
bool
True if the graph is aperiodic False otherwise
Raises
------
NetworkXError
If `G` is not directed
Notes
-----
This uses the method outlined in [1]_, which runs in $O(m)$ time
given $m$ edges in `G`. Note that a graph is not aperiodic if it is
    acyclic, as every integer trivially divides length-0 cycles.
References
----------
.. [1] Jarvis, J. P.; Shier, D. R. (1996),
"Graph-theoretic analysis of finite Markov chains,"
in Shier, D. R.; Wallenius, K. T., Applied Mathematical Modeling:
A Multidisciplinary Approach, CRC Press.
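    Examples
    --------
    A lone directed 2-cycle has period two; adding a chord that creates a
    3-cycle makes the gcd of the cycle lengths one, hence aperiodic:
    >>> nx.is_aperiodic(nx.DiGraph([(1, 2), (2, 1)]))
    False
    >>> nx.is_aperiodic(nx.DiGraph([(1, 2), (2, 3), (3, 1), (1, 3)]))
    True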
"""
if not G.is_directed():
raise nx.NetworkXError("is_aperiodic not defined for undirected graphs")
s = arbitrary_element(G)
levels = {s: 0}
this_level = [s]
g = 0
lev = 1
while this_level:
next_level = []
for u in this_level:
for v in G[u]:
if v in levels: # Non-Tree Edge
g = gcd(g, levels[u] - levels[v] + 1)
else: # Tree Edge
next_level.append(v)
levels[v] = lev
this_level = next_level
lev += 1
if len(levels) == len(G): # All nodes in tree
return g == 1
else:
return g == 1 and nx.is_aperiodic(G.subgraph(set(G) - set(levels)))
@not_implemented_for("undirected")
def transitive_closure(G, reflexive=False):
"""Returns transitive closure of a directed graph
The transitive closure of G = (V,E) is a graph G+ = (V,E+) such that
for all v, w in V there is an edge (v, w) in E+ if and only if there
is a path from v to w in G.
Handling of paths from v to v has some flexibility within this definition.
A reflexive transitive closure creates a self-loop for the path
from v to v of length 0. The usual transitive closure creates a
self-loop only if a cycle exists (a path from v to v with length > 0).
We also allow an option for no self-loops.
Parameters
----------
G : NetworkX DiGraph
A directed graph
reflexive : Bool or None, optional (default: False)
Determines when cycles create self-loops in the Transitive Closure.
If True, trivial cycles (length 0) create self-loops. The result
    is a reflexive transitive closure of G.
If False (the default) non-trivial cycles create self-loops.
If None, self-loops are not created.
Returns
-------
NetworkX DiGraph
The transitive closure of `G`
Raises
------
NetworkXNotImplemented
If `G` is not directed
References
----------
.. [1] http://www.ics.uci.edu/~eppstein/PADS/PartialOrder.py
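    Examples
    --------
    The closure adds (1, 3) because 3 is reachable from 1; with the default
    ``reflexive=False`` an acyclic graph gains no self-loops:
    >>> DG = nx.DiGraph([(1, 2), (2, 3)])
    >>> TC = nx.transitive_closure(DG)
    >>> sorted(TC.edges())
    [(1, 2), (1, 3), (2, 3)]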
TODO this function applies to all directed graphs and is probably misplaced
here in dag.py
"""
if reflexive is None:
TC = G.copy()
for v in G:
edges = ((v, u) for u in nx.dfs_preorder_nodes(G, v) if v != u)
TC.add_edges_from(edges)
return TC
if reflexive is True:
TC = G.copy()
for v in G:
edges = ((v, u) for u in nx.dfs_preorder_nodes(G, v))
TC.add_edges_from(edges)
return TC
# reflexive is False
TC = G.copy()
for v in G:
edges = ((v, w) for u, w in nx.edge_dfs(G, v))
TC.add_edges_from(edges)
return TC
@not_implemented_for("undirected")
def transitive_closure_dag(G, topo_order=None):
"""Returns the transitive closure of a directed acyclic graph.
This function is faster than the function `transitive_closure`, but fails
if the graph has a cycle.
The transitive closure of G = (V,E) is a graph G+ = (V,E+) such that
for all v, w in V there is an edge (v, w) in E+ if and only if there
is a non-null path from v to w in G.
Parameters
----------
G : NetworkX DiGraph
A directed acyclic graph (DAG)
topo_order: list or tuple, optional
A topological order for G (if None, the function will compute one)
Returns
-------
NetworkX DiGraph
The transitive closure of `G`
Raises
------
NetworkXNotImplemented
If `G` is not directed
NetworkXUnfeasible
If `G` has a cycle
Notes
-----
This algorithm is probably simple enough to be well-known but I didn't find
a mention in the literature.
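    Examples
    --------
    The result matches `transitive_closure` on a DAG, computed here by a
    single pass over a reverse topological order:
    >>> DG = nx.DiGraph([(1, 2), (2, 3)])
    >>> TC = nx.transitive_closure_dag(DG)
    >>> sorted(TC.edges())
    [(1, 2), (1, 3), (2, 3)]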
"""
if topo_order is None:
topo_order = list(topological_sort(G))
TC = G.copy()
# idea: traverse vertices following a reverse topological order, connecting
# each vertex to its descendants at distance 2 as we go
for v in reversed(topo_order):
TC.add_edges_from((v, u) for u in nx.descendants_at_distance(TC, v, 2))
return TC
@not_implemented_for("undirected")
def transitive_reduction(G):
"""Returns transitive reduction of a directed graph
The transitive reduction of G = (V,E) is a graph G- = (V,E-) such that
for all v,w in V there is an edge (v,w) in E- if and only if (v,w) is
in E and there is no path from v to w in G with length greater than 1.
Parameters
----------
G : NetworkX DiGraph
A directed acyclic graph (DAG)
Returns
-------
NetworkX DiGraph
The transitive reduction of `G`
Raises
------
NetworkXError
If `G` is not a directed acyclic graph (DAG) transitive reduction is
not uniquely defined and a :exc:`NetworkXError` exception is raised.
Examples
--------
To perform transitive reduction on a DiGraph:
>>> DG = nx.DiGraph([(1, 2), (2, 3), (1, 3)])
>>> TR = nx.transitive_reduction(DG)
>>> list(TR.edges)
[(1, 2), (2, 3)]
To avoid unnecessary data copies, this implementation does not return a
DiGraph with node/edge data.
To perform transitive reduction on a DiGraph and transfer node/edge data:
>>> DG = nx.DiGraph()
>>> DG.add_edges_from([(1, 2), (2, 3), (1, 3)], color='red')
>>> TR = nx.transitive_reduction(DG)
>>> TR.add_nodes_from(DG.nodes(data=True))
>>> TR.add_edges_from((u, v, DG.edges[u, v]) for u, v in TR.edges)
>>> list(TR.edges(data=True))
[(1, 2, {'color': 'red'}), (2, 3, {'color': 'red'})]
References
----------
https://en.wikipedia.org/wiki/Transitive_reduction
"""
if not is_directed_acyclic_graph(G):
msg = "Directed Acyclic Graph required for transitive_reduction"
raise nx.NetworkXError(msg)
TR = nx.DiGraph()
TR.add_nodes_from(G.nodes())
descendants = {}
# count before removing set stored in descendants
check_count = dict(G.in_degree)
for u in G:
u_nbrs = set(G[u])
for v in G[u]:
if v in u_nbrs:
if v not in descendants:
descendants[v] = {y for x, y in nx.dfs_edges(G, v)}
u_nbrs -= descendants[v]
check_count[v] -= 1
if check_count[v] == 0:
del descendants[v]
TR.add_edges_from((u, v) for v in u_nbrs)
return TR
@not_implemented_for("undirected")
def antichains(G, topo_order=None):
"""Generates antichains from a directed acyclic graph (DAG).
An antichain is a subset of a partially ordered set such that any
two elements in the subset are incomparable.
Parameters
----------
G : NetworkX DiGraph
A directed acyclic graph (DAG)
topo_order: list or tuple, optional
A topological order for G (if None, the function will compute one)
Returns
-------
generator object
Raises
------
NetworkXNotImplemented
If `G` is not directed
NetworkXUnfeasible
If `G` contains a cycle
Notes
-----
This function was originally developed by Peter Jipsen and Franco Saliola
for the SAGE project. It's included in NetworkX with permission from the
authors. Original SAGE code at:
https://github.com/sagemath/sage/blob/master/src/sage/combinat/posets/hasse_diagram.py
References
----------
.. [1] Free Lattices, by R. Freese, J. Jezek and J. B. Nation,
AMS, Vol 42, 1995, p. 226.
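    Examples
    --------
    In a fork the two children are incomparable, so they form an antichain
    together; the yield order depends on the topological order used, so the
    output is sorted here for stability:
    >>> DG = nx.DiGraph([(1, 2), (1, 3)])
    >>> sorted(sorted(antichain) for antichain in nx.antichains(DG))
    [[], [1], [2], [2, 3], [3]]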
"""
if topo_order is None:
topo_order = list(nx.topological_sort(G))
TC = nx.transitive_closure_dag(G, topo_order)
antichains_stacks = [([], list(reversed(topo_order)))]
while antichains_stacks:
(antichain, stack) = antichains_stacks.pop()
# Invariant:
# - the elements of antichain are independent
# - the elements of stack are independent from those of antichain
yield antichain
while stack:
x = stack.pop()
new_antichain = antichain + [x]
new_stack = [t for t in stack if not ((t in TC[x]) or (x in TC[t]))]
antichains_stacks.append((new_antichain, new_stack))
@not_implemented_for("undirected")
def dag_longest_path(G, weight="weight", default_weight=1, topo_order=None):
"""Returns the longest path in a directed acyclic graph (DAG).
If `G` has edges with `weight` attribute the edge data are used as
weight values.
Parameters
----------
G : NetworkX DiGraph
A directed acyclic graph (DAG)
weight : str, optional
Edge data key to use for weight
default_weight : int, optional
The weight of edges that do not have a weight attribute
topo_order: list or tuple, optional
A topological order for G (if None, the function will compute one)
Returns
-------
list
Longest path
Raises
------
NetworkXNotImplemented
If `G` is not directed
See also
--------
dag_longest_path_length
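    Examples
    --------
    Unweighted, the path with the most edges wins; with a weight key (the
    ``cost`` key below is just an illustrative name), the heaviest path wins:
    >>> DG = nx.DiGraph([(0, 1), (0, 2), (1, 2)])
    >>> nx.dag_longest_path(DG)
    [0, 1, 2]
    >>> WG = nx.DiGraph([(0, 1, {'cost': 1}), (1, 2, {'cost': 1}), (0, 2, {'cost': 42})])
    >>> nx.dag_longest_path(WG, weight='cost')
    [0, 2]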
"""
if not G:
return []
if topo_order is None:
topo_order = nx.topological_sort(G)
dist = {} # stores {v : (length, u)}
for v in topo_order:
us = [
(dist[u][0] + data.get(weight, default_weight), u)
for u, data in G.pred[v].items()
]
# Use the best predecessor if there is one and its distance is
# non-negative, otherwise terminate.
maxu = max(us, key=lambda x: x[0]) if us else (0, v)
dist[v] = maxu if maxu[0] >= 0 else (0, v)
u = None
v = max(dist, key=lambda x: dist[x][0])
path = []
while u != v:
path.append(v)
u = v
v = dist[v][1]
path.reverse()
return path
@not_implemented_for("undirected")
def dag_longest_path_length(G, weight="weight", default_weight=1):
"""Returns the longest path length in a DAG
Parameters
----------
G : NetworkX DiGraph
A directed acyclic graph (DAG)
weight : string, optional
Edge data key to use for weight
default_weight : int, optional
The weight of edges that do not have a weight attribute
Returns
-------
int
Longest path length
Raises
------
NetworkXNotImplemented
If `G` is not directed
See also
--------
dag_longest_path
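    Examples
    --------
    Each edge contributes ``default_weight`` (1) when no weight attribute
    is present, so the longest path 0 -> 1 -> 2 has length 2:
    >>> DG = nx.DiGraph([(0, 1), (0, 2), (1, 2)])
    >>> nx.dag_longest_path_length(DG)
    2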
"""
path = nx.dag_longest_path(G, weight, default_weight)
path_length = 0
for (u, v) in pairwise(path):
path_length += G[u][v].get(weight, default_weight)
return path_length
def root_to_leaf_paths(G):
"""Yields root-to-leaf paths in a directed acyclic graph.
`G` must be a directed acyclic graph. If not, the behavior of this
function is undefined. A "root" in this graph is a node of in-degree
zero and a "leaf" a node of out-degree zero.
When invoked, this function iterates over each path from any root to
any leaf. A path is a list of nodes.
"""
roots = (v for v, d in G.in_degree() if d == 0)
leaves = (v for v, d in G.out_degree() if d == 0)
all_paths = partial(nx.all_simple_paths, G)
# TODO In Python 3, this would be better as `yield from ...`.
return chaini(starmap(all_paths, product(roots, leaves)))
@not_implemented_for("multigraph")
@not_implemented_for("undirected")
def dag_to_branching(G):
"""Returns a branching representing all (overlapping) paths from
root nodes to leaf nodes in the given directed acyclic graph.
As described in :mod:`networkx.algorithms.tree.recognition`, a
*branching* is a directed forest in which each node has at most one
parent. In other words, a branching is a disjoint union of
*arborescences*. For this function, each node of in-degree zero in
`G` becomes a root of one of the arborescences, and there will be
one leaf node for each distinct path from that root to a leaf node
in `G`.
Each node `v` in `G` with *k* parents becomes *k* distinct nodes in
the returned branching, one for each parent, and the sub-DAG rooted
at `v` is duplicated for each copy. The algorithm then recurses on
the children of each copy of `v`.
Parameters
----------
G : NetworkX graph
A directed acyclic graph.
Returns
-------
DiGraph
The branching in which there is a bijection between root-to-leaf
paths in `G` (in which multiple paths may share the same leaf)
and root-to-leaf paths in the branching (in which there is a
unique path from a root to a leaf).
Each node has an attribute 'source' whose value is the original
node to which this node corresponds. No other graph, node, or
edge attributes are copied into this new graph.
Raises
------
NetworkXNotImplemented
If `G` is not directed, or if `G` is a multigraph.
HasACycle
If `G` is not acyclic.
Examples
--------
To examine which nodes in the returned branching were produced by
which original node in the directed acyclic graph, we can collect
the mapping from source node to new nodes into a dictionary. For
example, consider the directed diamond graph::
>>> from collections import defaultdict
>>> from operator import itemgetter
>>>
>>> G = nx.DiGraph(nx.utils.pairwise("abd"))
>>> G.add_edges_from(nx.utils.pairwise("acd"))
>>> B = nx.dag_to_branching(G)
>>>
>>> sources = defaultdict(set)
>>> for v, source in B.nodes(data="source"):
... sources[source].add(v)
>>> len(sources["a"])
1
>>> len(sources["d"])
2
To copy node attributes from the original graph to the new graph,
you can use a dictionary like the one constructed in the above
example::
>>> for source, nodes in sources.items():
... for v in nodes:
... B.nodes[v].update(G.nodes[source])
Notes
-----
This function is not idempotent in the sense that the node labels in
the returned branching may be uniquely generated each time the
function is invoked. In fact, the node labels may not be integers;
in order to relabel the nodes to be more readable, you can use the
:func:`networkx.convert_node_labels_to_integers` function.
The current implementation of this function uses
:func:`networkx.prefix_tree`, so it is subject to the limitations of
that function.
"""
if has_cycle(G):
msg = "dag_to_branching is only defined for acyclic graphs"
raise nx.HasACycle(msg)
paths = root_to_leaf_paths(G)
B = nx.prefix_tree(paths)
# Remove the synthetic `root`(0) and `NIL`(-1) nodes from the tree
B.remove_node(0)
B.remove_node(-1)
return B
| 30.574207 | 90 | 0.617455 |
c7e42ba038c5fac830c4a4bef762efcbc258e105 | 4,130 | py | Python | test/IECoreGL/ToGLTextureConverter.py | aitorvfx/cortex | c0c27794fc67ccfce68b064e284747165c49ef1c | ["BSD-3-Clause"] | 5 | 2015-09-13T14:49:30.000Z | 2017-02-04T21:04:59.000Z | test/IECoreGL/ToGLTextureConverter.py | aitorvfx/cortex | c0c27794fc67ccfce68b064e284747165c49ef1c | ["BSD-3-Clause"] | 1 | 2018-11-07T19:40:15.000Z | 2018-11-07T19:40:15.000Z | test/IECoreGL/ToGLTextureConverter.py | aitorvfx/cortex | c0c27794fc67ccfce68b064e284747165c49ef1c | ["BSD-3-Clause"] | 3 | 2015-02-03T17:13:40.000Z | 2022-01-07T15:55:00.000Z |
##########################################################################
] | 3 | 2015-02-03T17:13:40.000Z | 2022-01-07T15:55:00.000Z | ##########################################################################
#
# Copyright (c) 2007-2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import os.path
import IECore
import IECoreImage
import IECoreGL
IECoreGL.init( False )
class TestToGLTextureConverter( unittest.TestCase ) :
def testFromImage( self ) :
""" Test conversion from an ImagePrimitive """
i = IECore.Reader.create( os.path.dirname( __file__ ) + "/images/colorBarsWithAlphaF512x512.exr" ).read()
t = IECoreGL.ToGLTextureConverter( i ).convert()
self.failIf( not t.isInstanceOf( IECoreGL.Texture.staticTypeId() ) )
ii = t.imagePrimitive()
res = IECoreImage.ImageDiffOp()(
imageA = i,
imageB = ii,
maxError = 0.01,
skipMissingChannels = False
)
self.failIf( res.value )
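	# Note: the round trip above relies on Texture.imagePrimitive() reading the
	# GL texture back into an ImagePrimitive, which ImageDiffOp then compares
	# against the source image within the 0.01 maxError tolerance.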
def testFromCompoundData( self ) :
""" Test conversion from a CompoundData representation of an ImagePrimitive """
i = IECore.Reader.create( os.path.dirname( __file__ ) + "/images/colorBarsWithAlphaF512x512.exr" ).read()
cd = IECore.CompoundData()
cd["displayWindow"] = IECore.Box2iData( i.displayWindow )
cd["dataWindow"] = IECore.Box2iData( i.dataWindow )
cnd = IECore.CompoundData()
for channel in i.channelNames() :
cnd[ channel ] = i[ channel ]
cd["channels"] = cnd
t = IECoreGL.ToGLTextureConverter( cd ).convert()
self.failIf( not t.isInstanceOf( IECoreGL.Texture.staticTypeId() ) )
ii = t.imagePrimitive()
res = IECoreImage.ImageDiffOp()(
imageA = i,
imageB = ii,
maxError = 0.01,
skipMissingChannels = False
)
self.failIf( res.value )
def testMissingChannelCreation( self ) :
""" Test the creation of missing channels """
i = IECore.Reader.create( os.path.dirname( __file__ ) + "/images/colorBarsWithAlphaF512x512.exr" ).read()
cd = IECore.CompoundData()
cd["displayWindow"] = IECore.Box2iData( i.displayWindow )
cd["dataWindow"] = IECore.Box2iData( i.dataWindow )
cnd = IECore.CompoundData()
cnd[ "R" ] = i[ "R" ]
cd["channels"] = cnd
# We are missing a channel and so an exception should be thrown if we try to convert it with the default arguments.
self.assertRaises( RuntimeError, IECoreGL.ToGLTextureConverter( cd ).convert )
t = IECoreGL.ToGLTextureConverter( cd, True ).convert()
ii = t.imagePrimitive()
self.assertTrue( "R" in ii.channelNames() )
self.assertTrue( "G" in ii.channelNames() )
self.assertTrue( "B" in ii.channelNames() )
if __name__ == "__main__":
unittest.main()
| 33.852459 | 117 | 0.696126 |
16f86139874ca21479469361aaed457a63448edd | 1,611 | py | Python | py/cidoc_crm_types/properties/p16_used_specific_object.py | minorg/cidoc-crm-types | 9018bdbf0658e4d28a87bc94543e467be45d8aa5 | ["Apache-2.0"] | null | null | null | py/cidoc_crm_types/properties/p16_used_specific_object.py | minorg/cidoc-crm-types | 9018bdbf0658e4d28a87bc94543e467be45d8aa5 | ["Apache-2.0"] | null | null | null | py/cidoc_crm_types/properties/p16_used_specific_object.py | minorg/cidoc-crm-types | 9018bdbf0658e4d28a87bc94543e467be45d8aa5 | ["Apache-2.0"] | null | null | null |
from .p12_occurred_in_the_presence_of import P12OccurredInThePresenceOf
from .p15_was_influenced_by import P15WasInfluencedBy
from dataclasses import dataclass
@dataclass
class P16UsedSpecificObject(P12OccurredInThePresenceOf, P15WasInfluencedBy):
"""
Scope note:
This property describes the use of material or immaterial things in a way essential to the performance or the outcome of an instance of E7 Activity.
This property typically applies to tools, instruments, moulds, raw materials and items embedded in a product. It implies that the presence of the object in question was a necessary condition for the action. For example, the activity of writing this text required the use of a computer. An immaterial thing can be used if at least one of its carriers is present. For example, the software tools on a computer.
    Another example is the use of a particular name by a particular group of people over some span of time to identify a thing, such as a settlement. In this case, the physical carriers of this name are at least the people understanding its use.
Examples:
- the writing of this scope note (E7) used specific object Nicholas Crofts' computer (E22) mode of use Typing Tool; Storage Medium (E55)
- the people of Iraq calling the place identified by TGN '7017998' (E7) used specific object "Quyunjig" (E44) mode of use Current; Vernacular (E55)
In First Order Logic:
P16 (x,y) ⊃ E7(x)
P16 (x,y) ⊃ E70(y)
P16 (x,y) ⊃ P12(x,y)
P16 (x,y) ⊃ P15(x,y)
P16(x,y,z) ⊃ [P16(x,y) ∧ E55(z)]
"""
URI = "http://erlangen-crm.org/current/P16_used_specific_object"
| 51.967742 | 408 | 0.77095 |
82a3a2f4417dfb016097ec9ecd8ac60afc8d6b7d | 4,786 | py | Python | can_tools/scrapers/official/CA/ca_state.py | jrybacek/can-scrapers | 1a32a45be6aa6630de4d100c56c2a8659a1b1025 | ["MIT"] | null | null | null | can_tools/scrapers/official/CA/ca_state.py | jrybacek/can-scrapers | 1a32a45be6aa6630de4d100c56c2a8659a1b1025 | ["MIT"] | null | null | null | can_tools/scrapers/official/CA/ca_state.py | jrybacek/can-scrapers | 1a32a45be6aa6630de4d100c56c2a8659a1b1025 | ["MIT"] | null | null | null |
from typing import Any
] | null | null | null | from typing import Any
import pandas as pd
import us
from can_tools.scrapers.base import CMU
from can_tools.scrapers.official.base import StateQueryAPI
class CaliforniaCasesDeaths(StateQueryAPI):
"""
    Fetch county level covid data from California state dashboard
"""
apiurl = "https://data.ca.gov/api/3/action/datastore_search"
source = "https://covid19.ca.gov/state-dashboard"
state_fips = int(us.states.lookup("California").fips)
has_location = False
location_type = "county"
resource_id = "926fd08f-cc91-4828-af38-bd45de97f8c3"
def fetch(self) -> Any:
return self.raw_from_api(self.resource_id, limit=1000)
def pre_normalize(self, data) -> pd.DataFrame:
"""
Normalizes the list of json objects that corresponds with case
and death data
Parameters
----------
data : List
A list of json elements
Returns
-------
df : pd.DataFrame
A DataFrame with the normalized data
"""
# Map current column names to CMU elements
crename = {
"newcountconfirmed": CMU(
category="cases", measurement="new", unit="people"
),
"totalcountconfirmed": CMU(
category="cases", measurement="cumulative", unit="people"
),
"newcountdeaths": CMU(category="deaths", measurement="new", unit="people"),
"totalcountdeaths": CMU(
category="deaths", measurement="cumulative", unit="people"
),
}
# Read in data and convert to long format
df = self.data_from_raw(data).rename(columns={"county": "location_name"})
df["dt"] = pd.to_datetime(df["date"])
# Move things into long format
df = df.melt(
id_vars=["location_name", "dt"], value_vars=crename.keys()
).dropna()
# Determine the category of each observation
df = self.extract_CMU(df, crename)
cols_to_keep = [
"dt",
"location_name",
"category",
"measurement",
"unit",
"age",
"race",
"ethnicity",
"sex",
"value",
]
return df.loc[:, cols_to_keep]
def normalize(self, data) -> pd.DataFrame:
# Normalize case/death and hospital data
out = self.pre_normalize(data)
out["vintage"] = self._retrieve_vintage()
# Drop the information that we won't be keeping track of
loc_not_keep = ["Out Of Country", "Unassigned"]
out = out.loc[~out["location_name"].isin(loc_not_keep), :]
return out
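# Illustrative sketch (not part of the scraper): `melt` above pivots the wide
# API frame into tidy long format, turning columns such as `newcountconfirmed`
# and `totalcountdeaths` into rows of (dt, location_name, variable, value)
# that `extract_CMU` then annotates with category/measurement/unit metadata.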
class CaliforniaHospitals(CaliforniaCasesDeaths):
resource_id = "42d33765-20fd-44b8-a978-b083b7542225"
def pre_normalize(self, data) -> pd.DataFrame:
"""
Get icu and hospital usage by covid patients from the OpenDataCali api
Parameters
----------
data : List
A list of json elements
Returns
-------
df: pd.DataFrame
A pandas DataFrame containing icu+hospital usage for each county
"""
# Rename columns and subset data
crename = {
"hospitalized_covid_patients": CMU(
category="hospital_beds_in_use_covid",
measurement="current",
unit="beds",
),
"all_hospital_beds": CMU(
category="hospital_beds_capacity", measurement="current", unit="beds"
),
"icu_covid_patients": CMU(
category="icu_beds_in_use_covid", measurement="current", unit="beds"
),
}
# Read in data and convert to long format
df = self.data_from_raw(data).rename(columns={"county": "location_name"})
# Convert column to date
df = df.replace("None", None)
df = df.apply(lambda x: pd.to_numeric(x, errors="ignore"))
df["dt"] = pd.to_datetime(df["todays_date"])
# Create a total number of icu covid patients
df["icu_covid_patients"] = df.eval(
"icu_covid_confirmed_patients + icu_suspected_covid_patients"
)
# Reshape
out = df.melt(
id_vars=["dt", "location_name"], value_vars=crename.keys()
).dropna()
# Determine the category and demographics of each observation
out = self.extract_CMU(out, crename)
cols_to_keep = [
"dt",
"location_name",
"category",
"measurement",
"unit",
"age",
"race",
"ethnicity",
"sex",
"value",
]
return out.loc[:, cols_to_keep]
| 29.9125 | 87 | 0.559967 |
93a0b67e6d63c8312ab19c78dead5ce71a35b397 | 3,933 | py | Python | lib/examples/nacl_amb_trajrst/aux_functions.py | ajoshpratt/westpa | 545a42a5ae4cfa77de0e125a38a5b1ec2b9ab218 | [
"MIT"
] | 1 | 2019-12-21T09:11:54.000Z | 2019-12-21T09:11:54.000Z | lib/examples/nacl_amb_trajrst/aux_functions.py | astatide/westpa | 545a42a5ae4cfa77de0e125a38a5b1ec2b9ab218 | [
"MIT"
] | 1 | 2020-04-14T20:49:38.000Z | 2020-04-14T20:49:38.000Z | lib/examples/nacl_amb_trajrst/aux_functions.py | ajoshpratt/westpa | 545a42a5ae4cfa77de0e125a38a5b1ec2b9ab218 | [
"MIT"
] | 1 | 2020-04-14T20:42:11.000Z | 2020-04-14T20:42:11.000Z | #!/usr/bin/env python
import numpy
import mdtraj as md
import h5py
import westpa
import StringIO
import tempfile
import os
def pcoord_loader(fieldname, pcoord_return_filename, segment, single_point, trajectory, restart):
"""Read progress coordinate data into the ``pcoord`` field on ``destobj``.
An exception will be raised if the data is malformed. If ``single_point`` is true,
then only one (N-dimensional) point will be read, otherwise system.pcoord_len points
will be read.
"""
system = westpa.rc.get_system_driver()
assert fieldname == 'pcoord'
# Load the file! Notice that we're just loading the trajectory, not the pcoord file itself.
# for get bstates
try:
if single_point:
b = md.load_restrt(trajectory, top=os.path.join(restart, 'nacl.prmtop'))
else:
b = md.load_netcdf(trajectory, top=os.path.join(restart, 'nacl.prmtop'))
except:
segment.pcoord = 1
else:
        # Distance between the first two atoms (the two ions in nacl.prmtop);
        # mdtraj reports distances in nanometers, so multiply by 10 for angstroms.
        pcoord = md.compute_distances(b, [[0,1]]) * 10
#print(pcoord)
if single_point:
expected_shape = (system.pcoord_ndim,)
pcoord = pcoord[0]
if pcoord.ndim == 0:
pcoord.shape = (1,)
else:
expected_shape = (system.pcoord_len, system.pcoord_ndim)
if pcoord.ndim == 1:
pcoord.shape = (len(pcoord),1)
if pcoord.shape != expected_shape:
raise ValueError('progress coordinate data has incorrect shape {!r} [expected {!r}]'.format(pcoord.shape,
expected_shape))
segment.pcoord = pcoord
def coord_loader(fieldname, coord_filename, segment, single_point=False):
"""
Loads and stores coordinates
**Arguments:**
:*fieldname*: Key at which to store dataset
:*coord_filename*: Temporary file from which to load coordinates
:*segment*: WEST segment
:*single_point*: Data to be stored for a single frame
(should always be false)
"""
# Load coordinates
n_frames = 6
n_atoms = 2
coord = numpy.loadtxt(coord_filename, dtype = numpy.float32)
coord = numpy.reshape(coord, (n_frames, n_atoms, 3))
# Save to hdf5
segment.data[fieldname] = coord
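# Sketch of the expected file layout (illustrative): the temporary file holds
# one x/y/z triple per line, so 6 frames x 2 atoms -> 12 rows of 3 floats,
# which numpy.loadtxt reads as shape (12, 3) before the reshape to (6, 2, 3).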
def log_loader(fieldname, log_filename, segment, single_point=False):
"""
Loads and stores log
**Arguments:**
:*fieldname*: Key at which to store dataset
:*log_filename*: Temporary file from which to load log
:*segment*: WEST segment
:*single_point*: Data to be stored for a single frame
(should always be false)
"""
# Load log
with open(log_filename, 'r') as log_file:
raw_text = [line.strip() for line in log_file.readlines()]
# Determine number of fields
n_frames = 6
n_fields = 0
line_i = 0
starts = []
while line_i < len(raw_text):
line = raw_text[line_i]
start = line.split()[0]
if start in starts:
break
else:
starts.append(start)
n_fields += line.count('=')
line_i += 1
dataset = numpy.zeros((n_frames, n_fields), numpy.float32)
# Parse data
line_i = 0
frame_i = 0
field_i = 0
while line_i < len(raw_text):
line = raw_text[line_i]
for field in line.split():
try:
float(field)
dataset[frame_i, field_i] = float(field)
if field_i == n_fields - 1:
frame_i += 1
field_i = 0
else:
field_i += 1
except ValueError:
pass
line_i += 1
# Save to hdf5
segment.data[fieldname] = dataset
| 32.504132 | 120 | 0.567251 |
e333ee5bf2460b822ba379f438108203e4982540 | 64,843 | py | Python | src/command_modules/azure-cli-acs/azure/cli/command_modules/acs/custom.py | mickeymitic/azure-cli | 92af6b3cea52f99eee84df93f5d3e2003a273d04 | [
"MIT"
] | 1 | 2018-01-30T05:55:29.000Z | 2018-01-30T05:55:29.000Z | src/command_modules/azure-cli-acs/azure/cli/command_modules/acs/custom.py | mickeymitic/azure-cli | 92af6b3cea52f99eee84df93f5d3e2003a273d04 | [
"MIT"
] | null | null | null | src/command_modules/azure-cli-acs/azure/cli/command_modules/acs/custom.py | mickeymitic/azure-cli | 92af6b3cea52f99eee84df93f5d3e2003a273d04 | [
"MIT"
] | null | null | null | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from __future__ import print_function
import base64
import binascii
import datetime
import errno
import json
import os
import os.path
import platform
import random
import re
import ssl
import stat
import string
import subprocess
import sys
import tempfile
import threading
import time
import uuid
import webbrowser
import yaml
import dateutil.parser
from dateutil.relativedelta import relativedelta
from six.moves.urllib.request import urlopen # pylint: disable=import-error
from six.moves.urllib.error import URLError # pylint: disable=import-error
from knack.log import get_logger
from knack.util import CLIError
from msrestazure.azure_exceptions import CloudError
from azure.cli.command_modules.acs import acs_client, proxy
from azure.cli.command_modules.acs._params import regions_in_preview, regions_in_prod
from azure.cli.core._environment import get_config_dir
from azure.cli.core._profile import Profile
from azure.cli.core.commands.client_factory import get_mgmt_service_client
from azure.cli.core.keys import is_valid_ssh_rsa_public_key
from azure.cli.core.profiles import ResourceType
from azure.cli.core.util import shell_safe_json_parse, truncate_text
from azure.graphrbac.models import (ApplicationCreateParameters,
PasswordCredential,
KeyCredential,
ServicePrincipalCreateParameters,
GetObjectsParameters)
from azure.mgmt.authorization.models import RoleAssignmentProperties
from azure.mgmt.containerservice.models import ContainerServiceAgentPoolProfile
from azure.mgmt.containerservice.models import ContainerServiceLinuxProfile
from azure.mgmt.containerservice.models import ContainerServiceOrchestratorTypes
from azure.mgmt.containerservice.models import ContainerServiceServicePrincipalProfile
from azure.mgmt.containerservice.models import ContainerServiceSshConfiguration
from azure.mgmt.containerservice.models import ContainerServiceSshPublicKey
from azure.mgmt.containerservice.models import ContainerServiceStorageProfileTypes
from azure.mgmt.containerservice.models import ManagedCluster
from ._client_factory import get_auth_management_client
from ._client_factory import get_graph_rbac_management_client
from ._client_factory import cf_container_services
logger = get_logger(__name__)
# pylint:disable=too-many-lines,unused-argument
def which(binary):
path_var = os.getenv('PATH')
if platform.system() == 'Windows':
binary = binary + '.exe'
parts = path_var.split(';')
else:
parts = path_var.split(':')
for part in parts:
bin_path = os.path.join(part, binary)
if os.path.exists(bin_path) and os.path.isfile(bin_path) and os.access(bin_path, os.X_OK):
return bin_path
return None
def _resource_client_factory(cli_ctx):
return get_mgmt_service_client(cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES)
def wait_then_open(url):
"""
Waits for a bit then opens a URL. Useful for waiting for a proxy to come up, and then open the URL.
"""
    for _ in range(1, 10):
        try:
            urlopen(url, context=_ssl_context())
        except URLError:
            time.sleep(1)
        else:
            break
webbrowser.open_new_tab(url)
def wait_then_open_async(url):
"""
Spawns a thread that waits for a bit then opens a URL.
"""
    t = threading.Thread(target=wait_then_open, args=(url,))
t.daemon = True
t.start()
def acs_browse(cmd, client, resource_group, name, disable_browser=False, ssh_key_file=None):
"""
Opens a browser to the web interface for the cluster orchestrator
:param name: Name of the target Azure container service instance.
:type name: String
    :param resource_group: Name of the Azure container service's resource group.
    :type resource_group: String
    :param disable_browser: If true, don't launch a web browser after establishing the proxy
:type disable_browser: bool
:param ssh_key_file: If set a path to an SSH key to use, only applies to DCOS
:type ssh_key_file: string
"""
acs_info = _get_acs_info(cmd.cli_ctx, name, resource_group)
_acs_browse_internal(cmd, client, acs_info, resource_group, name, disable_browser, ssh_key_file)
def _acs_browse_internal(cmd, client, acs_info, resource_group, name, disable_browser, ssh_key_file):
orchestrator_type = acs_info.orchestrator_profile.orchestrator_type # pylint: disable=no-member
if orchestrator_type == 'kubernetes' or \
orchestrator_type == ContainerServiceOrchestratorTypes.kubernetes or \
(acs_info.custom_profile and acs_info.custom_profile.orchestrator == 'kubernetes'): # pylint: disable=no-member
return k8s_browse(cmd, client, name, resource_group, disable_browser, ssh_key_file=ssh_key_file)
elif orchestrator_type == 'dcos' or orchestrator_type == ContainerServiceOrchestratorTypes.dcos:
return _dcos_browse_internal(acs_info, disable_browser, ssh_key_file)
else:
raise CLIError('Unsupported orchestrator type {} for browse'.format(orchestrator_type))
def k8s_browse(cmd, client, name, resource_group, disable_browser=False, ssh_key_file=None):
"""
Launch a proxy and browse the Kubernetes web UI.
    :param disable_browser: If true, don't launch a web browser after establishing the proxy
:type disable_browser: bool
"""
acs_info = _get_acs_info(cmd.cli_ctx, name, resource_group)
_k8s_browse_internal(name, acs_info, disable_browser, ssh_key_file)
def _k8s_browse_internal(name, acs_info, disable_browser, ssh_key_file):
if not which('kubectl'):
        raise CLIError('Cannot find kubectl executable in PATH')
browse_path = os.path.join(get_config_dir(), 'acsBrowseConfig.yaml')
if os.path.exists(browse_path):
os.remove(browse_path)
_k8s_get_credentials_internal(name, acs_info, browse_path, ssh_key_file)
logger.warning('Proxy running on 127.0.0.1:8001/ui')
logger.warning('Press CTRL+C to close the tunnel...')
if not disable_browser:
wait_then_open_async('http://127.0.0.1:8001/ui')
subprocess.call(["kubectl", "--kubeconfig", browse_path, "proxy"])
def dcos_browse(cmd, client, name, resource_group, disable_browser=False, ssh_key_file=None):
"""
Creates an SSH tunnel to the Azure container service, and opens the Mesosphere DC/OS dashboard in the browser.
    :param name: Name of the target Azure container service instance.
    :type name: String
    :param resource_group: Name of the Azure container service's resource group.
    :type resource_group: String
    :param disable_browser: If true, don't launch a web browser after establishing the proxy
:type disable_browser: bool
:param ssh_key_file: Path to the SSH key to use
:type ssh_key_file: string
"""
acs_info = _get_acs_info(cmd.cli_ctx, name, resource_group)
_dcos_browse_internal(acs_info, disable_browser, ssh_key_file)
def _dcos_browse_internal(acs_info, disable_browser, ssh_key_file):
if not os.path.isfile(ssh_key_file):
raise CLIError('Private key file {} does not exist'.format(ssh_key_file))
acs = acs_client.ACSClient()
if not acs.connect(_get_host_name(acs_info), _get_username(acs_info),
key_filename=ssh_key_file):
raise CLIError('Error connecting to ACS: {}'.format(_get_host_name(acs_info)))
octarine_bin = '/opt/mesosphere/bin/octarine'
if not acs.file_exists(octarine_bin):
raise CLIError('Proxy server ({}) does not exist on the cluster.'.format(octarine_bin))
proxy_id = _rand_str(16)
proxy_cmd = '{} {}'.format(octarine_bin, proxy_id)
acs.run(proxy_cmd, background=True)
# Parse the output to get the remote PORT
proxy_client_cmd = '{} --client --port {}'.format(octarine_bin, proxy_id)
stdout, _ = acs.run(proxy_client_cmd)
remote_port = int(stdout.read().decode().strip())
local_port = acs.get_available_local_port()
# Set the proxy
proxy.set_http_proxy('127.0.0.1', local_port)
logger.warning('Proxy running on 127.0.0.1:%s', local_port)
logger.warning('Press CTRL+C to close the tunnel...')
if not disable_browser:
wait_then_open_async('http://127.0.0.1')
try:
acs.create_tunnel(
remote_host='127.0.0.1',
remote_port=remote_port,
local_port=local_port)
finally:
proxy.disable_http_proxy()
return
def acs_install_cli(cmd, client, resource_group, name, install_location=None, client_version=None):
acs_info = _get_acs_info(cmd.cli_ctx, name, resource_group)
orchestrator_type = acs_info.orchestrator_profile.orchestrator_type # pylint: disable=no-member
kwargs = {'install_location': install_location}
if client_version:
kwargs['client_version'] = client_version
if orchestrator_type == 'kubernetes':
return k8s_install_cli(**kwargs)
elif orchestrator_type == 'dcos':
return dcos_install_cli(**kwargs)
else:
raise CLIError('Unsupported orchestrator type {} for install-cli'.format(orchestrator_type))
def _ssl_context():
if sys.version_info < (3, 4):
return ssl.SSLContext(ssl.PROTOCOL_TLSv1)
return ssl.create_default_context()
def _urlretrieve(url, filename):
req = urlopen(url, context=_ssl_context())
with open(filename, "wb") as f:
f.write(req.read())
def dcos_install_cli(cmd, install_location=None, client_version='1.8'):
"""
Downloads the dcos command line from Mesosphere
"""
system = platform.system()
if not install_location:
raise CLIError(
"No install location specified and it could not be determined from the current platform '{}'".format(
system))
base_url = 'https://downloads.dcos.io/binaries/cli/{}/x86-64/dcos-{}/{}'
if system == 'Windows':
file_url = base_url.format('windows', client_version, 'dcos.exe')
elif system == 'Linux':
# TODO Support ARM CPU here
file_url = base_url.format('linux', client_version, 'dcos')
elif system == 'Darwin':
file_url = base_url.format('darwin', client_version, 'dcos')
else:
        raise CLIError('Unsupported system "{}".'.format(system))
logger.warning('Downloading client to %s', install_location)
try:
_urlretrieve(file_url, install_location)
os.chmod(install_location,
os.stat(install_location).st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
except IOError as err:
raise CLIError('Connection error while attempting to download client ({})'.format(err))
def k8s_install_cli(cmd, client_version='latest', install_location=None):
"""Install kubectl, a command-line interface for Kubernetes clusters."""
if client_version == 'latest':
context = _ssl_context()
version = urlopen('https://storage.googleapis.com/kubernetes-release/release/stable.txt',
context=context).read()
client_version = version.decode('UTF-8').strip()
else:
client_version = "v%s" % client_version
file_url = ''
system = platform.system()
base_url = 'https://storage.googleapis.com/kubernetes-release/release/{}/bin/{}/amd64/{}'
if system == 'Windows':
file_url = base_url.format(client_version, 'windows', 'kubectl.exe')
elif system == 'Linux':
# TODO: Support ARM CPU here
file_url = base_url.format(client_version, 'linux', 'kubectl')
elif system == 'Darwin':
file_url = base_url.format(client_version, 'darwin', 'kubectl')
else:
        raise CLIError('Unsupported system "{}".'.format(system))
logger.warning('Downloading client to %s from %s', install_location, file_url)
try:
_urlretrieve(file_url, install_location)
os.chmod(install_location,
os.stat(install_location).st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
except IOError as ex:
raise CLIError('Connection error while attempting to download client ({})'.format(ex))
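# For example (illustrative values): requesting client_version '1.9.1' on
# Linux downloads
# https://storage.googleapis.com/kubernetes-release/release/v1.9.1/bin/linux/amd64/kubectl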
def k8s_install_connector(cmd, client, name, resource_group_name, connector_name,
location=None, service_principal=None, client_secret=None,
chart_url=None, os_type='Linux'):
from subprocess import PIPE, Popen
helm_not_installed = 'Helm not detected, please verify if it is installed.'
node_prefix = 'virtual-kubelet-' + connector_name.lower()
url_chart = chart_url
# Check if Helm is installed locally
try:
Popen(["helm"], stdout=PIPE, stderr=PIPE)
except OSError:
raise CLIError(helm_not_installed)
# Validate if the RG exists
groups = _resource_client_factory(cmd.cli_ctx).resource_groups
    # Get the resource group; this errors out if the group doesn't exist, and
    # its location is used below as the default for the connector.
    rgkaci = groups.get(resource_group_name)
# Auto assign the location
if location is None:
location = rgkaci.location # pylint:disable=no-member
# Get the credentials from a AKS instance
_, browse_path = tempfile.mkstemp()
aks_get_credentials(cmd, client, resource_group_name, name, admin=False, path=browse_path)
subscription_id = _get_subscription_id(cmd.cli_ctx)
dns_name_prefix = _get_default_dns_prefix(connector_name, resource_group_name, subscription_id)
# Ensure that the SPN exists
principal_obj = _ensure_aks_service_principal(cmd.cli_ctx, service_principal, client_secret, subscription_id,
dns_name_prefix, location, connector_name)
client_secret = principal_obj.get('client_secret')
service_principal = principal_obj.get('service_principal')
# Get the TenantID
profile = Profile(cli_ctx=cmd.cli_ctx)
_, _, tenant_id = profile.get_login_credentials()
# Check if we want the linux connector
if os_type.lower() in ['linux', 'both']:
_helm_install_aci_connector(url_chart, connector_name, service_principal, client_secret,
subscription_id, tenant_id, rgkaci.name, location,
node_prefix + '-linux', 'Linux')
# Check if we want the windows connector
if os_type.lower() in ['windows', 'both']:
_helm_install_aci_connector(url_chart, connector_name, service_principal, client_secret,
subscription_id, tenant_id, rgkaci.name, location,
node_prefix + '-win', 'Windows')
def _helm_install_aci_connector(url_chart, connector_name, service_principal, client_secret,
subscription_id, tenant_id, aci_resource_group, aci_region,
node_name, os_type):
image_tag = 'latest'
node_taint = 'azure.com/aci'
helm_release_name = connector_name.lower() + "-" + os_type.lower()
logger.warning("Deploying the ACI connector for '%s' using Helm", os_type)
try:
subprocess.call(["helm", "install", url_chart, "--name", helm_release_name, "--set", "env.azureClientId=" +
service_principal + ",env.azureClientKey=" + client_secret + ",env.azureSubscriptionId=" +
subscription_id + ",env.azureTenantId=" + tenant_id + ",env.aciResourceGroup=" +
aci_resource_group + ",env.aciRegion=" + aci_region + ",image.tag=" + image_tag +
",env.nodeName=" + node_name + ",env.nodeTaint=" + node_taint + ",env.nodeOsType=" + os_type])
except subprocess.CalledProcessError as err:
raise CLIError('Could not deploy the ACI connector Chart: {}'.format(err))
def k8s_uninstall_connector(cmd, client, name, connector_name, resource_group_name,
graceful=False, os_type='Linux'):
from subprocess import PIPE, Popen
helm_not_installed = "Error : Helm not detected, please verify if it is installed."
# Check if Helm is installed locally
try:
Popen(["helm"], stdout=PIPE, stderr=PIPE)
except OSError:
raise CLIError(helm_not_installed)
# Get the credentials from a AKS instance
_, browse_path = tempfile.mkstemp()
aks_get_credentials(cmd, client, resource_group_name, name, admin=False, path=browse_path)
node_prefix = 'virtual-kubelet-' + connector_name.lower()
if os_type.lower() in ['linux', 'both']:
_undeploy_connector(graceful, node_prefix + '-linux', connector_name.lower() + '-linux')
if os_type.lower() in ['windows', 'both']:
_undeploy_connector(graceful, node_prefix + '-win', connector_name.lower() + '-windows')
def _undeploy_connector(graceful, node_name, helm_release_name):
if graceful:
logger.warning('Graceful option selected, will try to drain the node first')
from subprocess import PIPE, Popen
kubectl_not_installed = 'Kubectl not detected, please verify if it is installed.'
try:
Popen(["kubectl"], stdout=PIPE, stderr=PIPE)
except OSError:
raise CLIError(kubectl_not_installed)
try:
drain_node = subprocess.check_output(
['kubectl', 'drain', node_name, '--force'],
universal_newlines=True)
if not drain_node:
raise CLIError('Could not find the node, make sure you' +
' are using the correct --os-type')
except subprocess.CalledProcessError as err:
raise CLIError('Could not find the node, make sure you' +
' are using the correct --os-type option: {}'.format(err))
logger.warning("Undeploying the '%s' using Helm", helm_release_name)
try:
subprocess.call(['helm', 'del', helm_release_name, '--purge'])
except subprocess.CalledProcessError as err:
raise CLIError('Could not undeploy the ACI connector Chart: {}'.format(err))
def _build_service_principal(rbac_client, cli_ctx, name, url, client_secret):
    # report progress through the CLI's progress controller
    hook = cli_ctx.get_progress_controller(True)
    hook.add(message='Creating service principal', value=0, total_val=1.0)
logger.info('Creating service principal')
result = create_application(rbac_client.applications, name, url, [url], password=client_secret)
service_principal = result.app_id # pylint: disable=no-member
for x in range(0, 10):
hook.add(message='Creating service principal', value=0.1 * x, total_val=1.0)
try:
create_service_principal(cli_ctx, service_principal, rbac_client=rbac_client)
break
# TODO figure out what exception AAD throws here sometimes.
except Exception as ex: # pylint: disable=broad-except
logger.info(ex)
time.sleep(2 + 2 * x)
else:
return False
hook.add(message='Finished service principal creation', value=1.0, total_val=1.0)
logger.info('Finished service principal creation')
return service_principal
def _add_role_assignment(cli_ctx, role, service_principal, delay=2):
# AAD can have delays in propagating data, so sleep and retry
hook = cli_ctx.get_progress_controller(True)
hook.add(message='Waiting for AAD role to propagate', value=0, total_val=1.0)
logger.info('Waiting for AAD role to propagate')
for x in range(0, 10):
hook.add(message='Waiting for AAD role to propagate', value=0.1 * x, total_val=1.0)
try:
# TODO: break this out into a shared utility library
create_role_assignment(cli_ctx, role, service_principal)
break
except CloudError as ex:
if ex.message == 'The role assignment already exists.':
break
logger.info(ex.message)
except: # pylint: disable=bare-except
pass
time.sleep(delay + delay * x)
else:
return False
hook.add(message='AAD role propagation done', value=1.0, total_val=1.0)
logger.info('AAD role propagation done')
return True
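# With the default delay of 2 seconds, the loop above waits up to
# sum(2 + 2 * x for x in range(10)) == 110 seconds for the role assignment
# to succeed before giving up (illustrative arithmetic).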
def _get_subscription_id(cli_ctx):
_, sub_id, _ = Profile(cli_ctx=cli_ctx).get_login_credentials(subscription_id=None)
return sub_id
def _get_default_dns_prefix(name, resource_group_name, subscription_id):
# Use subscription id to provide uniqueness and prevent DNS name clashes
name_part = re.sub('[^A-Za-z0-9-]', '', name)[0:10]
if not name_part[0].isalpha():
name_part = (str('a') + name_part)[0:10]
resource_group_part = re.sub('[^A-Za-z0-9-]', '', resource_group_name)[0:16]
return '{}-{}-{}'.format(name_part, resource_group_part, subscription_id[0:6])
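# For example (illustrative values): name 'k8s_cluster', resource group
# 'demo-rg' and a subscription id beginning '012345' yield the prefix
# 'k8scluster-demo-rg-012345'.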
def list_acs_locations(cmd, client):
return {
"productionRegions": regions_in_prod,
"previewRegions": regions_in_preview
}
def _generate_windows_profile(windows, admin_username, admin_password):
if windows:
if not admin_password:
raise CLIError('--admin-password is required.')
if len(admin_password) < 6:
raise CLIError('--admin-password must be at least 6 characters')
windows_profile = {
"adminUsername": admin_username,
"adminPassword": admin_password,
}
return windows_profile
return None
def _generate_master_pool_profile(api_version, master_profile, master_count, dns_name_prefix,
master_vm_size, master_osdisk_size, master_vnet_subnet_id,
master_first_consecutive_static_ip, master_storage_profile):
master_pool_profile = {}
default_master_pool_profile = {
"count": int(master_count),
"dnsPrefix": dns_name_prefix + 'mgmt',
}
if api_version == "2017-07-01":
default_master_pool_profile = _update_dict(default_master_pool_profile, {
"count": int(master_count),
"dnsPrefix": dns_name_prefix + 'mgmt',
"vmSize": master_vm_size,
"osDiskSizeGB": int(master_osdisk_size),
"vnetSubnetID": master_vnet_subnet_id,
"firstConsecutiveStaticIP": master_first_consecutive_static_ip,
"storageProfile": master_storage_profile,
})
if not master_profile:
master_pool_profile = default_master_pool_profile
else:
master_pool_profile = _update_dict(default_master_pool_profile, master_profile)
return master_pool_profile
def _generate_agent_pool_profiles(api_version, agent_profiles, agent_count, dns_name_prefix,
agent_vm_size, os_type, agent_osdisk_size, agent_vnet_subnet_id,
agent_ports, agent_storage_profile):
agent_pool_profiles = []
default_agent_pool_profile = {
"count": int(agent_count),
"vmSize": agent_vm_size,
"osType": os_type,
"dnsPrefix": dns_name_prefix + 'agent',
}
if api_version == "2017-07-01":
default_agent_pool_profile = _update_dict(default_agent_pool_profile, {
"count": int(agent_count),
"vmSize": agent_vm_size,
"osDiskSizeGB": int(agent_osdisk_size),
"osType": os_type,
"dnsPrefix": dns_name_prefix + 'agent',
"vnetSubnetID": agent_vnet_subnet_id,
"ports": agent_ports,
"storageProfile": agent_storage_profile,
})
if agent_profiles is None:
agent_pool_profiles.append(_update_dict(default_agent_pool_profile, {"name": "agentpool0"}))
else:
# override agentPoolProfiles by using the passed in agent_profiles
for idx, ap in enumerate(agent_profiles):
# if the user specified dnsPrefix, we honor that
# otherwise, we use the idx to avoid duplicate dns name
a = _update_dict({"dnsPrefix": dns_name_prefix + 'agent' + str(idx)}, ap)
agent_pool_profiles.append(_update_dict(default_agent_pool_profile, a))
return agent_pool_profiles
def _generate_outputs(name, orchestrator_type, admin_username):
# define outputs
outputs = {
"masterFQDN": {
"type": "string",
"value": "[reference(concat('Microsoft.ContainerService/containerServices/', '{}')).masterProfile.fqdn]".format(name) # pylint: disable=line-too-long
},
"sshMaster0": {
"type": "string",
"value": "[concat('ssh ', '{0}', '@', reference(concat('Microsoft.ContainerService/containerServices/', '{1}')).masterProfile.fqdn, ' -A -p 22')]".format(admin_username, name) # pylint: disable=line-too-long
},
}
if orchestrator_type.lower() != "kubernetes":
outputs["agentFQDN"] = {
"type": "string",
"value": "[reference(concat('Microsoft.ContainerService/containerServices/', '{}')).agentPoolProfiles[0].fqdn]".format(name) # pylint: disable=line-too-long
}
# override sshMaster0 for non-kubernetes scenarios
outputs["sshMaster0"] = {
"type": "string",
"value": "[concat('ssh ', '{0}', '@', reference(concat('Microsoft.ContainerService/containerServices/', '{1}')).masterProfile.fqdn, ' -A -p 2200')]".format(admin_username, name) # pylint: disable=line-too-long
}
return outputs
def _generate_properties(api_version, orchestrator_type, orchestrator_version, master_pool_profile,
agent_pool_profiles, ssh_key_value, admin_username, windows_profile):
properties = {
"orchestratorProfile": {
"orchestratorType": orchestrator_type,
},
"masterProfile": master_pool_profile,
"agentPoolProfiles": agent_pool_profiles,
"linuxProfile": {
"ssh": {
"publicKeys": [
{
"keyData": ssh_key_value
}
]
},
"adminUsername": admin_username
},
}
if api_version == "2017-07-01":
properties["orchestratorProfile"]["orchestratorVersion"] = orchestrator_version
if windows_profile is not None:
properties["windowsProfile"] = windows_profile
return properties
# pylint: disable=too-many-locals
def acs_create(cmd, client, resource_group_name, deployment_name, name, ssh_key_value, dns_name_prefix=None,
location=None, admin_username="azureuser", api_version=None, master_profile=None,
master_vm_size="Standard_D2_v2", master_osdisk_size=0, master_count=1, master_vnet_subnet_id="",
master_first_consecutive_static_ip="10.240.255.5", master_storage_profile="",
agent_profiles=None, agent_vm_size="Standard_D2_v2", agent_osdisk_size=0,
agent_count=3, agent_vnet_subnet_id="", agent_ports=None, agent_storage_profile="",
orchestrator_type="DCOS", orchestrator_version="", service_principal=None, client_secret=None, tags=None,
windows=False, admin_password="", generate_ssh_keys=False, # pylint: disable=unused-argument
validate=False, no_wait=False):
"""Create a new Acs.
:param resource_group_name: The name of the resource group. The name
is case insensitive.
:type resource_group_name: str
:param deployment_name: The name of the deployment.
:type deployment_name: str
:param dns_name_prefix: Sets the Domain name prefix for the cluster.
The concatenation of the domain name and the regionalized DNS zone
make up the fully qualified domain name associated with the public
IP address.
:type dns_name_prefix: str
:param name: Resource name for the container service.
:type name: str
:param ssh_key_value: Configure all linux machines with the SSH RSA
public key string. Your key should include three parts, for example
'ssh-rsa AAAAB...snip...UcyupgH azureuser@linuxvm
:type ssh_key_value: str
:param content_version: If included it must match the ContentVersion
in the template.
:type content_version: str
:param admin_username: User name for the Linux Virtual Machines.
:type admin_username: str
:param api_version: ACS API version to use
:type api_version: str
:param master_profile: MasterProfile used to describe master pool
:type master_profile: dict
:param master_vm_size: The size of master pool Virtual Machine
:type master_vm_size: str
:param master_osdisk_size: The osDisk size in GB of master pool Virtual Machine
:type master_osdisk_size: int
:param master_count: The number of masters for the cluster.
:type master_count: int
:param master_vnet_subnet_id: The vnet subnet id for master pool
:type master_vnet_subnet_id: str
:param master_storage_profile: The storage profile used for master pool.
Possible value could be StorageAccount, ManagedDisk.
:type master_storage_profile: str
:param agent_profiles: AgentPoolProfiles used to describe agent pools
:type agent_profiles: dict
:param agent_vm_size: The size of the Virtual Machine.
:type agent_vm_size: str
:param agent_osdisk_size: The osDisk size in GB of agent pool Virtual Machine
:type agent_osdisk_size: int
:param agent_vnet_subnet_id: The vnet subnet id for master pool
:type agent_vnet_subnet_id: str
:param agent_ports: the ports exposed on the agent pool
:type agent_ports: list
:param agent_storage_profile: The storage profile used for agent pool.
Possible value could be StorageAccount, ManagedDisk.
:type agent_storage_profile: str
:param location: Location for VM resources.
:type location: str
:param orchestrator_type: The type of orchestrator used to manage the
applications on the cluster.
:type orchestrator_type: str or :class:`orchestratorType
<Default.models.orchestratorType>`
:param tags: Tags object.
:type tags: object
:param windows: If true, the cluster will be built for running Windows container.
:type windows: bool
    :param admin_password: The administration password for Windows nodes. Only available if --windows=true
:type admin_password: str
:param bool raw: returns the direct response alongside the
deserialized response
:rtype:
:class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
instance that returns :class:`DeploymentExtended
<Default.models.DeploymentExtended>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
if ssh_key_value is not None and not is_valid_ssh_rsa_public_key(ssh_key_value):
raise CLIError('Provided ssh key ({}) is invalid or non-existent'.format(ssh_key_value))
subscription_id = _get_subscription_id(cmd.cli_ctx)
if not dns_name_prefix:
dns_name_prefix = _get_default_dns_prefix(name, resource_group_name, subscription_id)
groups = _resource_client_factory(cmd.cli_ctx).resource_groups
    # Get the resource group; this errors out if the group doesn't exist, and
    # its location is used as the default below.
    rg = groups.get(resource_group_name)
if location is None:
location = rg.location # pylint:disable=no-member
# if api-version is not specified, or specified in a version not supported
# override based on location
if api_version is None or api_version not in ["2017-01-31", "2017-07-01"]:
if location in regions_in_preview:
api_version = "2017-07-01" # 2017-07-01 supported in the preview locations
else:
api_version = "2017-01-31" # 2017-01-31 applied to other locations
if orchestrator_type.lower() == 'kubernetes':
principal_obj = _ensure_service_principal(cmd.cli_ctx, service_principal, client_secret, subscription_id,
dns_name_prefix, location, name)
client_secret = principal_obj.get("client_secret")
service_principal = principal_obj.get("service_principal")
elif windows:
raise CLIError('--windows is only supported for Kubernetes clusters')
# set location if void
if not location:
location = '[resourceGroup().location]'
# set os_type
os_type = 'Linux'
if windows:
os_type = 'Windows'
# set agent_ports if void
if not agent_ports:
agent_ports = []
# get windows_profile
windows_profile = _generate_windows_profile(windows, admin_username, admin_password)
# The resources.properties fields should match with ContainerServices' api model
master_pool_profile = _generate_master_pool_profile(api_version, master_profile, master_count, dns_name_prefix,
master_vm_size, master_osdisk_size, master_vnet_subnet_id,
master_first_consecutive_static_ip, master_storage_profile)
agent_pool_profiles = _generate_agent_pool_profiles(api_version, agent_profiles, agent_count, dns_name_prefix,
agent_vm_size, os_type, agent_osdisk_size, agent_vnet_subnet_id,
agent_ports, agent_storage_profile)
outputs = _generate_outputs(name, orchestrator_type, admin_username)
properties = _generate_properties(api_version, orchestrator_type, orchestrator_version, master_pool_profile,
agent_pool_profiles, ssh_key_value, admin_username, windows_profile)
resource = {
"apiVersion": api_version,
"location": location,
"type": "Microsoft.ContainerService/containerServices",
"name": name,
"tags": tags,
"properties": properties,
}
template = {
"$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
"contentVersion": "1.0.0.0",
"resources": [
resource,
],
"outputs": outputs,
}
params = {}
if service_principal is not None and client_secret is not None:
properties["servicePrincipalProfile"] = {
"clientId": service_principal,
"secret": "[parameters('clientSecret')]",
}
template["parameters"] = {
"clientSecret": {
"type": "secureString",
"metadata": {
"description": "The client secret for the service principal"
}
}
}
params = {
"clientSecret": {
"value": client_secret
}
}
# Due to SPN replication latency, we do a few retries here
max_retry = 30
retry_exception = Exception(None)
for _ in range(0, max_retry):
try:
return _invoke_deployment(cmd.cli_ctx, resource_group_name, deployment_name,
template, params, validate, no_wait)
except CloudError as ex:
retry_exception = ex
if 'is not valid according to the validation procedure' in ex.message or \
'The credentials in ServicePrincipalProfile were invalid' in ex.message or \
'not found in Active Directory tenant' in ex.message:
time.sleep(3)
else:
raise ex
raise retry_exception
def store_acs_service_principal(subscription_id, client_secret, service_principal,
file_name='acsServicePrincipal.json'):
obj = {}
if client_secret:
obj['client_secret'] = client_secret
if service_principal:
obj['service_principal'] = service_principal
config_path = os.path.join(get_config_dir(), file_name)
full_config = load_service_principals(config_path=config_path)
if not full_config:
full_config = {}
full_config[subscription_id] = obj
with os.fdopen(os.open(config_path, os.O_RDWR | os.O_CREAT | os.O_TRUNC, 0o600),
'w+') as spFile:
json.dump(full_config, spFile)
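# The stored file maps subscription ids to credentials, e.g. (illustrative):
# {"<subscription-id>": {"service_principal": "<app-id>", "client_secret": "<secret>"}}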
def load_acs_service_principal(subscription_id, file_name='acsServicePrincipal.json'):
config_path = os.path.join(get_config_dir(), file_name)
config = load_service_principals(config_path)
if not config:
return None
return config.get(subscription_id)
def load_service_principals(config_path):
if not os.path.exists(config_path):
return None
fd = os.open(config_path, os.O_RDONLY)
try:
with os.fdopen(fd) as f:
return shell_safe_json_parse(f.read())
except: # pylint: disable=bare-except
return None
def _invoke_deployment(cli_ctx, resource_group_name, deployment_name, template, parameters, validate, no_wait):
from azure.mgmt.resource.resources import ResourceManagementClient
from azure.mgmt.resource.resources.models import DeploymentProperties
properties = DeploymentProperties(template=template, parameters=parameters, mode='incremental')
smc = get_mgmt_service_client(cli_ctx, ResourceManagementClient).deployments
if validate:
logger.info('==== BEGIN TEMPLATE ====')
logger.info(json.dumps(template, indent=2))
logger.info('==== END TEMPLATE ====')
return smc.validate(resource_group_name, deployment_name, properties)
return smc.create_or_update(resource_group_name, deployment_name, properties, raw=no_wait)
def k8s_get_credentials(cmd, client, name, resource_group_name,
path=os.path.join(os.path.expanduser('~'), '.kube', 'config'),
ssh_key_file=None):
"""Download and install kubectl credentials from the cluster master
:param name: The name of the cluster.
:type name: str
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param path: Where to install the kubectl config file
:type path: str
:param ssh_key_file: Path to an SSH key file to use
:type ssh_key_file: str
"""
acs_info = _get_acs_info(cmd.cli_ctx, name, resource_group_name)
_k8s_get_credentials_internal(name, acs_info, path, ssh_key_file)
def _k8s_get_credentials_internal(name, acs_info, path, ssh_key_file):
if ssh_key_file is not None and not os.path.isfile(ssh_key_file):
raise CLIError('Private key file {} does not exist'.format(ssh_key_file))
dns_prefix = acs_info.master_profile.dns_prefix # pylint: disable=no-member
location = acs_info.location # pylint: disable=no-member
user = acs_info.linux_profile.admin_username # pylint: disable=no-member
_mkdir_p(os.path.dirname(path))
path_candidate = path
ix = 0
while os.path.exists(path_candidate):
ix += 1
path_candidate = '{}-{}-{}'.format(path, name, ix)
# TODO: this only works for public cloud, need other casing for national clouds
acs_client.secure_copy(user, '{}.{}.cloudapp.azure.com'.format(dns_prefix, location),
'.kube/config', path_candidate, key_filename=ssh_key_file)
# merge things
if path_candidate != path:
try:
merge_kubernetes_configurations(path, path_candidate)
except yaml.YAMLError as exc:
logger.warning('Failed to merge credentials to kube config file: %s', exc)
logger.warning('The credentials have been saved to %s', path_candidate)
def _handle_merge(existing, addition, key):
if addition[key]:
if existing[key] is None:
existing[key] = addition[key]
return
for i in addition[key]:
if i not in existing[key]:
existing[key].append(i)
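# Illustrative sketch (not part of the CLI): given
#     existing = {'clusters': [{'name': 'a'}]}
#     addition = {'clusters': [{'name': 'a'}, {'name': 'b'}]}
# _handle_merge(existing, addition, 'clusters') appends only the unseen entry,
# leaving existing['clusters'] == [{'name': 'a'}, {'name': 'b'}].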
def merge_kubernetes_configurations(existing_file, addition_file):
try:
with open(existing_file) as stream:
existing = yaml.safe_load(stream)
except (IOError, OSError) as ex:
if getattr(ex, 'errno', 0) == errno.ENOENT:
raise CLIError('{} does not exist'.format(existing_file))
else:
raise
except yaml.parser.ParserError as ex:
raise CLIError('Error parsing {} ({})'.format(existing_file, str(ex)))
try:
with open(addition_file) as stream:
addition = yaml.safe_load(stream)
except (IOError, OSError) as ex:
if getattr(ex, 'errno', 0) == errno.ENOENT:
            raise CLIError('{} does not exist'.format(addition_file))
else:
raise
except yaml.parser.ParserError as ex:
raise CLIError('Error parsing {} ({})'.format(addition_file, str(ex)))
if addition is None:
raise CLIError('failed to load additional configuration from {}'.format(addition_file))
if existing is None:
existing = addition
else:
_handle_merge(existing, addition, 'clusters')
_handle_merge(existing, addition, 'users')
_handle_merge(existing, addition, 'contexts')
existing['current-context'] = addition['current-context']
with open(existing_file, 'w+') as stream:
yaml.dump(existing, stream, default_flow_style=True)
current_context = addition.get('current-context', 'UNKNOWN')
msg = 'Merged "{}" as current context in {}'.format(current_context, existing_file)
print(msg)
def _get_host_name(acs_info):
"""
Gets the FQDN from the acs_info object.
:param acs_info: ContainerService object from Azure REST API
:type acs_info: ContainerService
"""
if acs_info is None:
raise CLIError('Missing acs_info')
if acs_info.master_profile is None:
raise CLIError('Missing master_profile')
if acs_info.master_profile.fqdn is None:
raise CLIError('Missing fqdn')
return acs_info.master_profile.fqdn
def _get_username(acs_info):
"""
Gets the admin user name from the Linux profile of the ContainerService object.
:param acs_info: ContainerService object from Azure REST API
:type acs_info: ContainerService
"""
if acs_info.linux_profile is not None:
return acs_info.linux_profile.admin_username
return None
def _get_acs_info(cli_ctx, name, resource_group_name):
"""
Gets the ContainerService object from Azure REST API.
:param name: ACS resource name
:type name: String
:param resource_group_name: Resource group name
:type resource_group_name: String
"""
container_services = cf_container_services(cli_ctx, None)
return container_services.get(resource_group_name, name)
def _rand_str(n):
"""
Gets a random string
"""
choices = string.ascii_lowercase + string.digits
return ''.join(random.SystemRandom().choice(choices) for _ in range(n))
def _mkdir_p(path):
# http://stackoverflow.com/a/600612
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def update_acs(cmd, client, resource_group_name, container_service_name, new_agent_count):
instance = client.get(resource_group_name, container_service_name)
instance.agent_pool_profiles[0].count = new_agent_count # pylint: disable=no-member
# null out the service principal because otherwise validation complains
if instance.orchestrator_profile.orchestrator_type == ContainerServiceOrchestratorTypes.kubernetes:
instance.service_principal_profile = None
# null out the windows profile so that validation doesn't complain about not having the admin password
instance.windows_profile = None
return client.create_or_update(resource_group_name, container_service_name, instance)
def list_container_services(cmd, client, resource_group_name=None):
''' List Container Services. '''
svc_list = client.list_by_resource_group(resource_group_name=resource_group_name) \
if resource_group_name else client.list()
return list(svc_list)
def show_service_principal(client, identifier):
object_id = _resolve_service_principal(client, identifier)
return client.get(object_id)
def _resolve_service_principal(client, identifier):
# todo: confirm with graph team that a service principal name must be unique
result = list(client.list(filter="servicePrincipalNames/any(c:c eq '{}')".format(identifier)))
if result:
return result[0].object_id
try:
uuid.UUID(identifier)
return identifier # assume an object id
except ValueError:
raise CLIError("service principal '{}' doesn't exist".format(identifier))
def create_application(client, display_name, homepage, identifier_uris,
available_to_other_tenants=False, password=None, reply_urls=None,
key_value=None, key_type=None, key_usage=None, start_date=None,
end_date=None):
from azure.graphrbac.models import GraphErrorException
password_creds, key_creds = _build_application_creds(password, key_value, key_type,
key_usage, start_date, end_date)
app_create_param = ApplicationCreateParameters(available_to_other_tenants,
display_name,
identifier_uris,
homepage=homepage,
reply_urls=reply_urls,
key_credentials=key_creds,
password_credentials=password_creds)
try:
return client.create(app_create_param)
except GraphErrorException as ex:
if 'insufficient privileges' in str(ex).lower():
link = 'https://docs.microsoft.com/en-us/azure/azure-resource-manager/resource-group-create-service-principal-portal' # pylint: disable=line-too-long
raise CLIError("Directory permission is needed for the current user to register the application. "
"For how to configure, please refer '{}'. Original error: {}".format(link, ex))
raise
def _build_application_creds(password=None, key_value=None, key_type=None,
key_usage=None, start_date=None, end_date=None):
if password and key_value:
raise CLIError('specify either --password or --key-value, but not both.')
if not start_date:
start_date = datetime.datetime.utcnow()
elif isinstance(start_date, str):
start_date = dateutil.parser.parse(start_date)
if not end_date:
end_date = start_date + relativedelta(years=1)
elif isinstance(end_date, str):
end_date = dateutil.parser.parse(end_date)
key_type = key_type or 'AsymmetricX509Cert'
key_usage = key_usage or 'Verify'
password_creds = None
key_creds = None
if password:
password_creds = [PasswordCredential(start_date, end_date, str(uuid.uuid4()), password)]
elif key_value:
key_creds = [KeyCredential(start_date, end_date, key_value, str(uuid.uuid4()), key_usage, key_type)]
return (password_creds, key_creds)
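# Illustrative defaults derived from the function above: start_date falls back
# to the current UTC time and end_date to one year later; for key credentials,
# key_type defaults to 'AsymmetricX509Cert' and key_usage to 'Verify'.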
def create_service_principal(cli_ctx, identifier, resolve_app=True, rbac_client=None):
if rbac_client is None:
rbac_client = get_graph_rbac_management_client(cli_ctx)
if resolve_app:
try:
uuid.UUID(identifier)
result = list(rbac_client.applications.list(filter="appId eq '{}'".format(identifier)))
except ValueError:
result = list(rbac_client.applications.list(
filter="identifierUris/any(s:s eq '{}')".format(identifier)))
if not result: # assume we get an object id
result = [rbac_client.applications.get(identifier)]
app_id = result[0].app_id
else:
app_id = identifier
return rbac_client.service_principals.create(ServicePrincipalCreateParameters(app_id, True))
def create_role_assignment(cli_ctx, role, assignee, resource_group_name=None, scope=None):
return _create_role_assignment(cli_ctx, role, assignee, resource_group_name, scope)
def _create_role_assignment(cli_ctx, role, assignee, resource_group_name=None, scope=None, resolve_assignee=True):
factory = get_auth_management_client(cli_ctx, scope)
assignments_client = factory.role_assignments
definitions_client = factory.role_definitions
scope = _build_role_scope(resource_group_name, scope, assignments_client.config.subscription_id)
role_id = _resolve_role_id(role, scope, definitions_client)
object_id = _resolve_object_id(cli_ctx, assignee) if resolve_assignee else assignee
properties = RoleAssignmentProperties(role_id, object_id)
assignment_name = uuid.uuid4()
custom_headers = None
return assignments_client.create(scope, assignment_name, properties, custom_headers=custom_headers)
def _build_role_scope(resource_group_name, scope, subscription_id):
subscription_scope = '/subscriptions/' + subscription_id
if scope:
if resource_group_name:
err = 'Resource group "{}" is redundant because scope is supplied'
raise CLIError(err.format(resource_group_name))
elif resource_group_name:
scope = subscription_scope + '/resourceGroups/' + resource_group_name
else:
scope = subscription_scope
return scope
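# Illustrative example derived from the function above: with subscription id
# "00000000-0000-0000-0000-000000000000", resource group "myGroup" and no
# explicit scope, the returned scope is
# "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/myGroup".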
def _resolve_role_id(role, scope, definitions_client):
role_id = None
try:
uuid.UUID(role)
role_id = role
except ValueError:
pass
if not role_id: # retrieve role id
role_defs = list(definitions_client.list(scope, "roleName eq '{}'".format(role)))
if not role_defs:
raise CLIError("Role '{}' doesn't exist.".format(role))
elif len(role_defs) > 1:
ids = [r.id for r in role_defs]
err = "More than one role matches the given name '{}'. Please pick a value from '{}'"
raise CLIError(err.format(role, ids))
role_id = role_defs[0].id
return role_id
def _resolve_object_id(cli_ctx, assignee):
client = get_graph_rbac_management_client(cli_ctx)
result = None
if assignee.find('@') >= 0: # looks like a user principal name
result = list(client.users.list(filter="userPrincipalName eq '{}'".format(assignee)))
if not result:
result = list(client.service_principals.list(
filter="servicePrincipalNames/any(c:c eq '{}')".format(assignee)))
if not result: # assume an object id, let us verify it
result = _get_object_stubs(client, [assignee])
# 2+ matches should never happen, so we only check 'no match' here
if not result:
raise CLIError("No matches in graph database for '{}'".format(assignee))
return result[0].object_id
def _get_object_stubs(graph_client, assignees):
params = GetObjectsParameters(include_directory_object_references=True,
object_ids=assignees)
return list(graph_client.objects.get_objects_by_object_ids(params))
def _update_dict(dict1, dict2):
cp = dict1.copy()
cp.update(dict2)
return cp
def aks_browse(cmd, client, resource_group_name, name, disable_browser=False):
if not which('kubectl'):
raise CLIError('Can not find kubectl executable in PATH')
proxy_url = 'http://127.0.0.1:8001/'
_, browse_path = tempfile.mkstemp()
# TODO: need to add an --admin option?
aks_get_credentials(cmd, client, resource_group_name, name, admin=False, path=browse_path)
# find the dashboard pod's name
try:
dashboard_pod = subprocess.check_output(
["kubectl", "get", "pods", "--kubeconfig", browse_path, "--namespace", "kube-system", "--output", "name",
"--selector", "k8s-app=kubernetes-dashboard"],
universal_newlines=True)
except subprocess.CalledProcessError as err:
raise CLIError('Could not find dashboard pod: {}'.format(err))
if dashboard_pod:
# remove the "pods/" prefix from the name
dashboard_pod = str(dashboard_pod)[5:].strip()
else:
raise CLIError("Couldn't find the Kubernetes dashboard pod.")
# launch kubectl port-forward locally to access the remote dashboard
logger.warning('Proxy running on %s', proxy_url)
logger.warning('Press CTRL+C to close the tunnel...')
if not disable_browser:
wait_then_open_async(proxy_url)
subprocess.call(["kubectl", "--kubeconfig", browse_path, "--namespace", "kube-system",
"port-forward", dashboard_pod, "8001:9090"])
def aks_create(cmd, client, resource_group_name, name, ssh_key_value, # pylint: disable=too-many-locals
dns_name_prefix=None,
location=None,
admin_username="azureuser",
kubernetes_version="1.7.7",
node_vm_size="Standard_DS1_v2",
node_osdisk_size=0,
node_count=3,
service_principal=None, client_secret=None,
tags=None,
generate_ssh_keys=False, # pylint: disable=unused-argument
no_wait=False):
try:
if not ssh_key_value or not is_valid_ssh_rsa_public_key(ssh_key_value):
raise ValueError()
except (TypeError, ValueError):
shortened_key = truncate_text(ssh_key_value)
raise CLIError('Provided ssh key ({}) is invalid or non-existent'.format(shortened_key))
subscription_id = _get_subscription_id(cmd.cli_ctx)
if not dns_name_prefix:
dns_name_prefix = _get_default_dns_prefix(name, resource_group_name, subscription_id)
groups = _resource_client_factory(cmd.cli_ctx).resource_groups
    # Just do the get; we don't need the result, and it will error out if the group doesn't exist.
rg = groups.get(resource_group_name)
if location is None:
location = rg.location # pylint:disable=no-member
ssh_config = ContainerServiceSshConfiguration(
[ContainerServiceSshPublicKey(key_data=ssh_key_value)])
agent_pool_profile = ContainerServiceAgentPoolProfile(
'nodepool1', # Must be 12 chars or less before ACS RP adds to it
count=int(node_count),
vm_size=node_vm_size,
dns_prefix=dns_name_prefix,
os_type="Linux",
storage_profile=ContainerServiceStorageProfileTypes.managed_disks
)
if node_osdisk_size:
agent_pool_profile.os_disk_size_gb = int(node_osdisk_size)
linux_profile = ContainerServiceLinuxProfile(admin_username, ssh=ssh_config)
principal_obj = _ensure_aks_service_principal(cmd.cli_ctx,
service_principal=service_principal, client_secret=client_secret,
subscription_id=subscription_id, dns_name_prefix=dns_name_prefix,
location=location, name=name)
service_principal_profile = ContainerServiceServicePrincipalProfile(
client_id=principal_obj.get("service_principal"),
secret=principal_obj.get("client_secret"),
key_vault_secret_ref=None)
mc = ManagedCluster(
location=location, tags=tags,
dns_prefix=dns_name_prefix,
kubernetes_version=kubernetes_version,
agent_pool_profiles=[agent_pool_profile],
linux_profile=linux_profile,
service_principal_profile=service_principal_profile)
# Due to SPN replication latency, we do a few retries here
max_retry = 30
retry_exception = Exception(None)
for _ in range(0, max_retry):
try:
return client.create_or_update(
resource_group_name=resource_group_name, resource_name=name, parameters=mc, raw=no_wait)
except CloudError as ex:
retry_exception = ex
if 'not found in Active Directory tenant' in ex.message:
time.sleep(3)
else:
raise ex
raise retry_exception
def aks_get_credentials(cmd, client, resource_group_name, name, admin=False,
path=os.path.join(os.path.expanduser('~'), '.kube', 'config')):
access_profile = client.get_access_profiles(
resource_group_name, name, "clusterAdmin" if admin else "clusterUser")
if not access_profile:
raise CLIError("No Kubernetes access profile found.")
else:
encoded_kubeconfig = access_profile.kube_config
kubeconfig = base64.b64decode(encoded_kubeconfig).decode(encoding='UTF-8')
_print_or_merge_credentials(path, kubeconfig)
def aks_list(cmd, client, resource_group_name=None):
if resource_group_name:
managed_clusters = client.list_by_resource_group(resource_group_name)
else:
managed_clusters = client.list()
return _remove_nulls(list(managed_clusters))
def aks_show(cmd, client, resource_group_name, name):
mc = client.get(resource_group_name, name)
return _remove_nulls([mc])[0]
def aks_scale(cmd, client, resource_group_name, name, node_count, no_wait=False):
instance = client.get(resource_group_name, name)
# TODO: change this approach when we support multiple agent pools.
instance.agent_pool_profiles[0].count = int(node_count) # pylint: disable=no-member
# null out the service principal because otherwise validation complains
instance.service_principal_profile = None
return client.create_or_update(resource_group_name, name, instance, raw=no_wait)
def aks_upgrade(cmd, client, resource_group_name, name, kubernetes_version, no_wait=False, **kwargs): # pylint: disable=unused-argument
instance = client.get(resource_group_name, name)
instance.kubernetes_version = kubernetes_version
# null out the service principal because otherwise validation complains
instance.service_principal_profile = None
return client.create_or_update(resource_group_name, name, instance, raw=no_wait)
def _ensure_aks_service_principal(cli_ctx,
service_principal=None,
client_secret=None,
subscription_id=None,
dns_name_prefix=None,
location=None,
name=None):
file_name_aks = 'aksServicePrincipal.json'
# TODO: This really needs to be unit tested.
rbac_client = get_graph_rbac_management_client(cli_ctx)
if not service_principal:
# --service-principal not specified, try to load it from local disk
principal_obj = load_acs_service_principal(subscription_id, file_name=file_name_aks)
if principal_obj:
service_principal = principal_obj.get('service_principal')
client_secret = principal_obj.get('client_secret')
else:
# Nothing to load, make one.
if not client_secret:
client_secret = binascii.b2a_hex(os.urandom(10)).decode('utf-8')
salt = binascii.b2a_hex(os.urandom(3)).decode('utf-8')
url = 'http://{}.{}.{}.cloudapp.azure.com'.format(salt, dns_name_prefix, location)
service_principal = _build_service_principal(rbac_client, cli_ctx, name, url, client_secret)
if not service_principal:
raise CLIError('Could not create a service principal with the right permissions. '
'Are you an Owner on this project?')
logger.info('Created a service principal: %s', service_principal)
# We don't need to add role assignment for this created SPN
else:
        # --service-principal specified, validate that --client-secret was supplied too
if not client_secret:
raise CLIError('--client-secret is required if --service-principal is specified')
store_acs_service_principal(subscription_id, client_secret, service_principal, file_name=file_name_aks)
return load_acs_service_principal(subscription_id, file_name=file_name_aks)
def _ensure_service_principal(cli_ctx,
service_principal=None,
client_secret=None,
subscription_id=None,
dns_name_prefix=None,
location=None,
name=None):
# TODO: This really needs to be unit tested.
rbac_client = get_graph_rbac_management_client(cli_ctx)
if not service_principal:
# --service-principal not specified, try to load it from local disk
principal_obj = load_acs_service_principal(subscription_id)
if principal_obj:
service_principal = principal_obj.get('service_principal')
client_secret = principal_obj.get('client_secret')
else:
# Nothing to load, make one.
if not client_secret:
client_secret = binascii.b2a_hex(os.urandom(10)).decode('utf-8')
salt = binascii.b2a_hex(os.urandom(3)).decode('utf-8')
url = 'http://{}.{}.{}.cloudapp.azure.com'.format(salt, dns_name_prefix, location)
service_principal = _build_service_principal(rbac_client, cli_ctx, name, url, client_secret)
if not service_principal:
raise CLIError('Could not create a service principal with the right permissions. '
'Are you an Owner on this project?')
logger.info('Created a service principal: %s', service_principal)
# add role first before save it
if not _add_role_assignment(cli_ctx, 'Contributor', service_principal):
logger.warning('Could not create a service principal with the right permissions. '
'Are you an Owner on this project?')
else:
# --service-principal specfied, validate --client-secret was too
if not client_secret:
raise CLIError('--client-secret is required if --service-principal is specified')
store_acs_service_principal(subscription_id, client_secret, service_principal)
return load_acs_service_principal(subscription_id)
def _print_or_merge_credentials(path, kubeconfig):
"""Merge an unencrypted kubeconfig into the file at the specified path, or print it to
stdout if the path is "-".
"""
# Special case for printing to stdout
if path == "-":
print(kubeconfig)
return
# ensure that at least an empty ~/.kube/config exists
directory = os.path.dirname(path)
if directory and not os.path.exists(directory):
try:
os.makedirs(directory)
except OSError as ex:
if ex.errno != errno.EEXIST:
raise
if not os.path.exists(path):
with open(path, 'w+t'):
pass
# merge the new kubeconfig into the existing one
fd, temp_path = tempfile.mkstemp()
additional_file = os.fdopen(fd, 'w+t')
try:
additional_file.write(kubeconfig)
additional_file.flush()
merge_kubernetes_configurations(path, temp_path)
except yaml.YAMLError as ex:
logger.warning('Failed to merge credentials to kube config file: %s', ex)
finally:
additional_file.close()
os.remove(temp_path)
def _remove_nulls(managed_clusters):
"""
Remove some often-empty fields from a list of ManagedClusters, so the JSON representation
doesn't contain distracting null fields.
    This works around a quirk of the Python SDK's behavior. These fields are not sent
by the server, but get recreated by the CLI's own "to_dict" serialization.
"""
attrs = ['tags']
ap_attrs = ['dns_prefix', 'fqdn', 'os_disk_size_gb', 'ports', 'vnet_subnet_id']
sp_attrs = ['key_vault_secret_ref', 'secret']
for managed_cluster in managed_clusters:
for attr in attrs:
if getattr(managed_cluster, attr, None) is None:
delattr(managed_cluster, attr)
for ap_profile in managed_cluster.agent_pool_profiles:
for attr in ap_attrs:
if getattr(ap_profile, attr, None) is None:
delattr(ap_profile, attr)
for attr in sp_attrs:
if getattr(managed_cluster.service_principal_profile, attr, None) is None:
delattr(managed_cluster.service_principal_profile, attr)
return managed_clusters
| 42.970842 | 222 | 0.673642 |
86ff8aa3c0f5e7147e168e4b136a20e314fbc947 | 1,030 | py | Python | test/test_v1_network_create_request.py | metal-stack/metal-python | cdf40fa86d2b2944f9818cef1c6723b1eecc506e | [
"MIT"
] | 7 | 2020-12-21T05:24:24.000Z | 2022-02-12T20:55:32.000Z | test/test_v1_network_create_request.py | metal-stack/metal-python | cdf40fa86d2b2944f9818cef1c6723b1eecc506e | [
"MIT"
] | 6 | 2020-09-16T07:23:34.000Z | 2022-01-18T12:05:30.000Z | test/test_v1_network_create_request.py | metal-stack/metal-python | cdf40fa86d2b2944f9818cef1c6723b1eecc506e | [
"MIT"
] | null | null | null | # coding: utf-8
"""
metal-api
API to manage and control plane resources like machines, switches, operating system images, machine sizes, networks, IP addresses and more # noqa: E501
OpenAPI spec version: v0.15.7
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import metal_python
from metal_python.models.v1_network_create_request import V1NetworkCreateRequest # noqa: E501
from metal_python.rest import ApiException
class TestV1NetworkCreateRequest(unittest.TestCase):
"""V1NetworkCreateRequest unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testV1NetworkCreateRequest(self):
"""Test V1NetworkCreateRequest"""
# FIXME: construct object with mandatory attributes with example values
# model = metal_python.models.v1_network_create_request.V1NetworkCreateRequest() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 25.121951 | 156 | 0.729126 |
8e823184e40d261e02029ad2ac88824a82b517ca | 434 | py | Python | wafec_tests_openstack_testware/src/wafec_tests_openstack_testware/exceptions.py | wafec/wafec-tests-openstack-testware | f06789800b40f316183736ee805af4b352a825ab | [
"MIT"
] | null | null | null | wafec_tests_openstack_testware/src/wafec_tests_openstack_testware/exceptions.py | wafec/wafec-tests-openstack-testware | f06789800b40f316183736ee805af4b352a825ab | [
"MIT"
] | null | null | null | wafec_tests_openstack_testware/src/wafec_tests_openstack_testware/exceptions.py | wafec/wafec-tests-openstack-testware | f06789800b40f316183736ee805af4b352a825ab | [
"MIT"
] | null | null | null | import abc
__all__ = [
'NotFoundException',
'StateMismatchException',
'ArgumentException',
'IllegalStateException',
'ExceptionBase'
]
class ExceptionBase(Exception, metaclass=abc.ABCMeta):
pass
class NotFoundException(ExceptionBase):
pass
class StateMismatchException(ExceptionBase):
pass
class ArgumentException(ExceptionBase):
pass
class IllegalStateException(ExceptionBase):
pass
| 14.466667 | 54 | 0.739631 |
0bf1839ce9594366cf07a158cba9039025d1ce2b | 4,220 | py | Python | venv1/Lib/site-packages/tensorflow/contrib/quantize/python/common.py | Soum-Soum/Tensorflow_Face_Finder | fec6c15d2df7012608511ad87f4b55731bf99478 | [
"Apache-2.0",
"MIT"
] | null | null | null | venv1/Lib/site-packages/tensorflow/contrib/quantize/python/common.py | Soum-Soum/Tensorflow_Face_Finder | fec6c15d2df7012608511ad87f4b55731bf99478 | [
"Apache-2.0",
"MIT"
] | 1 | 2021-05-20T00:58:04.000Z | 2021-05-20T00:58:04.000Z | venv1/Lib/site-packages/tensorflow/contrib/quantize/python/common.py | Soum-Soum/Tensorflow_Face_Finder | fec6c15d2df7012608511ad87f4b55731bf99478 | [
"Apache-2.0",
"MIT"
] | null | null | null | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Common utilities used across this package."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import re
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
# Skip all operations that are backprop related or export summaries.
SKIPPED_PREFIXES = (
'gradients/', 'RMSProp/', 'Adagrad/', 'Const_', 'HistogramSummary',
'ScalarSummary')
# Valid activation ops for quantization end points.
_ACTIVATION_OP_SUFFIXES = ['/Relu6', '/Relu', '/Identity']
# Regular expression for recognizing nodes that are part of batch norm group.
_BATCHNORM_RE = re.compile(r'^(.*)/BatchNorm/batchnorm')
def BatchNormGroups(graph):
"""Finds batch norm layers, returns their prefixes as a list of strings.
Args:
graph: Graph to inspect.
Returns:
List of strings, prefixes of batch norm group names found.
"""
bns = []
for op in graph.get_operations():
match = _BATCHNORM_RE.search(op.name)
if match:
bn = match.group(1)
if not bn.startswith(SKIPPED_PREFIXES):
bns.append(bn)
# Filter out duplicates.
return list(collections.OrderedDict.fromkeys(bns))
def GetEndpointActivationOp(graph, prefix):
"""Returns an Operation with the given prefix and a valid end point suffix.
Args:
graph: Graph where to look for the operation.
prefix: String, prefix of Operation to return.
Returns:
The Operation with the given prefix and a valid end point suffix or None if
there are no matching operations in the graph for any valid suffix
"""
for suffix in _ACTIVATION_OP_SUFFIXES:
activation = _GetOperationByNameDontThrow(graph, prefix + suffix)
if activation:
return activation
return None
def _GetOperationByNameDontThrow(graph, name):
"""Returns an Operation with the given name.
Args:
graph: Graph where to look for the operation.
name: String, name of Operation to return.
Returns:
The Operation with the given name. None if the name does not correspond to
any operation in the graph
"""
try:
return graph.get_operation_by_name(name)
except KeyError:
return None
def CreateOrGetQuantizationStep():
"""Returns a Tensor of the number of steps the quantized graph has run.
Returns:
Quantization step Tensor.
"""
quantization_step_name = 'fake_quantization_step'
quantization_step_tensor_name = quantization_step_name + '/AssignAdd:0'
g = ops.get_default_graph()
try:
return g.get_tensor_by_name(quantization_step_tensor_name)
except KeyError:
# Create in proper graph and base name_scope.
with g.name_scope(None):
quantization_step_tensor = variable_scope.get_variable(
quantization_step_name,
shape=[],
dtype=dtypes.int64,
initializer=init_ops.zeros_initializer(),
trainable=False,
collections=[ops.GraphKeys.GLOBAL_VARIABLES])
with g.name_scope(quantization_step_tensor.op.name + '/'):
# We return the incremented variable tensor. Since this is used in conds
# for quant_delay and freeze_bn_delay, it will run once per graph
# execution.
return state_ops.assign_add(quantization_step_tensor, 1)
| 34.308943 | 81 | 0.702133 |
acb6ae3d37e06e815fb740e6044b7da4545bff9d | 2,350 | py | Python | lithops/tests/util_func/map_util.py | pywren/pywren-ibm-cloud | 34ede62e3cc62f4626f9be89db4993bc6a8455a7 | [
"Apache-2.0"
] | 55 | 2018-04-23T09:58:56.000Z | 2020-09-09T11:47:16.000Z | lithops/tests/util_func/map_util.py | pywren/pywren-ibm-cloud | 34ede62e3cc62f4626f9be89db4993bc6a8455a7 | [
"Apache-2.0"
] | 256 | 2018-05-20T13:01:51.000Z | 2020-09-16T09:09:54.000Z | lithops/tests/util_func/map_util.py | pywren/pywren-ibm-cloud | 34ede62e3cc62f4626f9be89db4993bc6a8455a7 | [
"Apache-2.0"
] | 35 | 2018-04-23T09:07:57.000Z | 2020-08-12T13:43:06.000Z | import lithops
import logging
logger = logging.getLogger(__name__)
def simple_map_function(x, y):
return x + y
def concat(lst):
return " ".join(lst)
def hello_world(param):
return "Hello World!"
def lithops_inside_lithops_map_function(x):
def _func(x):
return x
fexec = lithops.FunctionExecutor()
fexec.map(_func, range(x))
return fexec.get_result()
def lithops_return_futures_map_function1(x):
def _func(x):
return x + 1
fexec = lithops.FunctionExecutor()
return fexec.map(_func, range(x))
def lithops_return_futures_map_function2(x):
def _func(x):
return x + 1
fexec = lithops.FunctionExecutor()
return fexec.call_async(_func, x + 5)
def lithops_return_futures_map_function3(x):
def _func(x):
return x + 1
fexec = lithops.FunctionExecutor()
fut1 = fexec.map(_func, range(x))
fut2 = fexec.map(_func, range(x))
return fut1 + fut2
def my_map_function_obj(obj, id):
"""returns a dictionary of {word:number of appearances} key:value items."""
print('Function id: {}'.format(id))
print('Bucket: {}'.format(obj.bucket))
print('Key: {}'.format(obj.key))
print('Partition num: {}'.format(obj.part))
print('Chunk size: {}'.format(obj.chunk_size))
print('Byte range: {}'.format(obj.data_byte_range))
counter = {}
data = obj.data_stream.read()
    print('Data length: {}'.format(len(data)))
for line in data.splitlines():
for word in line.decode('utf-8').split():
if word not in counter:
counter[word] = 1
else:
counter[word] += 1
logger.info('Testing map_reduce() over a bucket')
return counter
def my_map_function_url(id, obj):
print('I am processing the object from {}'.format(obj.url))
print('Function id: {}'.format(id))
print('Partition num: {}'.format(obj.part))
print('Chunk size: {}'.format(obj.chunk_size))
print('Byte range: {}'.format(obj.data_byte_range))
counter = {}
data = obj.data_stream.read()
    print('Data length: {}'.format(len(data)))
for line in data.splitlines():
for word in line.decode('utf-8').split():
if word not in counter:
counter[word] = 1
else:
counter[word] += 1
return counter
| 23.979592 | 79 | 0.620426 |
a2a303f916c799cd01ca2222c53ee156bc8b4c8b | 1,571 | py | Python | src/meetshaus.jmscontent/meetshaus/jmscontent/tests.py | potzenheimer/buildout.jms | c4f5e440840a7faf464c71d0bc0cc1ac50535f3f | [
"MIT"
] | null | null | null | src/meetshaus.jmscontent/meetshaus/jmscontent/tests.py | potzenheimer/buildout.jms | c4f5e440840a7faf464c71d0bc0cc1ac50535f3f | [
"MIT"
] | null | null | null | src/meetshaus.jmscontent/meetshaus/jmscontent/tests.py | potzenheimer/buildout.jms | c4f5e440840a7faf464c71d0bc0cc1ac50535f3f | [
"MIT"
] | null | null | null | import doctest
import unittest
from Testing import ZopeTestCase as ztc
from Products.Five import zcml
from Products.PloneTestCase import PloneTestCase as ptc
from Products.PloneTestCase.layer import PloneSite
from Products.PloneTestCase.layer import onsetup
import meetshaus.jmscontent
OPTION_FLAGS = doctest.NORMALIZE_WHITESPACE | \
doctest.ELLIPSIS
ptc.setupPloneSite(products=['meetshaus.jmscontent'])
class TestCase(ptc.PloneTestCase):
class layer(PloneSite):
@classmethod
def setUp(cls):
zcml.load_config('configure.zcml',
meetshaus.jmscontent)
@classmethod
def tearDown(cls):
pass
def test_suite():
return unittest.TestSuite([
# Unit tests
#doctestunit.DocFileSuite(
# 'README.txt', package='meetshaus.jmscontent',
# setUp=testing.setUp, tearDown=testing.tearDown),
#doctestunit.DocTestSuite(
# module='meetshaus.jmscontent.mymodule',
# setUp=testing.setUp, tearDown=testing.tearDown),
# Integration tests that use PloneTestCase
ztc.ZopeDocFileSuite(
'INTEGRATION.txt',
package='meetshaus.jmscontent',
optionflags = OPTION_FLAGS,
test_class=TestCase),
#ztc.FunctionalDocFileSuite(
# 'BROWSER.TXT',
# package='meetshaus.jmscontent',
# optionflags = OPTION_FLAGS,
# test_class=TestCase),
])
if __name__ == '__main__':
unittest.main(defaultTest='test_suite')
| 24.936508 | 61 | 0.649905 |
209f86bb4ed9e0a23160652a0a45ff01eefcf070 | 1,874 | py | Python | pychron/canvas/tasks/canvas_plugin.py | ael-noblegas/pychron | 6ebbbb1f66a614972b62b7a9be4c784ae61b5d62 | [
"Apache-2.0"
] | 1 | 2019-02-27T21:57:44.000Z | 2019-02-27T21:57:44.000Z | pychron/canvas/tasks/canvas_plugin.py | ael-noblegas/pychron | 6ebbbb1f66a614972b62b7a9be4c784ae61b5d62 | [
"Apache-2.0"
] | 80 | 2018-07-17T20:10:20.000Z | 2021-08-17T15:38:24.000Z | pychron/canvas/tasks/canvas_plugin.py | AGESLDEO/pychron | 1a81e05d9fba43b797f335ceff6837c016633bcf | [
"Apache-2.0"
] | null | null | null | # ===============================================================================
# Copyright 2013 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from __future__ import absolute_import
from envisage.ui.tasks.task_extension import TaskExtension
from envisage.ui.tasks.task_factory import TaskFactory
# ============= standard library imports ========================
# ============= local library imports ==========================
from pychron.canvas.tasks.canvas_task import CanvasDesignerTask
from pychron.envisage.tasks.base_task_plugin import BaseTaskPlugin
class CanvasDesignerPlugin(BaseTaskPlugin):
id = 'pychron.canvas_designer.plugin'
def _task_extensions_default(self):
return [TaskExtension(actions=[])]
def _tasks_default(self):
ts = [TaskFactory(id='pychron.canvas_designer',
name='Canvas Designer',
factory=self._task_factory,
accelerator='Ctrl+Shift+D',
)]
return ts
def _task_factory(self):
t = CanvasDesignerTask()
return t
def _preferences_panes_default(self):
return []
# ============= EOF =============================================
| 36.745098 | 81 | 0.584845 |
cc8835de70d94b77c8343c3ce4ce4a8305278e2e | 237 | py | Python | odoo-13.0/venv/lib/python3.8/site-packages/reportlab/graphics/charts/__init__.py | VaibhavBhujade/Blockchain-ERP-interoperability | b5190a037fb6615386f7cbad024d51b0abd4ba03 | [
"MIT"
] | 12 | 2019-08-02T07:58:16.000Z | 2022-01-31T23:45:08.000Z | odoo-13.0/venv/lib/python3.8/site-packages/reportlab/graphics/charts/__init__.py | VaibhavBhujade/Blockchain-ERP-interoperability | b5190a037fb6615386f7cbad024d51b0abd4ba03 | [
"MIT"
] | 10 | 2020-06-05T21:41:01.000Z | 2022-02-10T07:33:38.000Z | odoo-13.0/venv/lib/python3.8/site-packages/reportlab/graphics/charts/__init__.py | VaibhavBhujade/Blockchain-ERP-interoperability | b5190a037fb6615386f7cbad024d51b0abd4ba03 | [
"MIT"
] | 11 | 2019-07-31T16:23:36.000Z | 2022-01-29T08:30:07.000Z | #Copyright ReportLab Europe Ltd. 2000-2017
#see license.txt for license details
#history https://bitbucket.org/rptlab/reportlab/history-node/tip/src/reportlab/graphics/charts/__init__.py
__version__='3.3.0'
__doc__='''Business charts'''
| 39.5 | 106 | 0.797468 |
ad9e3c129836f3fc9ce77e8e6afb1fb98392b1d0 | 14,609 | py | Python | datalogger/DataListener.py | geraet2/pylarexx | 1da7d52b25faa3fa5440eb679100b402b2c89441 | [
"Apache-2.0"
] | 3 | 2020-01-18T13:25:49.000Z | 2020-08-20T09:32:26.000Z | datalogger/DataListener.py | geraet2/pylarexx | 1da7d52b25faa3fa5440eb679100b402b2c89441 | [
"Apache-2.0"
] | 5 | 2020-01-27T08:48:12.000Z | 2020-07-23T12:12:36.000Z | datalogger/DataListener.py | geraet2/pylarexx | 1da7d52b25faa3fa5440eb679100b402b2c89441 | [
"Apache-2.0"
] | 4 | 2019-02-15T08:04:48.000Z | 2020-07-26T19:22:53.000Z | '''
Created on 23.11.2017
updated on 19.01.2020
@author: Florian Gleixner
@Updater: Inonoob
@license: pylarexx is licensed under the Apache License, version 2, see License.txt
DataListener objects can be added to a Logger instance via the "output" configuration.
DataListeners get all values from the Sensor instances through the Logger. They can write them to stdout or to a file,
serve them on a TCP socket, publish them via MQTT, or store them in a database.
'''
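# Editorial sketch (assumed from the docstring above, not part of the original
# file): listeners are wired up through the "output" section of the pylarexx
# configuration; a hypothetical YAML fragment could look like:
#
#   output:
#     - type: FileOutListener
#       params:
#         filename: /tmp/pylarexx.out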
import time
import logging
import socketserver
import threading
try:
import paho.mqtt.client as mqtt
except ModuleNotFoundError:
    logging.warning('No mqtt support')
import json
import sqlite3
try:
from influxdb import InfluxDBClient
except ModuleNotFoundError:
    logging.warning('No influxdb support')
from datetime import datetime
class DataListener(object):
def __init__(self, params):
self.params = params
def onNewData(self, data, sensor):
raise NotImplementedError
class LoggingListener(DataListener):
'''
Listener that uses logging to print data. For debugging purposes
'''
def onNewData(self, data, sensor):
logging.info("Datapoint: sensorid %s, raw data: %d cooked: %f %s timestamp: %d from sensor %s type %s" % (
sensor.displayid, data['rawvalue'], sensor.rawToCooked(data['rawvalue']), sensor.unit,
data['timestamp'], sensor.name, sensor.type))
class InfluxDBListener(DataListener):
def __init__(self, params):
super().__init__(params)
self.host = self.params.get('host','127.0.0.1')
self.port = self.params.get('port','8086')
self.user = self.params.get('user','pi')
self.password = self.params.get('password','raspberry')
self.dbname = self.params.get('dbname')
def onNewData(self, data, sensor):
client = InfluxDBClient(self.host, self.port, self.user, self.password, self.dbname)
if 'timestamp' in data:
timestamp = datetime.utcfromtimestamp(data["timestamp"]).strftime('%Y-%m-%dT%H:%M:%SZ')
else:
timestamp = datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ')
json_body = [
{
"measurement": "arexx",
"tags": {
"Location": sensor.name,
"sensorid": sensor.displayid,
"SensorType": sensor.type,
"Unit": sensor.unit
},
"time": timestamp,
"fields": {
"SensorValue": sensor.rawToCooked(data['rawvalue'])
}
}
]
client.write_points(json_body)
class Sqlite3Listener(DataListener):
'''
Listener that outputs into an sqlite database
'''
def __init__(self, params):
super().__init__(params)
self.filename = self.params.get('filename', '/tmp/pylarexx.db')
def onNewData(self, data, sensor):
conn = sqlite3.connect(self.filename)
curs = conn.cursor()
sqlTable = '''CREATE TABLE IF NOT EXISTS pylarexx (id INTEGER PRIMARY KEY, timestamp long, Location string, sensorid integer, SensorType string, SensorValue float, Unit string);'''
sqlValues ='''INSERT INTO pylarexx (timestamp, Location, sensorid, SensorType, SensorValue, Unit) VALUES (?,?,?,?,?,?);'''
data_tuple = (data['timestamp'], sensor.name, sensor.displayid,sensor.type,sensor.rawToCooked(data['rawvalue']),sensor.unit)
curs.execute(sqlTable)
curs.execute(sqlValues,data_tuple)
conn.commit()
conn.close()
class FileOutListener(DataListener):
'''
Listener that saves Data to a file
'''
def __init__(self, params):
super().__init__(params)
self.filename = self.params.get('filename', '/tmp/pylarexx.out')
self.status = 'not initialized'
self.openLogfile()
def openLogfile(self):
try:
# TODO: close file
self.fd = open(self.filename, 'a')
self.status = 'ready'
except Exception as e:
self.status = 'error'
logging.error("FileOutListener: Unable to open file %s. Error message: %s" % (self.filename, e))
def onNewData(self, data, sensor):
if self.status != 'ready':
self.openLogfile()
if self.status == 'ready':
            if data['signal'] is None:
signaltext = "-"
else:
signaltext = str(data['signal'])
self.fd.write('%d,%d,%f %s,%d,%s,%s,%s\n' % (
sensor.displayid, data['rawvalue'], sensor.rawToCooked(data['rawvalue']), sensor.unit,
data['timestamp'], signaltext, sensor.name, sensor.type))
def __del__(self):
self.fd.close()
class RecentValuesListener(DataListener):
'''
Listener holds last value from each sensor. Listener can be queried over tcp
'''
def __init__(self, params):
super().__init__(params)
self.values = {}
self.sensors = {}
self.ready = False
self.openListeningPort()
self.server = None
def openListeningPort(self):
# make values visible in helper class
values = self.values
sensors = self.sensors
# helper classes
class ThreadedTCPRequestHandler(socketserver.BaseRequestHandler):
def setup(self):
response = ''
for sid, data in values.items():
sensor = sensors[sid]
                    if data['signal'] is None:
signaltext = "-"
else:
signaltext = str(data['signal'])
response += '%d,%f %s,%d,%s,%s,%s,%s\n' % (
sensor.displayid, sensor.rawToCooked(data['rawvalue']), sensor.unit, data['timestamp'],
signaltext, sensor.type, sensor.name, sensor.id)
self.request.sendall(bytes(response, 'UTF-8'))
class ThreadedTCPServer(socketserver.ThreadingMixIn, socketserver.TCPServer):
pass
# start tcp server
try:
host = self.params.get('host', 'localhost')
port = self.params.get('port', 4711)
logging.info("Creating TCP server at %s:%s" % (host, port))
self.server = ThreadedTCPServer((host, int(port)), ThreadedTCPRequestHandler)
server_thread = threading.Thread(target=self.server.serve_forever)
server_thread.daemon = True
logging.debug("Starting TCP server")
server_thread.start()
self.ready = True
except Exception as e:
logging.error("Unable to start TCP Server: %s", e)
def onNewData(self, data, sensor):
self.values[sensor.id] = data
self.sensors[sensor.id] = sensor
if not self.ready:
self.openListeningPort()
def __del__(self):
self.server.server_close()
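# Usage note (editorial): the handler above writes one CSV line per sensor to
# any client that connects, so with the default parameters the listener can be
# queried with, e.g., "nc localhost 4711".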
class MQTTListener(DataListener):
'''
Listener that sends values to a MQTT Broker
Data are formatted following the mqtt homie convention:
https://homieiot.github.io/
https://homieiot.github.io/specification/
and/or the home assistant mqtt auto discovery format
https://www.home-assistant.io/docs/mqtt/discovery/
'''
def __init__(self, params):
super().__init__(params)
self.mqttClient = mqtt.Client()
self.values = {}
self.ready = False
self.connect()
def on_connect(self, client, userdata, flags, rc):
logging.info("Connected to mqtt broker with result code %d", rc)
# Subscribe to anything? Not at the moment.
def on_message(self, client, userdata, msg):
logging.debug("Got message from mqtt broker: %s / %s", (msg.topic, msg.payload))
def connect(self):
try:
host = self.params.get('host', 'localhost')
port = self.params.get('port', 1883)
logging.info("Connecting to mqtt broker at %s:%s" % (host, port))
self.mqttClient.on_connect = self.on_connect;
self.mqttClient.on_message = self.on_message;
self.mqttClient.connect(host, port)
self.mqttClient.loop_start()
self.ready = True
except Exception as e:
logging.error("Unable to communicate with mqtt broker: %s", e)
def onNewData(self, data, sensor):
payloadFormat = self.params.get('payload_format', 'home-assistant')
if payloadFormat == 'homie':
self.sendHomieMessages(data, sensor)
if payloadFormat == 'home-assistant':
self.sendHomeAssistantMessage(data, sensor)
def sendHomeAssistantMessage(self, data, sensor):
try:
newSensor = False
self.values[sensor.displayid]
except Exception as e:
newSensor = True
if self.ready:
try:
topicroot = '%s/%s' % (self.params.get('mqtt_base_topic', 'homeassistant'), 'sensor')
topicconfig = '%s/%s_%s/config' % (
topicroot, self.params.get('mqtt_device', 'pylarexx'), sensor.displayid)
topicstate = '%s/%s_%s/state' % (
topicroot, self.params.get('mqtt_device', 'pylarexx'), sensor.displayid)
if newSensor:
logging.debug('New Sensor config')
unit_of_measurement = sensor.unit
if unit_of_measurement == '%RH':
unit_of_measurement = '%'
stype=sensor.type.lower()
if stype == "relative humidity":
stype="humidity"
payload = {'name': '%s %s' % (sensor.name, sensor.type),
'device_class': stype,
'state_topic': topicstate,
'unit_of_measurement': unit_of_measurement,
'value_template': '{{value_json.%s}}' % stype,
}
self.mqttClient.publish(topicconfig, json.dumps(payload), 0, True)
statePayload = {}
statePayload[sensor.type.lower()] = '%.2f' % sensor.rawToCooked(data['rawvalue'])
self.mqttClient.publish(topicstate, json.dumps(statePayload))
except Exception as e:
logging.error("Error publishing mqtt messages: %s", e)
def sendHomieMessages(self, data, sensor):
try:
newSensor = False
self.values[sensor.displayid]
except Exception as e:
newSensor = True
self.values[sensor.displayid] = data
if self.ready:
try:
topicroot = '%s/%s' % (
self.params.get('mqtt_base_topic', 'homie'), self.params.get('mqtt_device', 'pylarexx'))
logging.debug("publishing MQTT messages with topic root %s" % topicroot)
if newSensor:
logging.debug("Updating MQTT device")
self.mqttClient.publish('%s/$homie' % topicroot, self.params.get('homie_convention_version', '3.0'),
0, True)
self.mqttClient.publish('%s/$name' % topicroot, self.params.get('mqtt_device_name',
'Python MQTT Adapter for Arexx Multilogger'),
0, True)
nodes = []
for sid, value in self.values.items():
nodes.append('sensor_%d' % sid)
nodestring = ','.join(nodes)
self.mqttClient.publish('%s/$nodes' % topicroot, nodestring, 0, True) # does this work?
self.mqttClient.publish('%s/$state' % topicroot, "ready", 0, True)
for sid, value in self.values.items():
logging.debug("Sending MQTT sensor values")
self.mqttClient.publish('%s/sensor_%d/$type' % (topicroot, sid),
value['sensor'].manufacturerType)
self.mqttClient.publish('%s/sensor_%d/$name' % (topicroot, sid), value['sensor'].name)
self.mqttClient.publish('%s/sensor_%d/$properties' % (topicroot, sid),
value['sensor'].type.lower())
self.mqttClient.publish(
'%s/sensor_%d/%s/$name' % (topicroot, sid, value['sensor'].type.lower()),
'%s %s' % (value['sensor'].name, value['sensor'].type))
self.mqttClient.publish(
'%s/sensor_%d/%s/$datatype' % (topicroot, sid, value['sensor'].type.lower()), 'float')
self.mqttClient.publish(
'%s/sensor_%d/%s/$unit' % (topicroot, sid, value['sensor'].type.lower()),
value['sensor'].unit)
self.mqttClient.publish('%s/sensor_%d/%s' % (topicroot, sid, value['sensor'].type.lower()),
'%.2f' % value['sensor'].rawToCooked(value['rawvalue']))
else:
logging.debug("Sending MQTT sensor values")
sid = sensor.displayid
self.mqttClient.publish('%s/sensor_%d/$type' % (topicroot, sid), sensor.manufacturerType)
self.mqttClient.publish('%s/sensor_%d/$name' % (topicroot, sid), sensor.name)
self.mqttClient.publish('%s/sensor_%d/$properties' % (topicroot, sid), sensor.type.lower())
self.mqttClient.publish('%s/sensor_%d/%s/$name' % (topicroot, sid, sensor.type.lower()),
'%s %s' % (sensor.name, sensor.type))
self.mqttClient.publish('%s/sensor_%d/%s/$datatype' % (topicroot, sid, sensor.type.lower()),
'float')
self.mqttClient.publish('%s/sensor_%d/%s/$unit' % (topicroot, sid, sensor.type.lower()),
sensor.unit)
self.mqttClient.publish('%s/sensor_%d/%s' % (topicroot, sid, sensor.type.lower()),
'%.2f' % sensor.rawToCooked(data['rawvalue']))
except Exception as e:
logging.error("Error publishing mqtt messages: %s", e)
| 40.921569 | 188 | 0.549593 |
facf39f33f91294814fc36d7c97339acb3ea1708 | 2,555 | py | Python | test/test_iris_vendor_slack.py | dwang159/iris-api | f607fe4c5595469766470b899950d97570e8aebe | [
"BSD-2-Clause"
] | null | null | null | test/test_iris_vendor_slack.py | dwang159/iris-api | f607fe4c5595469766470b899950d97570e8aebe | [
"BSD-2-Clause"
] | null | null | null | test/test_iris_vendor_slack.py | dwang159/iris-api | f607fe4c5595469766470b899950d97570e8aebe | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/env python
# -*- coding:utf-8 -*-
from iris.vendors.iris_slack import iris_slack
import ujson as json
def test_attachments_construction_for_incident():
slack_vendor = iris_slack({
'auth_token': 'abc',
'iris_incident_url': 'http://foo.bar/incidents',
'message_attachments': {
'fallback': 'foo fallback',
'pretext': 'foo pretext',
}
})
fake_msg = {
'application': 'grafana',
'incident_id': 123,
'body': u'test body',
'message_id': 456,
'destination': 'user1'
}
msg_payload = slack_vendor.get_message_payload(fake_msg)
assert msg_payload['text'] == '[grafana] %s' % fake_msg['body']
assert msg_payload['token'] == 'abc'
assert msg_payload['channel'] == '@user1'
attachments = msg_payload['attachments']
assert json.loads(attachments) == [{
'fallback': 'foo fallback',
'pretext': 'foo pretext',
'title': 'Iris incident %r' % fake_msg['incident_id'],
'mrkdwn_in': ['pretext'],
'attachment_type': 'default',
'callback_id': fake_msg['message_id'],
'color': 'danger',
'title_link': 'http://foo.bar/incidents/%d' % fake_msg['incident_id'],
'actions': [
{
'name': 'claim',
'text': 'Claim Incident',
'type': 'button',
'value': 'claimed'
},
{
'name': 'claim all',
'text': 'Claim All',
'style': 'danger',
'type': 'button',
'value': 'claimed all',
"confirm": {
"title": "Are you sure?",
"text": "This will claim all active incidents targeting you.",
"ok_text": "Yes",
"dismiss_text": "No"
}
}
]
}]
def test_attachments_construction_for_notification():
slack_vendor = iris_slack({
'auth_token': 'abc',
'iris_incident_url': 'http://foo.bar/incidents',
'message_attachments': {
'fallback': 'foo fallback',
'pretext': 'foo pretext',
}
})
fake_msg = {
'application': 'grafana',
'body': 'test body notification',
'destination': 'user1'
}
msg_payload = slack_vendor.get_message_payload(fake_msg)
assert msg_payload == {
'text': '[grafana] %s' % fake_msg['body'],
'token': 'abc',
'channel': '@user1'
}
| 30.783133 | 82 | 0.509589 |
8336881ae4a446e58ea36d5ab297459f53cebfd2 | 744 | py | Python | app/app/tests/conftest.py | mutalimov95/fastapi-mongodb-example | fc0ca6cf7e7b1ec1036a95cdad750da6a6b0e2cc | [
"MIT"
] | null | null | null | app/app/tests/conftest.py | mutalimov95/fastapi-mongodb-example | fc0ca6cf7e7b1ec1036a95cdad750da6a6b0e2cc | [
"MIT"
] | null | null | null | app/app/tests/conftest.py | mutalimov95/fastapi-mongodb-example | fc0ca6cf7e7b1ec1036a95cdad750da6a6b0e2cc | [
"MIT"
] | null | null | null | import pytest
from app.core.config import settings
from app.db.mongodb import db
from app.tests.utils.user import authentication_token_from_email
from app.tests.utils.utils import get_server_api, get_superuser_token_headers
@pytest.fixture(scope="module")
def server_api():
return get_server_api()
@pytest.fixture(scope="module")
def superuser_token_headers():
return get_superuser_token_headers()
@pytest.fixture(scope="module")
def normal_user_token_headers():
return authentication_token_from_email(settings.EMAIL_TEST_USER)
@pytest.fixture(scope="session", autouse=True)
def db_setup(request):
db.connect_to_mongo()
def db_teardown():
db.close_mongo_connection()
request.addfinalizer(db_teardown)
| 23.25 | 77 | 0.787634 |
524cb7636575ece2c65f8e878fb9d48c672547d1 | 6,974 | py | Python | selfdrive/test/process_replay/test_processes.py | moneypro/openpilot | 4dfd8b97027324e4ac24d44af8998d746f83fae3 | [
"MIT"
] | 4 | 2021-03-27T12:54:21.000Z | 2021-05-25T03:27:48.000Z | selfdrive/test/process_replay/test_processes.py | toyboxZ/openpilot | 9c60d8a7cb95adf81b3abcd839ae7427a7d2ac4f | [
"MIT"
] | 16 | 2021-04-01T00:28:36.000Z | 2021-05-22T15:37:07.000Z | selfdrive/test/process_replay/test_processes.py | toyboxZ/openpilot | 9c60d8a7cb95adf81b3abcd839ae7427a7d2ac4f | [
"MIT"
] | 2 | 2021-05-19T08:19:36.000Z | 2021-06-12T04:53:20.000Z | #!/usr/bin/env python3
import argparse
import os
import sys
from typing import Any
from selfdrive.car.car_helpers import interface_names
from selfdrive.test.process_replay.compare_logs import compare_logs
from selfdrive.test.process_replay.process_replay import CONFIGS, replay_process
from tools.lib.logreader import LogReader
INJECT_MODEL = 0
segments = [
("HYUNDAI", "02c45f73a2e5c6e9|2021-01-01--19-08-22--1"), # HYUNDAI.SONATA
("TOYOTA", "0982d79ebb0de295|2021-01-04--17-13-21--13"), # TOYOTA.PRIUS (INDI)
("TOYOTA2", "0982d79ebb0de295|2021-01-03--20-03-36--6"), # TOYOTA.RAV4 (LQR)
("HONDA", "0982d79ebb0de295|2021-01-08--10-13-10--6"), # HONDA.CIVIC (NIDEC)
("HONDA2", "a8e8bf6a3864361b|2021-01-04--03-01-18--2"), # HONDA.ACCORD (BOSCH)
("CHRYSLER", "52d86230ee29aa84|2021-01-10--17-16-34--30"), # CHRYSLER.PACIFICA
("SUBARU", "4d70bc5e608678be|2021-01-15--17-02-04--5"), # SUBARU.IMPREZA
("GM", "ae3ed0eb20960a20|2021-01-15--15-04-06--8"), # GM.VOLT
("NISSAN", "e4d79cf6b8b19a0d|2021-01-17--14-48-08--7"), # NISSAN.XTRAIL
("VOLKSWAGEN", "4634226ed41b59ea|2021-03-26--14-14-18--14"), # VW.AUDI_A3_MK3
# Enable when port is tested and dascamOnly is no longer set
#("MAZDA", "32a319f057902bb3|2020-04-27--15-18-58--2"), # MAZDA.CX5
]
# dashcamOnly car makes don't need to be tested until a full port is done
excluded_interfaces = ["mock", "ford", "mazda"]
BASE_URL = "https://commadataci.blob.core.windows.net/openpilotci/"
# run the full test (including checks) when no args given
FULL_TEST = len(sys.argv) <= 1
def get_segment(segment_name, original=True):
route_name, segment_num = segment_name.rsplit("--", 1)
if original:
rlog_url = BASE_URL + "%s/%s/rlog.bz2" % (route_name.replace("|", "/"), segment_num)
else:
process_replay_dir = os.path.dirname(os.path.abspath(__file__))
model_ref_commit = open(os.path.join(process_replay_dir, "model_ref_commit")).read().strip()
rlog_url = BASE_URL + "%s/%s/rlog_%s.bz2" % (route_name.replace("|", "/"), segment_num, model_ref_commit)
return rlog_url
def test_process(cfg, lr, cmp_log_fn, ignore_fields=None, ignore_msgs=None):
if ignore_fields is None:
ignore_fields = []
if ignore_msgs is None:
ignore_msgs = []
cmp_log_path = cmp_log_fn if os.path.exists(cmp_log_fn) else BASE_URL + os.path.basename(cmp_log_fn)
cmp_log_msgs = list(LogReader(cmp_log_path))
log_msgs = replay_process(cfg, lr)
# check to make sure openpilot is engaged in the route
# TODO: update routes so enable check can run
# failed enable check: honda bosch, hyundai, chrysler, and subaru
if cfg.proc_name == "controlsd" and FULL_TEST and False:
for msg in log_msgs:
if msg.which() == "controlsState":
if msg.controlsState.active:
break
else:
segment = cmp_log_fn.split("/")[-1].split("_")[0]
raise Exception("Route never enabled: %s" % segment)
try:
return compare_logs(cmp_log_msgs, log_msgs, ignore_fields+cfg.ignore, ignore_msgs, cfg.tolerance)
except Exception as e:
return str(e)
def format_diff(results, ref_commit):
diff1, diff2 = "", ""
diff2 += "***** tested against commit %s *****\n" % ref_commit
failed = False
for segment, result in list(results.items()):
diff1 += "***** results for segment %s *****\n" % segment
diff2 += "***** differences for segment %s *****\n" % segment
for proc, diff in list(result.items()):
diff1 += "\t%s\n" % proc
diff2 += "*** process: %s ***\n" % proc
if isinstance(diff, str):
diff1 += "\t\t%s\n" % diff
failed = True
elif len(diff):
cnt = {}
for d in diff:
diff2 += "\t%s\n" % str(d)
k = str(d[1])
cnt[k] = 1 if k not in cnt else cnt[k] + 1
for k, v in sorted(cnt.items()):
diff1 += "\t\t%s: %s\n" % (k, v)
failed = True
return diff1, diff2, failed
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Regression test to identify changes in a process's output")
# whitelist has precedence over blacklist in case both are defined
parser.add_argument("--whitelist-procs", type=str, nargs="*", default=[],
help="Whitelist given processes from the test (e.g. controlsd)")
parser.add_argument("--whitelist-cars", type=str, nargs="*", default=[],
help="Whitelist given cars from the test (e.g. HONDA)")
parser.add_argument("--blacklist-procs", type=str, nargs="*", default=[],
help="Blacklist given processes from the test (e.g. controlsd)")
parser.add_argument("--blacklist-cars", type=str, nargs="*", default=[],
help="Blacklist given cars from the test (e.g. HONDA)")
parser.add_argument("--ignore-fields", type=str, nargs="*", default=[],
help="Extra fields or msgs to ignore (e.g. carState.events)")
parser.add_argument("--ignore-msgs", type=str, nargs="*", default=[],
help="Msgs to ignore (e.g. carEvents)")
args = parser.parse_args()
cars_whitelisted = len(args.whitelist_cars) > 0
procs_whitelisted = len(args.whitelist_procs) > 0
process_replay_dir = os.path.dirname(os.path.abspath(__file__))
try:
ref_commit = open(os.path.join(process_replay_dir, "ref_commit")).read().strip()
except FileNotFoundError:
print("couldn't find reference commit")
sys.exit(1)
print("***** testing against commit %s *****" % ref_commit)
# check to make sure all car brands are tested
if FULL_TEST:
tested_cars = set(c.lower() for c, _ in segments)
untested = (set(interface_names) - set(excluded_interfaces)) - tested_cars
assert len(untested) == 0, "Cars missing routes: %s" % (str(untested))
results: Any = {}
for car_brand, segment in segments:
if (cars_whitelisted and car_brand.upper() not in args.whitelist_cars) or \
(not cars_whitelisted and car_brand.upper() in args.blacklist_cars):
continue
print("***** testing route segment %s *****\n" % segment)
results[segment] = {}
rlog_fn = get_segment(segment)
lr = LogReader(rlog_fn)
for cfg in CONFIGS:
if (procs_whitelisted and cfg.proc_name not in args.whitelist_procs) or \
(not procs_whitelisted and cfg.proc_name in args.blacklist_procs):
continue
cmp_log_fn = os.path.join(process_replay_dir, "%s_%s_%s.bz2" % (segment, cfg.proc_name, ref_commit))
results[segment][cfg.proc_name] = test_process(cfg, lr, cmp_log_fn, args.ignore_fields, args.ignore_msgs)
diff1, diff2, failed = format_diff(results, ref_commit)
with open(os.path.join(process_replay_dir, "diff.txt"), "w") as f:
f.write(diff2)
print(diff1)
if failed:
print("TEST FAILED")
print("\n\nTo update the reference logs for this test run:")
print("./update_refs.py")
else:
print("TEST SUCCEEDED")
sys.exit(int(failed))
| 38.960894 | 111 | 0.656295 |
dac4de9348b930a17e5853238214dd3c6f029904 | 258 | py | Python | src/squad/types.py | douglasdaly/spot-robot | 7a4fdd7eb5fe5fc2d31180ed6b9f7ea21647bea2 | [
"MIT"
] | null | null | null | src/squad/types.py | douglasdaly/spot-robot | 7a4fdd7eb5fe5fc2d31180ed6b9f7ea21647bea2 | [
"MIT"
] | null | null | null | src/squad/types.py | douglasdaly/spot-robot | 7a4fdd7eb5fe5fc2d31180ed6b9f7ea21647bea2 | [
"MIT"
] | null | null | null | from typing import Tuple, TypeVar, Union
import numpy as np
import numpy.typing as npt
Vector = Union[Tuple[float, float, float], npt.NDArray[np.floating]]
VectorT = TypeVar(
"VectorT",
Tuple[float, float, float],
npt.NDArray[np.floating],
)
| 18.428571 | 68 | 0.705426 |
2c2102c6a8d9e5a5f77b3510867cb284bb024274 | 4,278 | py | Python | tests/test_vkclient.py | ekonda/vkpore | b863e06a5b3d0d34240bd032900c5711b97b096b | [
"MIT"
] | 4 | 2019-05-29T10:53:48.000Z | 2020-05-10T14:17:12.000Z | tests/test_vkclient.py | ekonda/vkpore | b863e06a5b3d0d34240bd032900c5711b97b096b | [
"MIT"
] | 25 | 2019-06-17T22:54:15.000Z | 2020-02-06T22:06:24.000Z | tests/test_vkclient.py | ekonda/vkpore | b863e06a5b3d0d34240bd032900c5711b97b096b | [
"MIT"
] | 2 | 2019-08-20T10:32:41.000Z | 2019-09-26T14:15:21.000Z | # pylint: disable=missing-docstring,protected-access,redefined-outer-name
import pytest
import aiohttp
from vkpore.vkclient import VkClient
from .testing_tools import Session
@pytest.mark.asyncio
async def test_initialization():
client = VkClient("token", session=Session())
await client.initialize()
assert client.group_id == 1
assert client.group_name == "Group"
assert client._session.calls == [
(
"https://api.vk.com/method/groups.getById",
{"v": "5.92", "access_token": "token"}
),
(
"https://api.vk.com/method/groups.setLongPollSettings",
{
"v": "5.92", "access_token": "token", "group_id": 1,
"api_version": "5.92", "enabled": 1
}
)
]
@pytest.mark.asyncio
async def test_loop_twice():
client = VkClient("token", session=Session())
client.start()
with pytest.raises(RuntimeError):
client.start()
await client.stop()
@pytest.mark.asyncio
async def test_loop_restart():
client = VkClient("token", session=Session())
assert not client._running_loop
client.start() # Start
assert client._running_loop
await client.stop() # Stop
assert not client._running_loop
client.start() # Start again
assert client._running_loop
await client.stop() # Stop again
assert not client._running_loop
@pytest.mark.asyncio
async def test_request():
client = VkClient("token", session=Session())
client.start()
response = await client.request(
"messages.send", user_id=1, message="hey",
)
assert response == 7347
assert client._session.calls[0] == (
"https://api.vk.com/method/execute",
{
"v": "5.92", "access_token": "token",
"code": 'return [API.messages.send({"user_id": 1, "message": "hey"}),];',
}
)
await client.stop()
@pytest.mark.asyncio
async def test_request_without_loop():
client = VkClient("token", session=Session())
with pytest.raises(RuntimeError):
await client.request("method", arg1="arg1")
@pytest.mark.asyncio
async def test_request_fail():
client = VkClient("token", session=Session(execute_fail=True))
client.start()
response = await client.request(
"messages.send", user_id=1, message="hey",
)
assert response is None
await client.stop()
@pytest.mark.asyncio
async def test_raw_request():
client = VkClient("token", session=Session())
response = await client.raw_request(
"messages.send", user_id=1, message="hey",
)
assert response == 7347
assert client._session.calls == [
(
"https://api.vk.com/method/messages.send",
{"v": "5.92", "access_token": "token", "message": "hey", "user_id": 1},
),
]
@pytest.mark.asyncio
async def test_exception():
client = VkClient("token", session=Session(aiohttp.ClientError))
response = await client.raw_request(
"messages.send", user_id=1, message="hey",
)
assert response is None
@pytest.mark.asyncio
async def test_longpoll():
client = VkClient("token", session=Session())
await client.initialize()
get_updates = client.longpoll()
client.start()
updates = await get_updates() + await get_updates()
assert updates == [
{"type": "no", "object": 1}, {"type": "no", "object": 2},
{"type": "no", "object": 3}, {"type": "no", "object": 1},
{"type": "no", "object": 2}, {"type": "no", "object": 3},
]
await client.stop()
@pytest.mark.asyncio
async def test_longpoll_failed():
client = VkClient("token", session=Session(longpoll_failed=2))
await client.initialize()
get_updates = client.longpoll()
client.start()
updates = await get_updates()
assert updates is None
await client.stop()
@pytest.mark.asyncio
async def test_longpoll_exception():
client = VkClient("token", session=Session())
await client.initialize()
client._session.exception = aiohttp.ClientError
get_updates = client.longpoll({"key": "a", "server": "x.x", "ts": 0})
client.start()
updates = await get_updates()
assert updates is None
await client.stop()
| 23.25 | 85 | 0.623422 |
7905dc86e6b2268b3729640356d766f5c8951bc3 | 712 | py | Python | problem0021.py | kmarcini/Project-Euler-Python | d644e8e1ec4fac70a9ab407ad5e1f0a75547c8d3 | [
"BSD-3-Clause"
] | null | null | null | problem0021.py | kmarcini/Project-Euler-Python | d644e8e1ec4fac70a9ab407ad5e1f0a75547c8d3 | [
"BSD-3-Clause"
] | null | null | null | problem0021.py | kmarcini/Project-Euler-Python | d644e8e1ec4fac70a9ab407ad5e1f0a75547c8d3 | [
"BSD-3-Clause"
] | null | null | null | ###########################
#
# #21 Amicable numbers - Project Euler
# https://projecteuler.net/problem=21
#
# Code by Kevin Marciniak
#
###########################
def sumproperdivisors(num):
    total = 0
    for x in range(1, num // 2 + 1):
        if num % x == 0:
            total += x
    return total
amicableList = []
for x in range(1, 10000):
    temp = sumproperdivisors(x)
    # x and temp are amicable when d(x) == temp, d(temp) == x, and x != temp
    if sumproperdivisors(temp) == x and temp != x:
        if x not in amicableList and temp not in amicableList:
            amicableList.append(x)
            amicableList.append(temp)
totalSum = sum(amicableList)
print(totalSum)
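# Worked check: the smallest amicable pair is (220, 284), since
# d(220) = 1+2+4+5+10+11+20+22+44+55+110 = 284 and d(284) = 1+2+4+71+142 = 220.
#     assert sumproperdivisors(220) == 284
#     assert sumproperdivisors(284) == 220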
| 20.941176 | 83 | 0.573034 |
7c3b21b8497fbf509855f5c9a29b7da097e8056e | 27,589 | py | Python | rotkehlchen/tests/exchanges/test_coinbase.py | coblee/rotki | d675f5c2d0df5176337b7b10038524ee74923482 | [
"BSD-3-Clause"
] | 1 | 2020-11-14T12:20:37.000Z | 2020-11-14T12:20:37.000Z | rotkehlchen/tests/exchanges/test_coinbase.py | coblee/rotki | d675f5c2d0df5176337b7b10038524ee74923482 | [
"BSD-3-Clause"
] | 3 | 2021-01-28T21:30:46.000Z | 2022-03-25T19:17:00.000Z | rotkehlchen/tests/exchanges/test_coinbase.py | coblee/rotki | d675f5c2d0df5176337b7b10038524ee74923482 | [
"BSD-3-Clause"
] | null | null | null | from unittest.mock import patch
from rotkehlchen.constants.assets import A_BTC, A_ETH, A_USD
from rotkehlchen.exchanges.coinbase import Coinbase
from rotkehlchen.exchanges.data_structures import AssetMovement, Trade
from rotkehlchen.fval import FVal
from rotkehlchen.tests.utils.history import TEST_END_TS
from rotkehlchen.tests.utils.mock import MockResponse
from rotkehlchen.typing import AssetMovementCategory, Location, TradeType
def test_name():
exchange = Coinbase('a', b'a', object(), object())
assert exchange.name == 'coinbase'
def test_coinbase_query_balances(function_scope_coinbase):
"""Test that coinbase balance query works fine for the happy path"""
coinbase = function_scope_coinbase
def mock_coinbase_accounts(url): # pylint: disable=unused-argument
response = MockResponse(
200,
"""
{
"pagination": {
"ending_before": null,
"starting_after": null,
"limit": 25,
"order": "desc",
"previous_uri": null,
"next_uri": null
},
"data": [
{
"id": "58542935-67b5-56e1-a3f9-42686e07fa40",
"name": "My Vault",
"primary": false,
"type": "vault",
"currency": "BTC",
"balance": {
"amount": "4.00000000",
"currency": "BTC"
},
"created_at": "2015-01-31T20:49:02Z",
"updated_at": "2015-01-31T20:49:02Z",
"resource": "account",
"resource_path": "/v2/accounts/58542935-67b5-56e1-a3f9-42686e07fa40",
"ready": true
},
{
"id": "2bbf394c-193b-5b2a-9155-3b4732659ede",
"name": "My Wallet",
"primary": true,
"type": "wallet",
"currency": "ETH",
"balance": {
"amount": "39.59000000",
"currency": "ETH"
},
"created_at": "2015-01-31T20:49:02Z",
"updated_at": "2015-01-31T20:49:02Z",
"resource": "account",
"resource_path": "/v2/accounts/2bbf394c-193b-5b2a-9155-3b4732659ede"
},
{
"id": "68542935-67b5-56e1-a3f9-42686e07fa40",
"name": "Another Wallet",
"primary": false,
"type": "vault",
"currency": "BTC",
"balance": {
"amount": "1.230000000",
"currency": "BTC"
},
"created_at": "2015-01-31T20:49:02Z",
"updated_at": "2015-01-31T20:49:02Z",
"resource": "account",
"resource_path": "/v2/accounts/68542935-67b5-56e1-a3f9-42686e07fa40",
"ready": true
}
]
}
""",
)
return response
with patch.object(coinbase.session, 'get', side_effect=mock_coinbase_accounts):
balances, msg = coinbase.query_balances()
assert msg == ''
assert len(balances) == 2
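        # the two BTC accounts (4.00 vault + 1.23 wallet) are merged into one entry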
assert balances[A_BTC]['amount'] == FVal('5.23')
assert balances[A_ETH]['amount'] == FVal('39.59')
assert 'usd_value' in balances[A_ETH]
assert 'usd_value' in balances[A_BTC]
warnings = coinbase.msg_aggregator.consume_warnings()
errors = coinbase.msg_aggregator.consume_errors()
assert len(warnings) == 0
assert len(errors) == 0
def test_coinbase_query_balances_unexpected_data(function_scope_coinbase):
"""Test that coinbase balance query works fine for the happy path"""
coinbase = function_scope_coinbase
coinbase.cache_ttl_secs = 0
data = """{
"data": [
{
"id": "58542935-67b5-56e1-a3f9-42686e07fa40",
"name": "My Vault",
"primary": false,
"type": "vault",
"currency": "BTC",
"balance": {
"amount": "4.00000000",
"currency": "BTC"
},
"created_at": "2015-01-31T20:49:02Z",
"updated_at": "2015-01-31T20:49:02Z",
"resource": "account",
"resource_path": "/v2/accounts/58542935-67b5-56e1-a3f9-42686e07fa40",
"ready": true
}]}"""
def query_coinbase_and_test(
response_str,
expected_warnings_num,
expected_errors_num,
contains_expected_msg=None,
):
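        """Query balances against `response_str` and verify messages/errors."""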
def mock_coinbase_accounts(url): # pylint: disable=unused-argument
return MockResponse(200, response_str)
with patch.object(coinbase.session, 'get', side_effect=mock_coinbase_accounts):
balances, msg = coinbase.query_balances()
warnings = coinbase.msg_aggregator.consume_warnings()
errors = coinbase.msg_aggregator.consume_errors()
if contains_expected_msg:
assert balances is None
assert contains_expected_msg in msg
elif expected_errors_num == 0 and expected_warnings_num == 0:
assert len(warnings) == 0
assert len(errors) == 0
assert msg == ''
assert len(balances) == 1
assert balances[A_BTC]['amount'] == FVal('4')
assert 'usd_value' in balances[A_BTC]
else:
assert len(warnings) == expected_warnings_num
assert len(errors) == expected_errors_num
assert msg == ''
assert len(balances) == 0
# test that all is fine with normal data
query_coinbase_and_test(data, expected_warnings_num=0, expected_errors_num=0)
# From now on unexpected data
# no data key
query_coinbase_and_test(
'{"foo": 1}',
expected_warnings_num=0,
expected_errors_num=0,
contains_expected_msg='Coinbase json response does not contain data',
)
# account entry without "balance" key
input_data = data.replace('"balance"', '"foo"')
query_coinbase_and_test(input_data, expected_warnings_num=0, expected_errors_num=1)
# account entry without amount in "balance"
input_data = data.replace('"amount"', '"foo"')
query_coinbase_and_test(input_data, expected_warnings_num=0, expected_errors_num=1)
# account entry without currency in "balance"
input_data = data.replace('"currency"', '"foo"')
query_coinbase_and_test(input_data, expected_warnings_num=0, expected_errors_num=1)
# account entry with invalid balance amount
input_data = data.replace('"4.00000000"', '"csadasdsd"')
query_coinbase_and_test(input_data, expected_warnings_num=0, expected_errors_num=1)
# account entry with unknown asset
input_data = data.replace('"BTC"', '"DDSADSAD"')
query_coinbase_and_test(input_data, expected_warnings_num=1, expected_errors_num=0)
# account entry with invalid asset
input_data = data.replace('"BTC"', 'null')
query_coinbase_and_test(input_data, expected_warnings_num=0, expected_errors_num=1)
BUYS_RESPONSE = """{
"pagination": {
"ending_before": null,
"starting_after": null,
"limit": 25,
"order": "desc",
"previous_uri": null,
"next_uri": null
},
"data": [{
"id": "9e14d574-30fa-5d85-b02c-6be0d851d61d",
"status": "completed",
"payment_method": {
"id": "83562370-3e5c-51db-87da-752af5ab9559",
"resource": "payment_method",
"resource_path": "/v2/payment-methods/83562370-3e5c-51db-87da-752af5ab9559"
},
"transaction": {
"id": "4117f7d6-5694-5b36-bc8f-847509850ea4",
"resource": "transaction",
"resource_path": "/v2/accounts/2bbf394c-193b-5b2a-9155-3b4732659ede/transactions/441b9494"
},
"amount": {
"amount": "486.34313725",
"currency": "BTC"
},
"total": {
"amount": "4863.43",
"currency": "USD"
},
"subtotal": {
"amount": "4862.42",
"currency": "USD"
},
"created_at": "2017-07-21T23:43:59-07:00",
"updated_at": "2017-07-21T23:43:59-07:00",
"resource": "buy",
"resource_path": "/v2/accounts/2bbf394c-193b-5b2a-9155-3b4732659ede/buys/9e14d574",
"committed": true,
"instant": false,
"fee": {
"amount": "1.01",
"currency": "USD"
},
"payout_at": "2017-07-23T23:44:08Z"
}]}"""
SELLS_RESPONSE = """{
"pagination": {
"ending_before": null,
"starting_after": null,
"limit": 25,
"order": "desc",
"previous_uri": null,
"next_uri": null
},
"data": [{
"id": "1e14d574-30fa-5d85-b02c-6be0d851d61d",
"status": "completed",
"payment_method": {
"id": "23562370-3e5c-51db-87da-752af5ab9559",
"resource": "payment_method",
"resource_path": "/v2/payment-methods/83562370-3e5c-51db-87da-752af5ab9559"
},
"transaction": {
"id": "3117f7d6-5694-5b36-bc8f-847509850ea4",
"resource": "transaction",
"resource_path": "/v2/accounts/2bbf394c-193b-5b2a-9155-3b4732659ede/transactions/4117f7d6"
},
"amount": {
"amount": "100.45",
"currency": "ETH"
},
"total": {
"amount": "8940.12",
"currency": "USD"
},
"subtotal": {
"amount": "8930.02",
"currency": "USD"
},
"created_at": "2015-03-26T13:42:00-07:00",
"updated_at": "2015-03-26T13:42:00-07:00",
"resource": "sell",
"resource_path": "/v2/accounts/2bbf394c-193b-5b2a-9155-3b4732659ede/sells/9e14d574",
"committed": true,
"instant": true,
"fee": {
"amount": "10.1",
"currency": "USD"
},
"payout_at": "2015-04-01T23:43:59-07:00"
}]}"""
DEPOSITS_RESPONSE = """{
"pagination": {
"ending_before": null,
"starting_after": null,
"limit": 25,
"order": "desc",
"previous_uri": null,
"next_uri": null
},
"data": [{
"id": "1130eaec-07d7-54c4-a72c-2e92826897df",
"status": "completed",
"payment_method": {
"id": "83562370-3e5c-51db-87da-752af5ab9559",
"resource": "payment_method",
"resource_path": "/v2/payment-methods/83562370-3e5c-51db-87da-752af5ab9559"
},
"transaction": {
"id": "441b9494-b3f0-5b98-b9b0-4d82c21c252a",
"resource": "transaction",
"resource_path": "/v2/accounts/2bbf394c-193b-5b2a-9155-3b4732659ede/transactions/441b9494"
},
"amount": {
"amount": "55.00",
"currency": "USD"
},
"subtotal": {
"amount": "54.95",
"currency": "USD"
},
"created_at": "2015-01-31T20:49:02Z",
"updated_at": "2015-02-11T16:54:02-08:00",
"resource": "deposit",
"resource_path": "/v2/accounts/2bbf394c-193b-5b2a-9155-3b4732659ede/deposits/67e0eaec",
"committed": true,
"fee": {
"amount": "0.05",
"currency": "USD"
},
"payout_at": "2018-02-18T16:54:00-08:00"
}]}"""
WITHDRAWALS_RESPONSE = """{
"pagination": {
"ending_before": null,
"starting_after": null,
"limit": 25,
"order": "desc",
"previous_uri": null,
"next_uri": null
},
"data": [{
"id": "146eaec-07d7-54c4-a72c-2e92826897df",
"status": "completed",
"payment_method": {
"id": "85562970-3e5c-51db-87da-752af5ab9559",
"resource": "payment_method",
"resource_path": "/v2/payment-methods/83562370-3e5c-51db-87da-752af5ab9559"
},
"transaction": {
"id": "441b9454-b3f0-5b98-b9b0-4d82c21c252a",
"resource": "transaction",
"resource_path": "/v2/accounts/2bbf394c-193b-5b2a-9155-3b4732659ede/transactions/441b9494"
},
"amount": {
"amount": "10.00",
"currency": "USD"
},
"subtotal": {
"amount": "9.99",
"currency": "USD"
},
"created_at": "2017-01-31T20:49:02Z",
"updated_at": "2017-01-31T20:49:02Z",
"resource": "withdrawal",
"resource_path": "/v2/accounts/2bbf394c-193b-5b2a-9155-3b4732659ede/withdrawals/67e0eaec",
"committed": true,
"fee": {
"amount": "0.01",
"currency": "USD"
},
"payout_at": null
}]}"""
TRANSACTIONS_RESPONSE = """{
"pagination": {
"ending_before": null,
"starting_after": null,
"limit": 25,
"order": "desc",
"previous_uri": null,
"next_uri": null
},
"data": [{
"id": "id1",
"type": "send",
"status": "completed",
"amount": {
"amount": "-0.05770427",
"currency": "ETH"
},
"native_amount": {
"amount": "-9.83",
"currency": "EUR"
},
"description": null,
"created_at": "2019-08-25T09:42:06Z",
"updated_at": "2019-08-25T09:43:42Z",
"resource": "transaction",
"resource_path": "/v2/accounts/foo/transactions/boo",
"instant_exchange": false,
"network": {
"status": "confirmed",
"hash": "0x558bfa4d2a4ef598ddb92233459c00eda9e6c14cda75e6773b90208cb6938169",
"transaction_url": "https://etherscan.io/tx/bbb",
"transaction_fee": {"amount": "0.00021", "currency": "ETH"},
"transaction_amount": {"amount": "0.05749427", "currency": "ETH"},
"confirmations": 86
},
"to": {
"resource": "ethereum_address",
"address": "0x6dcd6449dbca615e40d696328209686ea95327b2",
"currency": "ETH",
"address_info": {"address": "0xboo"}
},
"idem": "zzzz",
"details": {"title": "Sent Ethereum", "subtitle": "To Ethereum address"}
}]}"""
def mock_normal_coinbase_query(url): # pylint: disable=unused-argument
if 'buys' in url:
return MockResponse(200, BUYS_RESPONSE)
elif 'sells' in url:
return MockResponse(200, SELLS_RESPONSE)
elif 'deposits' in url:
return MockResponse(200, DEPOSITS_RESPONSE)
elif 'withdrawals' in url:
return MockResponse(200, WITHDRAWALS_RESPONSE)
elif 'transactions' in url:
return MockResponse(200, TRANSACTIONS_RESPONSE)
elif 'accounts' in url:
        # keep it simple: just return a single ID and ignore the rest of the fields
return MockResponse(200, '{"data": [{"id": "5fs23"}]}')
else:
raise AssertionError(f'Unexpected url {url} for test')
def test_coinbase_query_trade_history(function_scope_coinbase):
"""Test that coinbase trade history query works fine for the happy path"""
coinbase = function_scope_coinbase
with patch.object(coinbase.session, 'get', side_effect=mock_normal_coinbase_query):
trades = coinbase.query_trade_history(
start_ts=0,
end_ts=TEST_END_TS,
)
warnings = coinbase.msg_aggregator.consume_warnings()
errors = coinbase.msg_aggregator.consume_errors()
assert len(warnings) == 0
assert len(errors) == 0
assert len(trades) == 2
expected_trades = [Trade(
timestamp=1500853448,
location=Location.COINBASE,
pair='BTC_USD',
trade_type=TradeType.BUY,
amount=FVal("486.34313725"),
rate=FVal("9.997920454875299055122012005"),
fee=FVal("1.01"),
fee_currency=A_USD,
link='9e14d574-30fa-5d85-b02c-6be0d851d61d',
), Trade(
timestamp=1427402520,
location=Location.COINBASE,
pair='ETH_USD',
trade_type=TradeType.SELL,
amount=FVal("100.45"),
rate=FVal("88.90014932802389248382279741"),
fee=FVal("10.1"),
fee_currency=A_USD,
link='1e14d574-30fa-5d85-b02c-6be0d851d61d',
)]
assert trades == expected_trades
# and now try only a smaller time range
with patch.object(coinbase.session, 'get', side_effect=mock_normal_coinbase_query):
trades = coinbase.query_trade_history(
start_ts=0,
end_ts=1451606400,
)
warnings = coinbase.msg_aggregator.consume_warnings()
errors = coinbase.msg_aggregator.consume_errors()
assert len(warnings) == 0
assert len(errors) == 0
assert len(trades) == 1
assert trades[0].trade_type == TradeType.SELL
assert trades[0].timestamp == 1427402520
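# Note: the expected trade rates above are subtotal / amount from the mocked
# responses, e.g. for the buy: 4862.42 USD / 486.34313725 BTC. Assuming FVal
# mirrors Decimal's default 28-digit precision, this can be sanity-checked with:
#     from decimal import Decimal
#     Decimal('4862.42') / Decimal('486.34313725')
#     # Decimal('9.997920454875299055122012005')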
def query_coinbase_and_test(
coinbase,
query_fn_name,
buys_response=BUYS_RESPONSE,
buys_paginated_end=BUYS_RESPONSE,
sells_response=SELLS_RESPONSE,
sells_paginated_end=SELLS_RESPONSE,
deposits_response=DEPOSITS_RESPONSE,
withdrawals_response=WITHDRAWALS_RESPONSE,
expected_warnings_num=0,
expected_errors_num=0,
        # by default these mocks break only one of the two actions (buys or sells)
expected_actions_num=1,
):
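    """Patch the coinbase session with the given mocked responses and verify
    the returned actions, warnings and errors."""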
def mock_coinbase_query(url): # pylint: disable=unused-argument
if 'buys' in url:
if 'next-page' in url:
return MockResponse(200, buys_paginated_end)
else:
return MockResponse(200, buys_response)
elif 'sells' in url:
if 'next-page' in url:
return MockResponse(200, sells_paginated_end)
else:
return MockResponse(200, sells_response)
elif 'deposits' in url:
return MockResponse(200, deposits_response)
elif 'withdrawals' in url:
return MockResponse(200, withdrawals_response)
elif 'accounts' in url:
            # keep it simple: just return a single ID and ignore the rest of the fields
return MockResponse(200, '{"data": [{"id": "5fs23"}]}')
else:
raise AssertionError(f'Unexpected url {url} for test')
query_fn = getattr(coinbase, query_fn_name)
with patch.object(coinbase.session, 'get', side_effect=mock_coinbase_query):
actions = query_fn(
start_ts=0,
end_ts=TEST_END_TS,
)
errors = coinbase.msg_aggregator.consume_errors()
warnings = coinbase.msg_aggregator.consume_warnings()
if expected_errors_num == 0 and expected_warnings_num == 0 and expected_actions_num == 1:
assert len(actions) == 2
assert len(errors) == 0
assert len(warnings) == 0
else:
assert len(actions) == expected_actions_num
assert len(errors) == expected_errors_num
assert len(warnings) == expected_warnings_num
def test_coinbase_query_trade_history_unexpected_data(function_scope_coinbase):
"""Test that coinbase trade history query handles unexpected data properly"""
coinbase = function_scope_coinbase
coinbase.cache_ttl_secs = 0
# first query with proper data and expect no errors
query_coinbase_and_test(
coinbase=coinbase,
query_fn_name='query_online_trade_history',
expected_warnings_num=0,
expected_errors_num=0,
)
# invalid payout_at timestamp
broken_response = BUYS_RESPONSE.replace('"2017-07-23T23:44:08Z"', 'true')
query_coinbase_and_test(
coinbase=coinbase,
query_fn_name='query_online_trade_history',
buys_response=broken_response,
expected_warnings_num=0,
expected_errors_num=1,
)
# invalid created_at timestamp
broken_response = SELLS_RESPONSE.replace('"2015-03-26T13:42:00-07:00"', '"dadssd"')
query_coinbase_and_test(
coinbase=coinbase,
query_fn_name='query_online_trade_history',
sells_response=broken_response,
expected_warnings_num=0,
expected_errors_num=1,
)
# unknown asset
broken_response = BUYS_RESPONSE.replace('"BTC"', '"dsadsad"')
query_coinbase_and_test(
coinbase=coinbase,
query_fn_name='query_online_trade_history',
buys_response=broken_response,
expected_warnings_num=1,
expected_errors_num=0,
)
# invalid asset format
broken_response = BUYS_RESPONSE.replace('"BTC"', '123')
query_coinbase_and_test(
coinbase=coinbase,
query_fn_name='query_online_trade_history',
buys_response=broken_response,
expected_warnings_num=0,
expected_errors_num=1,
)
# invalid trade type
broken_response = BUYS_RESPONSE.replace('"buy"', 'null')
query_coinbase_and_test(
coinbase=coinbase,
query_fn_name='query_online_trade_history',
buys_response=broken_response,
expected_warnings_num=0,
expected_errors_num=1,
)
# invalid amount
broken_response = BUYS_RESPONSE.replace('"486.34313725"', '"gfgfg"')
query_coinbase_and_test(
coinbase=coinbase,
query_fn_name='query_online_trade_history',
buys_response=broken_response,
expected_warnings_num=0,
expected_errors_num=1,
)
# invalid subtotal amount
broken_response = BUYS_RESPONSE.replace('"4862.42"', 'false')
query_coinbase_and_test(
coinbase=coinbase,
query_fn_name='query_online_trade_history',
buys_response=broken_response,
expected_warnings_num=0,
expected_errors_num=1,
)
# invalid fee amount
broken_response = BUYS_RESPONSE.replace('"1.01"', '"aas"')
query_coinbase_and_test(
coinbase=coinbase,
query_fn_name='query_online_trade_history',
buys_response=broken_response,
expected_warnings_num=0,
expected_errors_num=1,
)
# unknown fee asset
broken_response = BUYS_RESPONSE.replace('"USD"', '"DSADSA"')
query_coinbase_and_test(
coinbase=coinbase,
query_fn_name='query_online_trade_history',
buys_response=broken_response,
expected_warnings_num=1,
expected_errors_num=0,
)
# invalid fee asset
broken_response = BUYS_RESPONSE.replace('"USD"', '[]')
query_coinbase_and_test(
coinbase=coinbase,
query_fn_name='query_online_trade_history',
buys_response=broken_response,
expected_warnings_num=0,
expected_errors_num=1,
)
# missing key error
broken_response = SELLS_RESPONSE.replace(' "status": "completed",', '')
query_coinbase_and_test(
coinbase=coinbase,
query_fn_name='query_online_trade_history',
buys_response=broken_response,
expected_warnings_num=0,
expected_errors_num=1,
)
def test_coinbase_query_trade_history_paginated(function_scope_coinbase):
"""Test that coinbase trade history query can deal with paginated response"""
coinbase = function_scope_coinbase
coinbase.cache_ttl_secs = 0
paginated_buys_response = BUYS_RESPONSE.replace(
'"next_uri": null',
'"next_uri": "/v2/buys/?next-page"',
)
paginated_sells_response = SELLS_RESPONSE.replace(
'"next_uri": null',
'"next_uri": "/v2/sells/?next-page"',
)
query_coinbase_and_test(
coinbase=coinbase,
query_fn_name='query_online_trade_history',
expected_warnings_num=0,
expected_errors_num=0,
expected_actions_num=4,
buys_response=paginated_buys_response,
sells_response=paginated_sells_response,
)
def test_coinbase_query_deposit_withdrawals(function_scope_coinbase):
"""Test that coinbase deposit/withdrawals history query works fine for the happy path"""
coinbase = function_scope_coinbase
with patch.object(coinbase.session, 'get', side_effect=mock_normal_coinbase_query):
movements = coinbase.query_online_deposits_withdrawals(
start_ts=0,
end_ts=1576726126,
)
warnings = coinbase.msg_aggregator.consume_warnings()
errors = coinbase.msg_aggregator.consume_errors()
assert len(warnings) == 0
assert len(errors) == 0
assert len(movements) == 3
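    # timestamps appear to come from payout_at when present (the USD deposit)
    # and fall back to created_at otherwise (null payout_at and the ETH send)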
expected_movements = [AssetMovement(
location=Location.COINBASE,
category=AssetMovementCategory.DEPOSIT,
timestamp=1519001640,
address=None,
transaction_id=None,
asset=A_USD,
amount=FVal('55'),
fee_asset=A_USD,
fee=FVal('0.05'),
link='1130eaec-07d7-54c4-a72c-2e92826897df',
), AssetMovement(
location=Location.COINBASE,
category=AssetMovementCategory.WITHDRAWAL,
address=None,
transaction_id=None,
timestamp=1485895742,
asset=A_USD,
amount=FVal('10.0'),
fee_asset=A_USD,
fee=FVal('0.01'),
link='146eaec-07d7-54c4-a72c-2e92826897df',
), AssetMovement(
location=Location.COINBASE,
category=AssetMovementCategory.WITHDRAWAL,
address='0x6dcD6449dbCa615e40d696328209686eA95327b2',
transaction_id='0x558bfa4d2a4ef598ddb92233459c00eda9e6c14cda75e6773b90208cb6938169',
timestamp=1566726126,
asset=A_ETH,
amount=FVal('0.05770427'),
fee_asset=A_ETH,
fee=FVal('0.00021'),
link='id1',
)]
assert expected_movements == movements
# and now try to query within a specific range
with patch.object(coinbase.session, 'get', side_effect=mock_normal_coinbase_query):
movements = coinbase.query_online_deposits_withdrawals(
start_ts=0,
end_ts=1519001650,
)
warnings = coinbase.msg_aggregator.consume_warnings()
errors = coinbase.msg_aggregator.consume_errors()
assert len(warnings) == 0
assert len(errors) == 0
assert len(movements) == 2
assert movements[0].category == AssetMovementCategory.DEPOSIT
assert movements[0].timestamp == 1519001640
assert movements[1].category == AssetMovementCategory.WITHDRAWAL
assert movements[1].timestamp == 1485895742
def test_coinbase_query_deposit_withdrawals_unexpected_data(function_scope_coinbase):
"""Test that coinbase deposit/withdrawals query handles unexpected data properly"""
coinbase = function_scope_coinbase
# first query with proper data and expect no errors
query_coinbase_and_test(
coinbase=coinbase,
query_fn_name='query_online_deposits_withdrawals',
expected_warnings_num=0,
expected_errors_num=0,
)
# invalid payout_at timestamp
broken_response = DEPOSITS_RESPONSE.replace('"2018-02-18T16:54:00-08:00"', '"dadas"')
query_coinbase_and_test(
coinbase=coinbase,
query_fn_name='query_online_deposits_withdrawals',
deposits_response=broken_response,
expected_warnings_num=0,
expected_errors_num=1,
)
# invalid created_at timestamp
broken_response = WITHDRAWALS_RESPONSE.replace('"2017-01-31T20:49:02Z"', '"dadssd"')
query_coinbase_and_test(
coinbase=coinbase,
query_fn_name='query_online_deposits_withdrawals',
withdrawals_response=broken_response,
expected_warnings_num=0,
expected_errors_num=1,
)
# invalid asset movement type
broken_response = WITHDRAWALS_RESPONSE.replace('"withdrawal"', 'null')
query_coinbase_and_test(
coinbase=coinbase,
query_fn_name='query_online_deposits_withdrawals',
withdrawals_response=broken_response,
expected_warnings_num=0,
expected_errors_num=1,
)
# unknown asset
broken_response = WITHDRAWALS_RESPONSE.replace('"USD"', '"dasdad"')
query_coinbase_and_test(
coinbase=coinbase,
query_fn_name='query_online_deposits_withdrawals',
withdrawals_response=broken_response,
expected_warnings_num=1,
expected_errors_num=0,
)
# invalid asset
broken_response = WITHDRAWALS_RESPONSE.replace('"USD"', '{}')
query_coinbase_and_test(
coinbase=coinbase,
query_fn_name='query_online_deposits_withdrawals',
withdrawals_response=broken_response,
expected_warnings_num=0,
expected_errors_num=1,
)
# invalid amount
broken_response = WITHDRAWALS_RESPONSE.replace('"10.00"', 'true')
query_coinbase_and_test(
coinbase=coinbase,
query_fn_name='query_online_deposits_withdrawals',
withdrawals_response=broken_response,
expected_warnings_num=0,
expected_errors_num=1,
)
# invalid fee
broken_response = WITHDRAWALS_RESPONSE.replace('"0.01"', '"dasd"')
query_coinbase_and_test(
coinbase=coinbase,
query_fn_name='query_online_deposits_withdrawals',
withdrawals_response=broken_response,
expected_warnings_num=0,
expected_errors_num=1,
)
# missing key error
broken_response = DEPOSITS_RESPONSE.replace(' "resource": "deposit",', '')
query_coinbase_and_test(
coinbase=coinbase,
query_fn_name='query_online_deposits_withdrawals',
deposits_response=broken_response,
expected_warnings_num=0,
expected_errors_num=1,
)
| 32.457647 | 98 | 0.648411 |
90ce22e4ab49c4033ac1a18dec9a6a380e1b2b39 | 11,927 | py | Python | enigmatoolbox/vtk_interface/pipeline.py | saratheriver/ENIGMA | c26483bfeb9e035b7ab5f56998d17155aece584f | [
"BSD-3-Clause"
] | null | null | null | enigmatoolbox/vtk_interface/pipeline.py | saratheriver/ENIGMA | c26483bfeb9e035b7ab5f56998d17155aece584f | [
"BSD-3-Clause"
] | null | null | null | enigmatoolbox/vtk_interface/pipeline.py | saratheriver/ENIGMA | c26483bfeb9e035b7ab5f56998d17155aece584f | [
"BSD-3-Clause"
] | 1 | 2020-12-14T18:34:40.000Z | 2020-12-14T18:34:40.000Z | """Pipeline for VTK filters."""
# Author: Oualid Benkarim <oualid.benkarim@mcgill.ca>
# License: BSD 3 clause
from .decorators import wrap_input
from .wrappers.algorithm import BSAlgorithm
from .wrappers.data_object import BSDataObject
# From https://vtk.org/Wiki/VTK/Tutorials/New_Pipeline
# Outputs are referred to by port number while
# inputs are referred to by both their port number and connection number
# (because a single input port can have more than one connection)
@wrap_input(0, 1)
def connect(ftr0, ftr1, port0=0, port1=0, add_conn=False):
"""Connection of two filters.
Connects the output port `port0` of filter `ftr0` with the input port
`port1` of filter `ftr1`.
Parameters
----------
    ftr0 : vtkAlgorithm, vtkDataObject, BSAlgorithm or BSDataObject
        The input filter. May be a filter or a data object.
ftr1 : vtkAlgorithm or BSAlgorithm
The output filter.
port0 : int, optional
Output port of `ftr0`. Not used if `ftr0` is a dataset. Default is 0.
port1 : int, optional
Input port of `ftr1`. Default is 0.
add_conn : bool or int, optional
Connect to specific connection of `port1`. If False, use
`SetInputConnection` or `SetInputData` (all other added connections to
`port1` are removed). Otherwise, use `AddInputConnection` or
`AddInputData`. If int, add to given connection (e.g.,
`SetInputConnectionByNumber` or `SetInputDataByNumber`). Only used if
`port1` accepts more than one connection (i.e., repeatable).
Default is False.
Returns
-------
    ftr1 : BSAlgorithm
        Returns (wrapped) `ftr1` after connecting it with the input filter.
"""
if isinstance(ftr0, BSAlgorithm) and port0 >= ftr0.nop:
raise ValueError("'{0}' only has {1} output ports.".
format(ftr0.__vtkname__, ftr0.nop))
if port1 >= ftr1.nip:
raise ValueError("'{0}' only accepts {1} input ports.".
format(ftr1.__vtkname__, ftr1.nip))
if add_conn is True or isinstance(add_conn, int):
if ftr1.nip > 1:
raise ValueError("No support yet for 'add_conn' when filter "
"has more than 1 input ports.")
pinfo = ftr1.GetInputPortInformation(port1)
if pinfo.Get(ftr1.INPUT_IS_REPEATABLE()) == 0:
raise ValueError("Input port {0} of '{1}' does not "
"accept multiple connections.".
                             format(port1, ftr1.__vtkname__))
if isinstance(add_conn, int):
if not hasattr(ftr1, 'GetUserManagedInputs') or \
ftr1.GetUserManagedInputs() == 0:
raise ValueError("Input port {0} of '{1}' does not accept "
"connection number."
                                 .format(port1, ftr1.__vtkname__))
if isinstance(ftr0, BSAlgorithm):
op = ftr0.GetOutputPort(port0)
if add_conn is True:
# Connection for only 1 input port. Not tested.
ftr1.AddInputConnection(port1, op)
elif isinstance(add_conn, int):
# Connection for only 1 input port. Not tested.
ftr1.SetInputConnectionByNumber(add_conn, op)
else:
ftr1.SetInputConnection(port1, op)
elif isinstance(ftr0, BSDataObject):
ftr0 = ftr0.VTKObject
if add_conn is True:
ftr1.AddInputData(ftr0)
elif isinstance(add_conn, int):
ftr1.SetInputDataByNumber(add_conn, ftr0)
else:
ftr1.SetInputDataObject(port1, ftr0)
else:
raise ValueError('Unknown input filter type: {0}'.format(type(ftr0)))
return ftr1
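# Minimal usage sketch (same filters as the `serial_connect` examples below):
#     ps = vtk.vtkPointSource()
#     dn = vtk.vtkDelaunay2D()
#     dn = connect(ps, dn)  # output port 0 of `ps` -> input port 0 of `dn`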
@wrap_input(0)
def to_data(ftr, port=0):
"""Extract data from filter.
Parameters
----------
ftr : vtkAlgorithm or :class:`.BSAlgorithm`
Input filter.
port : int, optional
Port to get data from. When port is -1, refers to all ports.
Default is 0.
Returns
-------
data : BSDataObject or list of BSDataObject
Returns the output of the filter. If port is -1 and number of output
ports > 1, then return list of outputs.
Notes
-----
Filters are automatically updated to get the output.
"""
list_ports = [port] if port > -1 else range(ftr.nop)
n_ports = len(list_ports)
out = [None] * n_ports
for i, port_id in enumerate(list_ports):
ftr.Update(port_id)
out[i] = ftr.GetOutputDataObject(port_id)
if port > -1:
return out[0]
return out
@wrap_input(0)
def get_output(ftr, as_data=True, update=True, port=0):
"""Get output from filter.
Parameters
----------
ftr : vtkAlgorithm or :class:`.BSAlgorithm`
Input filter.
as_data : bool, optional
Return data as BSDataObject instead of :class:`.BSAlgorithm`. If True,
the filter is automatically updated. Default is True.
update : bool, optional
Update filter. Only used when `as_data` is False. Default is True.
port : int or None, optional
        Output port to update or get data from. Only used when the input is a
        vtkAlgorithm. When port is -1, refers to all ports. When None, or when
        `ftr` is a sink (i.e., has 0 output ports), the filter is updated with
        no port argument (Write() is tried first for writers). Default is 0.
Returns
-------
poly : BSAlgorithm or BSDataObject
Returns filter or its output. If port is -1, returns all outputs in a
list if ``as_data == True``.
"""
if as_data:
return to_data(ftr, port=port)
if update:
if port is None or ftr.is_sink:
try:
ftr.Write()
except AttributeError:
ftr.Update()
elif port > -1:
ftr.Update(port)
else:
# In vtkAlgorithm.cxx, lines 1474-1482
# vtkAlgorithm()->Update() defaults to vtkExecutive()->Update(0)
# if the number of output ports of the algorithm > 0. Otherwise,
# vtkExecutive()->Update(-1)
# In vtkExecutive.cxx lines 310-318, is the same
# In vtkDemandDrivenPipeline.cxx, NeedToExecuteData function,
# lines 1051-1064
# if(outputPort >= 0) Update port outputPort!
# // No port is specified. Check all ports.
# for(int i=0; i < this->Algorithm->GetNumberOfOutputPorts(); ++i)
# When No input port is specified, they go through all ports
# Update individually, just in case
for i in range(ftr.nop):
ftr.Update(i)
return ftr
def _map_input_filter(f):
"""Some comments here."""
if not isinstance(f, (list, tuple)):
return f, 0 # assume is only filter
if isinstance(f, list):
f = tuple(f)
if len(f) == 1: # (fn,)
f += (0,)
elif len(f) > 2:
raise ValueError('Cannot recognize input filter {0}.'.format(f))
return f
def _map_output_filter(f):
"""Some comments here."""
if not isinstance(f, (list, tuple)):
return False, 0, f # assume is only filter
if isinstance(f, list):
f = tuple(f)
if len(f) == 1: # (fn,)
f = (False, 0) + f
elif len(f) == 2: # (ip, fn)
f = (False,) + f
elif len(f) > 3:
raise ValueError('Cannot recognize input filter {0}.'.format(f))
return f
def _map_intermediate_filter(f):
"""Some comments here."""
if not isinstance(f, (list, tuple)):
return False, 0, f, 0 # assume is only filter
if isinstance(f[-1], int): # (..., op)
return _map_output_filter(f[:-1]) + (f[-1],)
return _map_output_filter(f) + (0,)
def serial_connect(*filters, as_data=True, update=True, port=0):
"""Connect filters serially.
Parameters
----------
*filters : sequence of tuple or list
Input filters to serially connect. Each input takes one of the
following formats:
#. First filter in sequence: ``(f0, op=0)``
* `f0` (vtkAlgorithm, :class:`.BSAlgorithm`, vtkDataObject or
:class:`.BSDataObject`) - This is the first filter.
* `op` (int, optional) - This is the output port of `f0`.
Default is 0.
#. Last filter in sequence: ``(ic=None, ip=0, fn)``
* `ic` (int, optional) - This is the input connection of the
input port `ip` of filter `fn`. Default is None.
* `ip` (int, optional) - This is the input port of `fn`. Must be
specified when `ic` is not None. Default is 0.
* `fn` (vtkAlgorithm or :class:`.BSAlgorithm`) - This is the last
filter.
#. Intermediate filters: ``(ic=None, ip=0, fi, op=0)``
* `ic` (int, optional) - This is the input connection of the
input port `ip` of filter `fi`. Default is None.
* `ip` (int, optional) - This is the input port of `fi`. Must be
specified when `ic` is not None. Default is 0.
* `fi` (vtkAlgorithm or :class:`.BSAlgorithm`) - This is a filter.
* `op` (int, optional) - This is the output port of `fi`.
Default is 0.
as_data : bool, optional
Return data instead of filter. If True, last filter is automatically
updated. Default is True.
update : bool, optional
Update last filter. Only used when ``as_data == False``.
Default is True.
port : int, optional
Port to update or get data from. When port is -1, refers to all ports.
Default is 0.
Returns
-------
output : BSAlgorithm or BSDataObject
Last filter or its output.
Examples
--------
In VTK:
>>> # point source
>>> ps = vtk.vtkPointSource()
>>> ps.SetNumberOfPoints(100)
>>> # delauny
>>> dn = vtk.vtkDelaunay2D()
>>> dn.SetTolerance(0.01)
>>> dn.SetInputConnection(0, ps.GetOutputPort(0))
>>> # smooth
>>> sf = vtk.vtkWindowedSincPolyDataFilter()
>>> sf.SetInputConnection(0, dn.GetOutputPort(0))
>>> sf.SetNumberOfIterations(20)
>>> # update and get output
>>> sf.Update()
>>> sf.GetOutput(0)
(vtkCommonDataModelPython.vtkPolyData)0x7f0134fffb28
With `serial_connect` function:
>>> from brainspace.vtk_interface.pipeline import serial_connect
>>> # point source
>>> ps = vtk.vtkPointSource()
>>> ps.SetNumberOfPoints(100)
>>> # delauny
>>> dn = vtk.vtkDelaunay2D()
>>> dn.SetTolerance(0.01)
>>> # smooth
>>> sf = vtk.vtkWindowedSincPolyDataFilter()
>>> sf.SetNumberOfIterations(20)
>>> # Connection
>>> serial_connect((ps, 0), (None, 0, dn, 0), (None, 0, sf), as_data=True,
... port=0)
<brainspace.vtk_interface.wrappers.BSPolyData at 0x7f0134efb048>
>>> # This can be shortened, since no input connection is needed
>>> serial_connect((ps, 0), (0, dn, 0), (0, sf), as_data=True, port=0)
<brainspace.vtk_interface.wrappers.BSPolyData at 0x7f0134ee9128>
>>> # And shortened even further since the default input and output
>>> # ports are 0
>>> serial_connect((ps,), (dn,), (sf,), as_data=True, port=0)
<brainspace.vtk_interface.wrappers.BSPolyData at 0x7f0134ee92b0>
>>> # This is the same
>>> serial_connect(ps, dn, sf)
<brainspace.vtk_interface.wrappers.BSPolyData at 0x7f0134eee898>
"""
prev_f, prev_op = _map_input_filter(filters[0])
    for f1 in filters[1:-1]:
ic, ip, fi, op = _map_intermediate_filter(f1)
prev_f = connect(prev_f, fi, port0=prev_op, port1=ip, add_conn=ic)
prev_op = op
ic, ip, fo = _map_output_filter(filters[-1])
fo = connect(prev_f, fo, port0=prev_op, port1=ip, add_conn=ic)
return get_output(fo, as_data=as_data, update=update, port=port)
| 34.571014 | 78 | 0.599061 |
2cd7804bde5d27e973c18782719ff7eb3549cac9 | 1,271 | py | Python | test/functional/p2p-mempool.py | BullCoin-Project/BullCoin | e01c79c13d27312a64ca5943a5cbd93cfbc18482 | [
"MIT"
] | null | null | null | test/functional/p2p-mempool.py | BullCoin-Project/BullCoin | e01c79c13d27312a64ca5943a5cbd93cfbc18482 | [
"MIT"
] | null | null | null | test/functional/p2p-mempool.py | BullCoin-Project/BullCoin | e01c79c13d27312a64ca5943a5cbd93cfbc18482 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Copyright (c) 2017 The Bull Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test p2p mempool message.
Test that nodes are disconnected if they send mempool messages when bloom
filters are not enabled.
"""
from test_framework.mininode import *
from test_framework.test_framework import BullTestFramework
from test_framework.util import *
class P2PMempoolTests(BullTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
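        # with bloom filter support disabled (BIP 111), peers sending mempool
        # messages must be disconnected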
self.extra_args = [["-peerbloomfilters=0"]]
def run_test(self):
#connect a mininode
aTestNode = NodeConnCB()
node = NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], aTestNode)
aTestNode.add_connection(node)
NetworkThread().start()
aTestNode.wait_for_verack()
#request mempool
aTestNode.send_message(msg_mempool())
aTestNode.wait_for_disconnect()
#mininode must be disconnected at this point
assert_equal(len(self.nodes[0].getpeerinfo()), 0)
if __name__ == '__main__':
P2PMempoolTests().main()
| 32.589744 | 75 | 0.707317 |