hexsha string | size int64 | ext string | lang string | max_stars_repo_path string | max_stars_repo_name string | max_stars_repo_head_hexsha string | max_stars_repo_licenses list | max_stars_count int64 | max_stars_repo_stars_event_min_datetime string | max_stars_repo_stars_event_max_datetime string | max_issues_repo_path string | max_issues_repo_name string | max_issues_repo_head_hexsha string | max_issues_repo_licenses list | max_issues_count int64 | max_issues_repo_issues_event_min_datetime string | max_issues_repo_issues_event_max_datetime string | max_forks_repo_path string | max_forks_repo_name string | max_forks_repo_head_hexsha string | max_forks_repo_licenses list | max_forks_count int64 | max_forks_repo_forks_event_min_datetime string | max_forks_repo_forks_event_max_datetime string | content string | avg_line_length float64 | max_line_length int64 | alphanum_fraction float64 | qsc_code_num_words_quality_signal int64 | qsc_code_num_chars_quality_signal float64 | qsc_code_mean_word_length_quality_signal float64 | qsc_code_frac_words_unique_quality_signal float64 | qsc_code_frac_chars_top_2grams_quality_signal float64 | qsc_code_frac_chars_top_3grams_quality_signal float64 | qsc_code_frac_chars_top_4grams_quality_signal float64 | qsc_code_frac_chars_dupe_5grams_quality_signal float64 | qsc_code_frac_chars_dupe_6grams_quality_signal float64 | qsc_code_frac_chars_dupe_7grams_quality_signal float64 | qsc_code_frac_chars_dupe_8grams_quality_signal float64 | qsc_code_frac_chars_dupe_9grams_quality_signal float64 | qsc_code_frac_chars_dupe_10grams_quality_signal float64 | qsc_code_frac_chars_replacement_symbols_quality_signal float64 | qsc_code_frac_chars_digital_quality_signal float64 | qsc_code_frac_chars_whitespace_quality_signal float64 | qsc_code_size_file_byte_quality_signal float64 | qsc_code_num_lines_quality_signal float64 | qsc_code_num_chars_line_max_quality_signal float64 | qsc_code_num_chars_line_mean_quality_signal float64 | qsc_code_frac_chars_alphabet_quality_signal float64 | qsc_code_frac_chars_comments_quality_signal float64 | qsc_code_cate_xml_start_quality_signal float64 | qsc_code_frac_lines_dupe_lines_quality_signal float64 | qsc_code_cate_autogen_quality_signal float64 | qsc_code_frac_lines_long_string_quality_signal float64 | qsc_code_frac_chars_string_length_quality_signal float64 | qsc_code_frac_chars_long_word_length_quality_signal float64 | qsc_code_frac_lines_string_concat_quality_signal float64 | qsc_code_cate_encoded_data_quality_signal float64 | qsc_code_frac_chars_hex_words_quality_signal float64 | qsc_code_frac_lines_prompt_comments_quality_signal float64 | qsc_code_frac_lines_assert_quality_signal float64 | qsc_codepython_cate_ast_quality_signal float64 | qsc_codepython_frac_lines_func_ratio_quality_signal float64 | qsc_codepython_cate_var_zero_quality_signal bool | qsc_codepython_frac_lines_pass_quality_signal float64 | qsc_codepython_frac_lines_import_quality_signal float64 | qsc_codepython_frac_lines_simplefunc_quality_signal float64 | qsc_codepython_score_lines_no_logic_quality_signal float64 | qsc_codepython_frac_lines_print_quality_signal float64 | qsc_code_num_words int64 | qsc_code_num_chars int64 | qsc_code_mean_word_length int64 | qsc_code_frac_words_unique null | qsc_code_frac_chars_top_2grams int64 | qsc_code_frac_chars_top_3grams int64 | qsc_code_frac_chars_top_4grams int64 | qsc_code_frac_chars_dupe_5grams int64 | qsc_code_frac_chars_dupe_6grams int64 | qsc_code_frac_chars_dupe_7grams int64 | qsc_code_frac_chars_dupe_8grams int64 | qsc_code_frac_chars_dupe_9grams int64 | qsc_code_frac_chars_dupe_10grams int64 | qsc_code_frac_chars_replacement_symbols int64 | qsc_code_frac_chars_digital int64 | qsc_code_frac_chars_whitespace int64 | qsc_code_size_file_byte int64 | qsc_code_num_lines int64 | qsc_code_num_chars_line_max int64 | qsc_code_num_chars_line_mean int64 | qsc_code_frac_chars_alphabet int64 | qsc_code_frac_chars_comments int64 | qsc_code_cate_xml_start int64 | qsc_code_frac_lines_dupe_lines int64 | qsc_code_cate_autogen int64 | qsc_code_frac_lines_long_string int64 | qsc_code_frac_chars_string_length int64 | qsc_code_frac_chars_long_word_length int64 | qsc_code_frac_lines_string_concat null | qsc_code_cate_encoded_data int64 | qsc_code_frac_chars_hex_words int64 | qsc_code_frac_lines_prompt_comments int64 | qsc_code_frac_lines_assert int64 | qsc_codepython_cate_ast int64 | qsc_codepython_frac_lines_func_ratio int64 | qsc_codepython_cate_var_zero int64 | qsc_codepython_frac_lines_pass int64 | qsc_codepython_frac_lines_import int64 | qsc_codepython_frac_lines_simplefunc int64 | qsc_codepython_score_lines_no_logic int64 | qsc_codepython_frac_lines_print int64 | effective string | hits int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
82a5daea9d746a5e0fd1a18fd73ba8a7a242e08f | 612 | py | Python | web_app/cornwall/views.py | blackradley/heathmynd | 4495f8fadef9d3a36a7d5b49fae2b61cceb158bc | [
"MIT"
] | null | null | null | web_app/cornwall/views.py | blackradley/heathmynd | 4495f8fadef9d3a36a7d5b49fae2b61cceb158bc | [
"MIT"
] | 4 | 2018-11-06T16:15:10.000Z | 2018-11-07T12:03:09.000Z | web_app/cornwall/views.py | blackradley/heathmynd | 4495f8fadef9d3a36a7d5b49fae2b61cceb158bc | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
""" test """
from __future__ import unicode_literals
from django.template.loader import get_template
from django.contrib import messages
# Create your views here.
from django.http import HttpResponse


def index(request):
    """ index """
    template = get_template('cornwall/index.html')
    messages.set_level(request, messages.DEBUG)
    list(messages.get_messages(request))  # clear out the previous messages
    messages.add_message(request, messages.INFO, 'Hello world.')
    context = {'nbar': 'cornwall'}
    html = template.render(context, request)
    return HttpResponse(html)
| 32.210526 | 73 | 0.730392 | 75 | 612 | 5.826667 | 0.573333 | 0.06865 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.001927 | 0.151961 | 612 | 18 | 74 | 34 | 0.840077 | 0.148693 | 0 | 0 | 0 | 0 | 0.08498 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.083333 | false | 0 | 0.333333 | 0 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 |
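For orientation, a minimal sketch (a hypothetical urls.py, not part of the row above) of how this index view would typically be routed in a Django project of that era:

from django.conf.urls import url
from cornwall import views

urlpatterns = [
    url(r'^$', views.index, name='index'),
]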
82b549e4607fd2be9e74cf5b94bf6e0c4162ac8a | 1,198 | py | Python | src/user_auth_api/serializers.py | Adstefnum/mockexams | af5681b034334be9c5aaf807161ca80a8a1b9948 | [
"BSD-3-Clause"
] | null | null | null | src/user_auth_api/serializers.py | Adstefnum/mockexams | af5681b034334be9c5aaf807161ca80a8a1b9948 | [
"BSD-3-Clause"
] | null | null | null | src/user_auth_api/serializers.py | Adstefnum/mockexams | af5681b034334be9c5aaf807161ca80a8a1b9948 | [
"BSD-3-Clause"
] | null | null | null | from rest_framework import serializers
from user_auth_api.models import User

# User Serializer
class UserSerializer(serializers.ModelSerializer):
    class Meta:
        model = User
        fields = [
            'user_name',
            'email',
            'current_jamb_score',
            'phone_num',
            'last_name',
            'first_name',
            'is_staff',
            'is_superuser',
            'uuid',
            'is_active',
            'last_login',
            'date_joined',
        ]

# Register Serializer
class RegisterSerializer(serializers.ModelSerializer):
    class Meta:
        model = User
        fields = [
            'user_name',
            'email',
            'password',
            'current_jamb_score',
            'phone_num',
            'last_name',
            'first_name',
            'uuid',
        ]
        extra_kwargs = {'password': {'write_only': True}}

    def create(self, validated_data):
        user = User.objects.create_user(
            validated_data['user_name'],
            validated_data['email'], validated_data['current_jamb_score'],
            validated_data['phone_num'], validated_data['password'],
            validated_data['last_name'], validated_data['first_name']
        )
        return user | 22.603774 | 73 | 0.576795 | 116 | 1,198 | 5.637931 | 0.413793 | 0.159021 | 0.073395 | 0.107034 | 0.318043 | 0.318043 | 0.318043 | 0.318043 | 0.318043 | 0.192661 | 0 | 0 | 0.310518 | 1,198 | 53 | 74 | 22.603774 | 0.791768 | 0.029215 | 0 | 0.487805 | 0 | 0 | 0.234281 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.02439 | false | 0.073171 | 0.04878 | 0 | 0.195122 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
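A hedged usage sketch for the RegisterSerializer above, as it might appear inside a DRF view (request is assumed to be a rest_framework Request whose payload carries the listed fields):

serializer = RegisterSerializer(data=request.data)
serializer.is_valid(raise_exception=True)
user = serializer.save()  # save() routes to create(), which delegates to User.objects.create_user()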
82b593a5d04b8635ad9d0bfca619ad7a94f582c9 | 2,671 | py | Python | cv_utils/cv_util_node.py | OAkyildiz/cibr_img_processing | 69f3293db80e9c0ae57369eaf2885b94adb330df | [
"MIT"
] | null | null | null | cv_utils/cv_util_node.py | OAkyildiz/cibr_img_processing | 69f3293db80e9c0ae57369eaf2885b94adb330df | [
"MIT"
] | null | null | null | cv_utils/cv_util_node.py | OAkyildiz/cibr_img_processing | 69f3293db80e9c0ae57369eaf2885b94adb330df | [
"MIT"
] | null | null | null | import sys
import rospy
import types
#from std_msgs.msg import String
from sensor_msgs.msg import Image
from cibr_img_processing.msg import Ints
from cv_bridge import CvBridge, CvBridgeError
#make int msgs
#TODO: get the img size from camera_indo topics
class CVUtilNode: # abstarct this, it can easily work with other cv_utils and be an image bbm_node
def __init__(self, util, name="cv_util_node", pub_topic=False):
#self.obj_pub = rospy.Publisher("image_topic_2", ***)
self.bridge = CvBridge()
self.util=util
self.name=name
rospy.init_node(self.name, anonymous=True)
self.rate=rospy.Rate(30)
self.image_sub = rospy.Subscriber("image_topic", Image, self.callback)
self.result_pub = rospy.Publisher("results", Ints, queue_size=10) #always publish data
self.result_msgs = [-1,-1,-1] #make int msgs
self.pubs=lambda:0
self.subs=[]
if pub_topic:
self.image_pub = rospy.Publisher(pub_topic,Image, queue_size=10)
pass #do stuff with img.pub
def callback(self,data):
try:
self.util.hook(self.bridge.imgmsg_to_cv2(data, "bgr8"))
except CvBridgeError as e:
print(e)
def data_pub(self):
self.result_pub.publish(self.util.results) #try catch
def img_pub(cv_image): # to handleconverting from OpenCV to ROS
try:
self.image_pub.publish(self.bridge.cv2_to_imgmsg(cv_image, "bgr8"))
except CvBridgeError as e:
print(e)
def run(self):
self.util.init_windows()
while not rospy.is_shutdown():
try:
if self.util.loop(): break
if not -1 in self.util.results and self.util._publish:
self.data_pub()
self.util._publish = 0
# if self.util._publish:
# for pub in self.pubs:
# pub.publish
#self.rate.sleep()
except KeyboardInterrupt:
self.util.shutdown()
self.util.shutdown()
#adds a publisher to alirlaes,
def attach_pub(self, topic, type):
self.pubs.pub.append(rospy.Publisher(topic, type, queue_size=1))
# TODO:attach structs of publisher and message template instead
# so it is iterable together
#pubs.pub=... pubs.msg=type()
def attach_sub(self, topic, cb_handle):
self.subs.append = rospy.Subscriber(topic, type, cb_handle)
def attach_controls(self, fun_handle):
# bind the method to instance
self.util.external_ops=types.MethodType(fun_handle,self.util)
| 33.810127 | 98 | 0.622613 | 359 | 2,671 | 4.48468 | 0.356546 | 0.069565 | 0.031677 | 0.031056 | 0.043478 | 0.043478 | 0.043478 | 0.043478 | 0 | 0 | 0 | 0.00937 | 0.280794 | 2,671 | 78 | 99 | 34.24359 | 0.828735 | 0.217896 | 0 | 0.18 | 0 | 0 | 0.018357 | 0 | 0 | 0 | 0 | 0.012821 | 0 | 1 | 0.16 | false | 0.02 | 0.12 | 0 | 0.3 | 0.04 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
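A minimal usage sketch for CVUtilNode; MyUtil is a hypothetical stand-in for any cv_utils helper that provides the hook(), loop(), init_windows() and shutdown() methods plus the results and _publish attributes the node expects:

if __name__ == '__main__':
    util = MyUtil()  # hypothetical image-processing helper
    node = CVUtilNode(util, name="demo_node", pub_topic="processed_image")
    node.run()  # blocks until loop() returns truthy or ROS shuts down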
82b8f3579fbf367d54a1259558d837656079d6f8 | 448 | py | Python | pokepay/request/get_shop.py | pokepay/pokepay-partner-python-sdk | 7437370dc1cd0bde38959713015074315291b1e1 | [
"MIT"
] | null | null | null | pokepay/request/get_shop.py | pokepay/pokepay-partner-python-sdk | 7437370dc1cd0bde38959713015074315291b1e1 | [
"MIT"
] | null | null | null | pokepay/request/get_shop.py | pokepay/pokepay-partner-python-sdk | 7437370dc1cd0bde38959713015074315291b1e1 | [
"MIT"
] | 1 | 2022-01-28T03:00:12.000Z | 2022-01-28T03:00:12.000Z | # DO NOT EDIT: File is generated by code generator.
from pokepay_partner_python_sdk.pokepay.request.request import PokepayRequest
from pokepay_partner_python_sdk.pokepay.response.shop_with_accounts import ShopWithAccounts
class GetShop(PokepayRequest):
    def __init__(self, shop_id):
        self.path = "/shops" + "/" + shop_id
        self.method = "GET"
        self.body_params = {}
        self.response_class = ShopWithAccounts
| 32 | 91 | 0.725446 | 54 | 448 | 5.722222 | 0.62963 | 0.071197 | 0.116505 | 0.15534 | 0.220065 | 0.220065 | 0 | 0 | 0 | 0 | 0 | 0 | 0.194196 | 448 | 13 | 92 | 34.461538 | 0.855956 | 0.109375 | 0 | 0 | 1 | 0 | 0.025189 | 0 | 0.125 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0.25 | 0 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
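Illustrative only (client/session plumbing elided, shop id made up): constructing the request and inspecting what the Pokepay partner SDK will send.

req = GetShop('shop-1234')
print(req.path)    # -> /shops/shop-1234
print(req.method)  # -> GET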
82badbb757028140899a1d3ea355a9a115e4d31b | 726 | py | Python | dataStructures/complete.py | KarlParkinson/practice | 6bbbd4a8e320732523d83297c1021f52601a20d8 | [
"MIT"
] | null | null | null | dataStructures/complete.py | KarlParkinson/practice | 6bbbd4a8e320732523d83297c1021f52601a20d8 | [
"MIT"
] | null | null | null | dataStructures/complete.py | KarlParkinson/practice | 6bbbd4a8e320732523d83297c1021f52601a20d8 | [
"MIT"
] | null | null | null | import binTree
import queue
def complete(tree):
    q = queue.Queue()
    nonFull = False
    q.enqueue(tree)
    while (not q.isEmpty()):
        t = q.dequeue()
        if (t.getLeftChild()):
            if (nonFull):
                return False
            q.enqueue(t.getLeftChild())
        if (t.getLeftChild() == None):
            nonFull = True
        if (t.getRightChild()):
            if (nonFull):
                return False
            q.enqueue(t.getRightChild())
        if (t.getRightChild() == None):
            nonFull = True
    return True
t = binTree.BinaryTree(1)
t.insertLeft(2)
t.insertRight(3)
t.getRightChild().insertLeft(5)
t.getRightChild().insertRight(6)
print complete(t)
| 21.352941 | 40 | 0.541322 | 79 | 726 | 4.974684 | 0.379747 | 0.178117 | 0.099237 | 0.101781 | 0.147583 | 0.147583 | 0.147583 | 0 | 0 | 0 | 0 | 0.010288 | 0.330579 | 726 | 33 | 41 | 22 | 0.798354 | 0 | 0 | 0.222222 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.074074 | null | null | 0.037037 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
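For contrast, a hedged counterexample using the same binTree API: node 3 gets only a right child, so the breadth-first scan meets a missing child before a later present one and complete() returns False.

t2 = binTree.BinaryTree(1)
t2.insertLeft(2)
t2.insertRight(3)
t2.getRightChild().insertRight(4)
print complete(t2)  # False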
82bea645f31e2de3666e262ad0a20085ef770deb | 656 | py | Python | email_extras/admin.py | maqmigh/django-email-extras | c991b59fa53f9a5324ea7d9f3cc65bc1a9aa8e42 | [
"BSD-2-Clause"
] | 33 | 2015-03-17T12:08:05.000Z | 2021-12-17T23:06:26.000Z | email_extras/admin.py | maqmigh/django-email-extras | c991b59fa53f9a5324ea7d9f3cc65bc1a9aa8e42 | [
"BSD-2-Clause"
] | 26 | 2015-10-09T01:01:00.000Z | 2021-02-09T11:11:52.000Z | email_extras/admin.py | maqmigh/django-email-extras | c991b59fa53f9a5324ea7d9f3cc65bc1a9aa8e42 | [
"BSD-2-Clause"
] | 29 | 2015-02-25T07:51:12.000Z | 2022-02-27T07:05:40.000Z |
from email_extras.settings import USE_GNUPG
if USE_GNUPG:
    from django.contrib import admin

    from email_extras.models import Key, Address
    from email_extras.forms import KeyForm

    class KeyAdmin(admin.ModelAdmin):
        form = KeyForm
        list_display = ('__str__', 'email_addresses')
        readonly_fields = ('fingerprint', )

    class AddressAdmin(admin.ModelAdmin):
        list_display = ('__str__', 'key')
        readonly_fields = ('key', )

        def has_add_permission(self, request):
            return False

    admin.site.register(Key, KeyAdmin)
    admin.site.register(Address, AddressAdmin)
| 26.24 | 54 | 0.652439 | 71 | 656 | 5.746479 | 0.535211 | 0.066176 | 0.110294 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.260671 | 656 | 24 | 55 | 27.333333 | 0.841237 | 0 | 0 | 0 | 0 | 0 | 0.0729 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.0625 | false | 0 | 0.25 | 0.0625 | 0.8125 | 0.0625 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
82c72df17c47f59db7183dbcc92de68aef849d6a | 11,660 | py | Python | functions_alignComp.py | lauvegar/VLBI_spectral_properties_Bfield | 6d07b6b0549ba266d2c56adcf664219a500e75e8 | [
"MIT"
] | 1 | 2020-03-14T14:55:17.000Z | 2020-03-14T14:55:17.000Z | functions_alignComp.py | lauvegar/VLBI_spectral_properties_Bfield | 6d07b6b0549ba266d2c56adcf664219a500e75e8 | [
"MIT"
] | null | null | null | functions_alignComp.py | lauvegar/VLBI_spectral_properties_Bfield | 6d07b6b0549ba266d2c56adcf664219a500e75e8 | [
"MIT"
] | 1 | 2021-01-29T14:08:16.000Z | 2021-01-29T14:08:16.000Z | import numpy as np
import matplotlib.pyplot as plt
from pylab import *
#import pyspeckit as ps
from scipy import io
from scipy import stats
from scipy.optimize import leastsq
#from lmfit import minimize, Parameters, Parameter, report_fit
#from lmfit.models import GaussianModel
import scipy.optimize as optimization
import matplotlib.ticker as ticker
import cmath as math
import pickle
import iminuit
import astropy.io.fits as pf
import os,glob
#import string,math,sys,fileinput,glob,time
#load modules
#from pylab import *
import subprocess as sub
import re
#from plot_components import get_ellipse_coords, ellipse_axis
import urllib2
from astropy import units as u
#from astropy.coordinates import SkyCoord
#FUNCTION TO READ THE HEADER AND TAKE IMPORTANT PARAMETERS AS
#cell
#BMAJ, BMIN, BPA
#date, freq and epoch
def find_nearest(array,value):
    index = (np.abs(array-value)).argmin()
    return array[index], index

def atoi(text):
    return int(text) if text.isdigit() else text

def natural_keys(text):
    '''
    alist.sort(key=natural_keys) sorts in human order
    http://nedbatchelder.com/blog/200712/human_sorting.html
    (See Toothy's implementation in the comments)
    '''
    return [ atoi(c) for c in re.split('(\d+)', text) ]

def get_ellipse_coords(a=0.0, b=0.0, x=0.0, y=0.0, angle=0.0, k=2):
    """ Draws an ellipse using (360*k + 1) discrete points; based on pseudo code
    given at http://en.wikipedia.org/wiki/Ellipse
    k = 1 means 361 points (degree by degree)
    a = major axis distance,
    b = minor axis distance,
    x = offset along the x-axis
    y = offset along the y-axis
    angle = clockwise rotation [in degrees] of the ellipse;
        * angle=0  : the ellipse is aligned with the positive x-axis
        * angle=30 : rotated 30 degrees clockwise from positive x-axis
    """
    pts = np.zeros((360*k+1, 2))

    beta = -angle * np.pi/180.0
    sin_beta = np.sin(beta)
    cos_beta = np.cos(beta)
    alpha = np.radians(np.r_[0.:360.:1j*(360*k+1)])

    sin_alpha = np.sin(alpha)
    cos_alpha = np.cos(alpha)

    pts[:, 0] = x + (a * cos_alpha * cos_beta - b * sin_alpha * sin_beta)
    pts[:, 1] = y + (a * cos_alpha * sin_beta + b * sin_alpha * cos_beta)

    return pts

def ellipse_axis(x, y, s):
    x1 = x-s
    x2 = x+s
    if x1 < x2:
        xaxis = np.linspace(x1,x2,50)
    else:
        xaxis = np.linspace(x2,x1,50)
    y1 = y-s
    y2 = y+s
    if y1 < y2:
        yaxis = np.linspace(y1,y2,50)
    else:
        yaxis = np.linspace(y2,y1,50)
    return xaxis, yaxis

def ellipse_axis_lines(x,y,size):
    pts_arr = []
    pt_arr = []
    x_el_arr = []
    x_elH_arr = []
    y_el_arr = []
    y_elH_arr = []
    for i in xrange(0,len(x)):
        n = len(x[i])
        pts, pt = [], []
        x_el, y_el = [], []
        x_elH, y_elH = [], []
        for k in xrange(0,n):
            pts.append(get_ellipse_coords(a=size[i][k], b=size[i][k], x=x[i][k], y=y[i][k], angle=0))
            pt.append(get_ellipse_coords(a=0.01, b=0.01, x=x[i][k], y=y[i][k], angle=0))
            #lines axis ellipses
            x_el.append(ellipse_axis(x=float(x[i][k]), y=float(y[i][k]), s=float(size[i][k]))[0])
            y_el.append(ellipse_axis(x=x[i][k], y=y[i][k], s=size[i][k])[1])
            x_elH.append(np.linspace(x[i][k], x[i][k], 50))
            y_elH.append(np.linspace(y[i][k], y[i][k], 50))
        pts_arr.append(pts)
        pt_arr.append(pt)
        x_el_arr.append(x_el)
        y_el_arr.append(y_el)
        x_elH_arr.append(x_elH)
        y_elH_arr.append(y_elH)
    return pts_arr, pt_arr, x_el_arr, y_el_arr, x_elH_arr, y_elH_arr
def read_modfile(file1,beam,errors):
    nfiles = len(file1)
    r_arr = []
    errr_arr = [] #np.array([0.]*nfiles)
    psi_arr = []
    errpsi_arr = []
    size_arr = []
    errsize_arr = []
    flux_arr = []
    errflux_arr = []

    ntot = 0

    for k in xrange(0,nfiles):
        with open(file1[k]) as myfile:
            count = sum(1 for line in myfile if line.rstrip('\n'))
        count = count-4

        #n = len(rms[k])
        n = count
        split_f = []
        c = []
        r = np.array([0.]*n)
        errr = np.array([0.]*n)
        psi = np.array([0.]*n)
        errpsi = np.array([0.]*n)
        size = np.array([0.]*n)
        errsize = np.array([0.]*n)
        tb = np.array([0.]*n)
        errtb = np.array([0.]*n)
        flux = np.array([0.]*n)
        fluxpeak = np.array([0.]*n)
        rms = np.array([0.]*n)
        errflux = np.array([0.]*n)
        lim_resol = np.array([0.]*n)
        errlim_resol = np.array([0.]*n)

        temp = file1[k]
        temp_file = open(temp,mode='r')
        temp_file.readline()
        temp_file.readline()
        temp_file.readline()
        temp_file.readline()

        for i in xrange(0,n):
            split_f = temp_file.readline().split()
            flux[i] = (float(split_f[0][:-1]))
            r[i] = (float(split_f[1][:-1]))
            psi[i] = (float(split_f[2][:-1])*np.pi/180.)
            size[i] = (float(split_f[3][:-1])/2.)
            #tb[i] = (float(split_f[7]))

        if errors == True:
            temp_file2 = open('pos_errors.dat',mode='r')
            temp_file2.readline()
            temp_file2.readline()
            for i in xrange(0,ntot):
                temp_file2.readline()
            for i in xrange(0,n):
                split_f = temp_file2.readline().split()
                fluxpeak[i] = (float(split_f[2][:-1]))
                rms[i] = (float(split_f[1][:-1]))

            for i in xrange(0,n):
                errflux[i] = rms[i]
                snr = fluxpeak[i]/rms[i] #[k][i] #change to flux_peak
                dlim = 4/np.pi*np.sqrt(np.pi*np.log(2)*beam[k]*np.log((snr)/(snr-1.))) #np.log((snr+1.)/(snr))) 4/np.pi*beam
                if size[i] > beam[k]:
                    ddec = np.sqrt(size[i]**2-beam[k]**2)
                else:
                    ddec = 0.
                y = [dlim,ddec]
                dg = np.max(y)
                err_size = rms[i]*dlim/fluxpeak[i]
                err_r = err_size/2.
                if r[i] > 0.:
                    err_psi = np.real(math.atan(err_r*180./(np.pi*r[i])))
                else:
                    err_psi = 1./5*beam[k]
                if err_size < 2./5.*beam[k]:
                    errsize[i] = 2./5.*beam[k]
                else:
                    errsize[i] = (err_size)
                if err_r < 1./5*beam[k]:  # bug fix: original compared against the list `beam`
                    errr[i] = 1./5*beam[k]
                    if errr[i] < 1./2.*size[i]:
                        errr[i] = 1./2.*size[i]
                else:
                    errr[i] = (err_r)
                errpsi[i] = (err_psi)

        elif errors == 'Done':
            print 'done'
        else:
            for i in xrange(0,n):
                errflux[i] = 0.1*flux[i]
                errr[i] = 1./5.*beam[k]
                errpsi[i] = 0.
                errsize[i] = 2./5*beam[k]

        r_arr.append(r)
        errr_arr.append(errr)
        psi_arr.append(psi)
        errpsi_arr.append(errpsi)
        size_arr.append(size)
        errsize_arr.append(errsize)
        flux_arr.append(flux)
        errflux_arr.append(errflux)
        ntot = n + ntot + 1

    return r_arr,errr_arr,psi_arr,errpsi_arr,size_arr,errsize_arr,tb,flux_arr,errflux_arr
def x_y(r,errr,psi,errpsi,errors):
    n = len(r)
    x,errx = np.array([0.]*n),np.array([0.]*n)
    y,erry = np.array([0.]*n),np.array([0.]*n)
    x_arr, errx_arr = [], []
    y_arr, erry_arr = [], []
    for i in xrange(0,n):
        x = r[i]*np.sin(psi[i])
        y = r[i]*np.cos(psi[i])
        if errors == True:
            errx = np.sqrt((errr[i]*np.cos(psi[i]))**2+(r[i]*np.sin(psi[i])*errpsi[i])**2)
            erry = np.sqrt((errr[i]*np.sin(psi[i]))**2+(r[i]*np.cos(psi[i])*errpsi[i])**2)
        else:
            errx = errr[i]
            erry = errr[i]
        x_arr.append(x)
        errx_arr.append(errx)
        y_arr.append(y)
        erry_arr.append(erry)
    x_arr = np.asarray(x_arr)
    errx_arr = np.asarray(errx_arr)
    y_arr = np.asarray(y_arr)
    erry_arr = np.asarray(erry_arr)
    return x_arr,errx_arr,y_arr,erry_arr
def r_psi(x,errx,y,erry):
    n = len(x)  # bug fix: original used len(r) before r was defined
    r,errr = np.array([0.]*n),np.array([0.]*n)
    psi,errpsi = np.array([0.]*n),np.array([0.]*n)
    r_arr, errr_arr = [], []
    psi_arr, errpsi_arr = [], []
    for i in xrange(0,n):
        r = np.sqrt(x[i]**2+y[i]**2)
        psi = np.arctan(y[i]/x[i])  # bug fix: np.atan does not exist
        #errr=np.sqrt((1/(2*r)*2*x[i]*errx[i])**2+(1/(2*r)*2*y[i]*erry[i])**2)
        #errpsi=np.sqrt(((y[i]/([x[i]**2+y[i])**2])*errx[i])**2+((x[i]/([x[i]**2+y[i])**2])*erry[i])**2)
        r_arr.append(r)
        #errr_arr.append(errr)
        psi_arr.append(psi)
        #errpsi_arr.append(errpsi)
    return r_arr,psi_arr
def selectComponent(realDAT,realDAT2, first_contour, pts_arr,x_el_arr,x_elH_arr,y_elH_arr,y_el_arr,ext,freq1,freq2,x,y,numComp,orientation):
    levels = first_contour[0]*np.array([-1., 1., 1.41,2.,2.83,4.,5.66,8.,11.3,16.,
                                        22.6,32.,45.3,64.,90.5,128.,181.,256.,362.,512.,
                                        724.,1020.,1450.,2050.])

    plt.figure(10)
    plt.subplot(121)
    cset = plt.contour(realDAT, levels, inline=1,
                       colors=['grey'],
                       extent=ext, aspect=1.0
                       )
    for j in xrange(0,len(x_el_arr[0])):
        plt.plot(pts_arr[0][j][:,0], pts_arr[0][j][:,1], color='blue',linewidth=4)
        plt.plot(x_el_arr[0][j], y_elH_arr[0][j], color='blue',linewidth=4)
        plt.plot(x_elH_arr[0][j], y_el_arr[0][j], color='blue',linewidth=4)
    plt.xlim(ext[0],ext[1])
    plt.ylim(ext[2],ext[3])
    plt.axis('scaled')
    plt.xlabel('Right Ascension [pixels]')
    plt.ylabel('Relative Declination [pixels]')
    plt.title(str('%1.3f' %(freq1))+' GHz')

    levels = first_contour[1]*np.array([-1., 1., 1.41,2.,2.83,4.,5.66,8.,11.3,16.,
                                        22.6,32.,45.3,64.,90.5,128.,181.,256.,362.,512.,
                                        724.,1020.,1450.,2050.])
    #plt.figure(2)
    plt.subplot(122)
    cset = plt.contour(realDAT2, levels, inline=1,
                       colors=['grey'],
                       extent=ext, aspect=1.0
                       )
    for j in xrange(0,len(x_el_arr[1])):
        plt.plot(pts_arr[1][j][:,0], pts_arr[1][j][:,1], color='blue',linewidth=4)
        plt.plot(x_el_arr[1][j], y_elH_arr[1][j], color='blue',linewidth=4)
        plt.plot(x_elH_arr[1][j], y_el_arr[1][j], color='blue',linewidth=4)
    plt.xlim(ext[0],ext[1])
    plt.ylim(ext[2],ext[3])
    plt.axis('scaled')
    plt.xlabel('Right Ascension [pixels]')
    plt.title(str('%1.3f' %(freq2))+' GHz')

    param = ginput(4*numComp,0)

    near_comp1 = []
    near_comp2 = []
    a = 0
    if orientation == 'h':
        for i in xrange(0,numComp):
            x_c = float(param[1+a][0])
            near_comp1.append(int(find_nearest(x[0],x_c)[1]))
            x_c = float(param[3+a][0])
            near_comp2.append(int(find_nearest(x[1],x_c)[1]))
            a = a + 4
    if orientation == 'v':
        for i in xrange(0,numComp):
            y_c = float(param[1+a][1])
            near_comp1.append(int(find_nearest(y[0],y_c)[1]))
            y_c = float(param[3+a][1])
            near_comp2.append(int(find_nearest(y[1],y_c)[1]))
            a = a + 4

    plt.show()

    return near_comp1, near_comp2
def CoreShiftCalculation(indexes,x,y,errx,erry,numComp):
    #indexes[0] low freq, indexes[1] high frequency
    #shift high freq - low freq
    if numComp == 1:
        RaShift = x[1][indexes[1][0]]-x[0][indexes[0][0]]
        DecShift = y[1][indexes[1][0]]-y[0][indexes[0][0]]
        errRaShift = np.sqrt((errx[1][indexes[1][0]])**2+(errx[0][indexes[0][0]])**2)
        errDecShift = np.sqrt((erry[1][indexes[1][0]])**2+(erry[0][indexes[0][0]])**2)
    if numComp > 1:
        #calculate all the Ra and Dec shifts and do an average
        RaShiftArr = np.asarray([0.]*numComp)
        DecShiftArr = np.asarray([0.]*numComp)
        for i in xrange(0,numComp):
            RaShiftArr[i] = x[1][indexes[1][i]]-x[0][indexes[0][i]]
            DecShiftArr[i] = y[1][indexes[1][i]]-y[0][indexes[0][i]]
        RaShift = np.sum(RaShiftArr)/len(RaShiftArr)
        DecShift = np.sum(DecShiftArr)/len(DecShiftArr)
        if numComp < 4:
            #not enough values to do a proper dispersion, I consider the values' error as more reliable
            errRaShiftArr = np.asarray([0.]*numComp)
            errDecShiftArr = np.asarray([0.]*numComp)
            for i in xrange(0,numComp):
                #no square root because I need to square them later in the sum, so i avoid unnecessary calculations
                errRaShiftArr[i] = (errx[1][indexes[1][i]])**2+(errx[0][indexes[0][i]])**2
                errDecShiftArr[i] = (erry[1][indexes[1][i]])**2+(erry[0][indexes[0][i]])**2
            errRaShift = np.sqrt(np.sum(errRaShiftArr))/numComp
            errDecShift = np.sqrt(np.sum(errDecShiftArr))/numComp
        else:
            #statistical error
            errRaShift = np.sqrt(np.sum((RaShiftArr-RaShift)**2))/(np.sqrt(numComp-1))
            errDecShift = np.sqrt(np.sum((DecShiftArr-DecShift)**2))/(np.sqrt(numComp-1))
    return RaShift, DecShift, errRaShift, errDecShift
| 29.004975 | 140 | 0.613036 | 2,113 | 11,660 | 3.276858 | 0.167534 | 0.008377 | 0.026574 | 0.028596 | 0.339688 | 0.256932 | 0.203062 | 0.193385 | 0.13807 | 0.129983 | 0 | 0.051037 | 0.184991 | 11,660 | 401 | 141 | 29.077307 | 0.677576 | 0.094168 | 0 | 0.177474 | 0 | 0 | 0.017467 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.05802 | null | null | 0.003413 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
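A quick, hedged illustration of the get_ellipse_coords() helper defined above (all values arbitrary): a 2:1 ellipse centred at (1.0, -0.5), rotated 30 degrees clockwise and sampled degree by degree.

pts = get_ellipse_coords(a=2.0, b=1.0, x=1.0, y=-0.5, angle=30.0, k=1)
print pts.shape  # (361, 2)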
82cb0803d2457f595d667a7981bfa23935775448 | 1,096 | py | Python | src/wallet/web/schemas/categories.py | clayman-micro/wallet | b78f650aed7d57167db81a0530fd78dbc12d527e | [
"MIT"
] | 2 | 2015-10-18T15:36:37.000Z | 2015-10-19T04:57:00.000Z | src/wallet/web/schemas/categories.py | clayman74/wallet | b78f650aed7d57167db81a0530fd78dbc12d527e | [
"MIT"
] | 7 | 2021-06-26T16:51:13.000Z | 2021-11-29T19:05:00.000Z | src/wallet/web/schemas/categories.py | clayman-micro/wallet | b78f650aed7d57167db81a0530fd78dbc12d527e | [
"MIT"
] | null | null | null | from aiohttp_micro.web.handlers.openapi import PayloadSchema, ResponseSchema
from marshmallow import fields, post_load, Schema
from wallet.core.entities.categories import CategoryFilters
from wallet.web.schemas.abc import CollectionFiltersSchema
class CategorySchema(Schema):
    key = fields.Int(required=True, data_key="id", description="Category id")
    name = fields.Str(required=True, description="Category name")


class CategoriesResponseSchema(ResponseSchema):
    """Categories list."""

    categories = fields.List(fields.Nested(CategorySchema), required=True, description="Categories")


class CategoriesFilterSchema(CollectionFiltersSchema):
    """Filter categories list."""

    @post_load
    def make_payload(self, data, **kwargs):
        return CategoryFilters(user=self.context["user"])


class ManageCategoryPayloadSchema(PayloadSchema):
    """Add new category."""

    name = fields.Str(required=True, description="Category name")


class CategoryResponseSchema(ResponseSchema):
    """Get category info."""

    category = fields.Nested(CategorySchema, required=True)
| 29.621622 | 100 | 0.762774 | 113 | 1,096 | 7.353982 | 0.469027 | 0.072202 | 0.083032 | 0.050542 | 0.219013 | 0.127557 | 0.127557 | 0.127557 | 0.127557 | 0 | 0 | 0 | 0.126825 | 1,096 | 36 | 101 | 30.444444 | 0.868339 | 0.070255 | 0 | 0.117647 | 0 | 0 | 0.053106 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.058824 | false | 0 | 0.235294 | 0.058824 | 0.941176 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
82cfea168601da39ca8ee801205fdee39d24a8a0 | 446 | py | Python | week/templatetags/sidebar_data.py | uno-isqa-8950/fitgirl-inc | 2656e7340e85ab8cbeb0de19dcbc81030b9b5b81 | [
"MIT"
] | 6 | 2018-09-11T15:30:10.000Z | 2020-01-14T17:29:07.000Z | week/templatetags/sidebar_data.py | uno-isqa-8950/fitgirl-inc | 2656e7340e85ab8cbeb0de19dcbc81030b9b5b81 | [
"MIT"
] | 722 | 2018-08-29T17:27:38.000Z | 2022-03-11T23:28:33.000Z | week/templatetags/sidebar_data.py | uno-isqa-8950/fitgirl-inc | 2656e7340e85ab8cbeb0de19dcbc81030b9b5b81 | [
"MIT"
] | 13 | 2018-08-29T07:42:01.000Z | 2019-04-21T22:34:30.000Z | from django import template
from week.models import SidebarContentPage,SidebarImagePage
register = template.Library()
@register.inclusion_tag('week/announcement.html')
def sidebar():
    sidebar_data = SidebarContentPage.objects.get()
    return {'sidebar_data':sidebar_data}


@register.inclusion_tag('week/advertisement.html')
def sidebarimage():
    sidebar_image = SidebarImagePage.objects.get()
    return {'sidebar_image':sidebar_image} | 26.235294 | 59 | 0.784753 | 49 | 446 | 6.979592 | 0.469388 | 0.096491 | 0.116959 | 0.140351 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.107623 | 446 | 17 | 60 | 26.235294 | 0.859296 | 0 | 0 | 0 | 0 | 0 | 0.1566 | 0.100671 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.181818 | false | 0 | 0.181818 | 0 | 0.545455 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
82d236c6e0b9c063b565077e0441849e2549c37e | 1,097 | py | Python | tests/functional/Hydro/AcousticWave/CSPH_mod_package.py | jmikeowen/Spheral | 3e1082a7aefd6b328bd3ae24ca1a477108cfc3c4 | [
"BSD-Source-Code",
"BSD-3-Clause-LBNL",
"FSFAP"
] | 22 | 2018-07-31T21:38:22.000Z | 2020-06-29T08:58:33.000Z | tests/Hydro/AcousticWave/CSPH_mod_package.py | markguozhiming/spheral | bbb982102e61edb8a1d00cf780bfa571835e1b61 | [
"BSD-Source-Code",
"BSD-3-Clause-LBNL",
"FSFAP"
] | 41 | 2020-09-28T23:14:27.000Z | 2022-03-28T17:01:33.000Z | tests/Hydro/AcousticWave/CSPH_mod_package.py | markguozhiming/spheral | bbb982102e61edb8a1d00cf780bfa571835e1b61 | [
"BSD-Source-Code",
"BSD-3-Clause-LBNL",
"FSFAP"
] | 7 | 2019-12-01T07:00:06.000Z | 2020-09-15T21:12:39.000Z | #-------------------------------------------------------------------------------
# A mock physics package to mess around with the CRKSPH corrections.
#-------------------------------------------------------------------------------
from Spheral1d import *
class CRKSPH_mod_package(Physics):
    def __init__(self):
        Physics.__init__(self)
        return

    def evaluateDerivatives(self, t, dt, db, state, derivs):
        return

    def dt(self, db, state, derivs, t):
        return pair_double_string(1e100, "No vote")

    def registerState(self, dt, state):
        return

    def registerDerivatives(self, db, derivs):
        return

    def label(self):
        return "CRKSPH_mod_package"

    def initialize(self, t, dt, db, state, derivs):
        # Grab the CRKSPH arrays.
        A0_fl = state.scalarFields(HydroFieldNames.A0_CRKSPH)
        A_fl = state.scalarFields(HydroFieldNames.A_CRKSPH)
        B_fl = state.vectorFields(HydroFieldNames.B_CRKSPH)
        A0 = A0_fl[0]
        A = A_fl[0]
        B = B_fl[0]
        print "A", A.internalValues()
        return
| 26.756098 | 80 | 0.539654 | 118 | 1,097 | 4.822034 | 0.398305 | 0.063269 | 0.068541 | 0.031634 | 0.070299 | 0.070299 | 0 | 0 | 0 | 0 | 0 | 0.014303 | 0.235187 | 1,097 | 40 | 81 | 27.425 | 0.663886 | 0.226983 | 0 | 0.208333 | 0 | 0 | 0.030842 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.041667 | null | null | 0.041667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
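A hedged sketch of how such a package is typically hooked into a Spheral run script so that initialize() is called each cycle; the integrator construction is elided, and appendPhysicsPackage is assumed to be available on the integrator as in the usual Spheral test scripts.

pkg = CRKSPH_mod_package()
integrator.appendPhysicsPackage(pkg)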
82d3d58b46fde9d57d6d1387e15cc36141a10208 | 7,676 | py | Python | movie.py | jmclinn/mapdraw | bdbddb164a82a3cf9b2673006caae4274948a420 | [
"MIT"
] | null | null | null | movie.py | jmclinn/mapdraw | bdbddb164a82a3cf9b2673006caae4274948a420 | [
"MIT"
] | null | null | null | movie.py | jmclinn/mapdraw | bdbddb164a82a3cf9b2673006caae4274948a420 | [
"MIT"
] | null | null | null | import os,time
## File Variable (USER INPUT)
## ==========================
## if multiple files are being accessed to create movie...
## ...specify the beginning and ending of the file names...
## ...and the date list text file in the variables below
## Please use True or False to set whether multiple files will be accessed for movie
file_is_variable = False
## If file_is_variable = True
## --------------------------
## make sure to leave trailing slash '/' on 'path_to_files'
path_to_files = '/path/to/files/'
## For series of files with similar prefixes (file_part1) and filetypes (file_part2)
file_part1 = 'pre.fixes.'
file_part2 = '.nc'
## location of file listing (with each entry on a new line) the variable part of the filename
dates_list_text_file = '/path/to/file/variable_list.txt'
## If file_is_variable = False
## ---------------------------
#file = '/path/to/single/file.nc'
file = '/Users/Jon/Documents/other_projects/Aluie/visuals/1-12/mapdraw/sgs.nc'
## Variables (USER INPUT)
## ======================
## all variable lists must be the same length
## set unused variables equal to '_empty_'
## if variable requires double-quotes on command line include them --> '" ... "'
## -----------------------------------------------------------------------------
data = 'sgsflux' #cannot be '_empty_'
lat = 'u_lat' #cannot be '_empty_'
lon = 'u_lon' #cannot be '_empty_'
depth = 'w_dep,9' #cannot be '_empty_'
mask = '-1e33,#000000'
maxr = '100' #use for 'max'
minr = '-100' #use for 'min'
norm = '_empty_'
colors = '"0:#0000AA,45:#0000FF,50:#FFFFFF,55:#FF0000,100:#AA0000"'
clr_min_max = '_empty_'
title = '_empty_'
crop = '_empty_'
lines = '_empty_'
## Sphere (for mapping onto Earth's spherical representation)
## ----------------------------------------------------------
## For use of 'sphere' set to True. If not leave False.
sphere_mapping = False
## Number of images (must match other variable list lengths from above)
sphere_frames = 3
## Start and stop points of sphere rotation (leave start/stop the same for no rotation in lat/lon)
sphere_lon_start = -10
sphere_lon_stop = 10
sphere_lat_start = -10
sphere_lat_stop = 10
## 'zoom' argument described in README file (leave False if zoom = 1)
zoom = 1.5
## Primary Variable (USER INPUT)
## =============================
## choose from the variables above
## specify without quotes
## if not a list will only output single result
## --------------------------------------------
primary_variable = file
## Save Location (USER INPUT)
## ==========================
## provide folder location (without filename(s))
## ---------------------------------------------
save = '/Users/Jon/Desktop/'
## Image Filename Prefix (USER INPUT)
## ==================================
## prefix for output filenames before auto-incremented counter
## -----------------------------------------------------------
file_prefix = 'img_'
## Image Counter Start (USER INPUT)
## ================================
## start of auto-incremented counter
## ---------------------------------
count_start = 0
## Image File Type (USER INPUT)
## ============================
## ex: '.png' or '.jpg'
## --------------------
img_type = '.png'
## Display Toggle (USER INPUT)
## ==========================
## toggle if each image displays in the loop
## use 'yes' or 'no' to control display preference
## -----------------------------------------------
display = 'no'
# # # # # # # # # # # # # # # # # # # # # # # # #
# ---- NO USER INPUTS AFTER THIS POINT ---- #
# # # # # # # # # # # # # # # # # # # # # # # # #
## If 'file' is variable this establishes list of files to loop through (Do Not Alter)
## ===================================================================================
if file_is_variable:
    file1 = []
    file0 = open(dates_list_text_file,'r').read().splitlines()
    for line in file0:
        file1.append(str(path_to_files) + str(file_part1) + str(line) + str(file_part2))
    file = file1
    primary_variable = file

## Parsing of 'sphere' rotation inputs (Do Not Alter)
## ==================================================

if sphere_mapping:
    lon_step = ( sphere_lon_stop - sphere_lon_start ) / ( sphere_frames - 1 )
    lat_step = ( sphere_lat_stop - sphere_lat_start ) / ( sphere_frames - 1 )
    sphere = []
    for i in range(sphere_frames):
        sphere.append(str(sphere_lon_start + lon_step * i)+','+str(sphere_lat_start + lat_step * i))
    primary_variable = sphere
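# Worked example of the interpolation above, using this file's defaults: with
# sphere_frames = 3 and start/stop of -10/10 on both axes, each step is
# (10 - (-10)) / (3 - 1) = 10, so the generated 'sphere' arguments would be
# '-10,-10', '0,0' and '10,10'.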
## Defining & Executing Command Expression (Do Not Alter)
## ======================================================
displayx = 'display ' + display
command = displayx
if title != '_empty_':
    titlex = ' title ' + str(title)
    command = command + titlex
if lines != '_empty_':
    linesx = ' lines ' + str(lines)
    command = command + linesx

if type(primary_variable) is list:
    loop_len = len(primary_variable)
else:
    loop_len = 1

base_command = command  # bug fix: snapshot the shared prefix so per-frame arguments do not accumulate across loop iterations

for i in range(loop_len):

    savex = ' save ' + str(save) + str(file_prefix) + str(i + int(count_start)) + str(img_type)
    command = base_command + savex

    if type(file) is list:
        filei = file[i]
    else:
        filei = file
    if i != '_empty_':
        filex = ' file ' + str(filei)
        command = command + filex

    if type(data) is list:
        datai = data[i]
    else:
        datai = data
    if datai != '_empty_':
        datax = ' data ' + str(datai)
        command = command + datax

    if type(lat) is list:
        lati = lat[i]
    else:
        lati = lat
    if lati != '_empty_':
        latx = ' lat ' + str(lati)
        command = command + latx

    if type(lon) is list:
        loni = lon[i]
    else:
        loni = lon
    if loni != '_empty_':
        lonx = ' lon ' + str(loni)
        command = command + lonx

    if type(depth) is list:
        depthi = depth[i]
    else:
        depthi = depth
    if depthi != '_empty_':
        depthx = ' depth ' + str(depthi)
        command = command + depthx

    if type(mask) is list:
        maski = mask[i]
    else:
        maski = mask
    if maski != '_empty_':
        maskx = ' mask ' + str(maski)
        command = command + maskx

    if type(maxr) is list:
        maxri = maxr[i]
    else:
        maxri = maxr
    if maxri != '_empty_':
        maxrx = ' max ' + str(maxri)
        command = command + maxrx

    if type(minr) is list:
        minri = minr[i]
    else:
        minri = minr
    if minri != '_empty_':
        minrx = ' min ' + str(minri)
        command = command + minrx

    if type(norm) is list:
        normi = norm[i]
    else:
        normi = norm
    if normi != '_empty_':
        normx = ' norm ' + str(normi)
        command = command + normx

    if type(crop) is list:
        cropi = crop[i]
    else:
        cropi = crop
    if cropi != '_empty_':
        cropx = ' crop ' + str(cropi)
        command = command + cropx

    if type(colors) is list:
        colorsi = colors[i]
    else:
        colorsi = colors
    if colorsi != '_empty_':
        colorsx = ' colors ' + str(colorsi)
        command = command + colorsx

    if type(clr_min_max) is list:
        clr_min_maxi = clr_min_max[i]
    else:
        clr_min_maxi = clr_min_max
    if clr_min_maxi != '_empty_':
        clr_min_maxx = ' clr_min_max ' + str(clr_min_maxi)
        command = command + clr_min_maxx

    if sphere_mapping:
        spherei = sphere[i]
        spherex = ' sphere ' + str(spherei)
        command = command + spherex

    if type(zoom) is list:
        zoomi = zoom[i]
    elif zoom:
        zoomi = zoom
    if zoom:
        zoomx = ' zoom ' + str(zoomi)
        command = command + zoomx

    time0 = time.time()
    os.system('python map.py ' + command)
    if display == 'no':
        print str(i) + ' - ' + str(round((time.time() - time0),2)) + ' sec' | 28.220588 | 98 | 0.549635 | 934 | 7,676 | 4.359743 | 0.269807 | 0.058448 | 0.017191 | 0.015717 | 0.017436 | 0.017436 | 0 | 0 | 0 | 0 | 0 | 0.012605 | 0.224857 | 7,676 | 272 | 99 | 28.220588 | 0.671765 | 0.378843 | 0 | 0.10559 | 0 | 0.012422 | 0.116482 | 0.033965 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.006211 | null | null | 0.006211 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
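For orientation, a hedged reconstruction of the single command the loop above would issue under this file's defaults (one frame, sphere mapping off):

# python map.py display no save /Users/Jon/Desktop/img_0.png \
#     file /Users/Jon/Documents/other_projects/Aluie/visuals/1-12/mapdraw/sgs.nc \
#     data sgsflux lat u_lat lon u_lon depth w_dep,9 mask -1e33,#000000 \
#     max 100 min -100 colors "0:#0000AA,45:#0000FF,50:#FFFFFF,55:#FF0000,100:#AA0000" zoom 1.5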
82e0abe3e486e3352d2b626c47850728c42c4ae5 | 2,719 | py | Python | robot_con/baxter/baxter_client.py | takuya-ki/wrs | f6e1009b94332504042fbde9b39323410394ecde | [
"MIT"
] | 23 | 2021-04-02T09:02:04.000Z | 2022-03-22T05:31:03.000Z | robot_con/baxter/baxter_client.py | takuya-ki/wrs | f6e1009b94332504042fbde9b39323410394ecde | [
"MIT"
] | 35 | 2021-04-12T09:41:05.000Z | 2022-03-26T13:32:46.000Z | robot_con/baxter/baxter_client.py | takuya-ki/wrs | f6e1009b94332504042fbde9b39323410394ecde | [
"MIT"
] | 16 | 2021-03-30T11:55:45.000Z | 2022-03-30T07:10:59.000Z | import robotconn.rpc.baxterrobot.baxter_server_pb2 as bxtsp
import robotconn.rpc.baxterrobot.baxter_server_pb2_grpc as bxtspgc
import grpc
import pickle
import numpy as np
class BaxterClient(object):

    def __init__(self, host="localhost:18300"):
        channel = grpc.insecure_channel(host)
        self.stub = bxtspgc.BaxterServerStub(channel)

    def bxt_set_gripper(self, pos=100, armname="rgt"):
        self.stub.bxt_set_gripper(bxtsp.Gripper_pos_armname(pos=pos, armname=armname))

    def bxt_get_gripper(self, armname="rgt"):
        return self.stub.bxt_get_gripper(bxtsp.Armname(armname=armname))

    def bxt_get_jnts(self, armname="rgt"):
        jnts = pickle.loads(self.stub.bxt_get_jnts(bxtsp.Armname(armname=armname)).jnt_angles)
        jnts = [jnts["right_s0"], jnts["right_s1"], jnts["right_e0"], jnts["right_e1"], jnts["right_w0"], jnts["right_w1"], jnts["right_w2"]] \
            if armname == "rgt" else [jnts["left_s0"], jnts["left_s1"], jnts["left_e0"], jnts["left_e1"], jnts["left_w0"], jnts["left_w1"], jnts["left_w2"]]
        jnts = [np.rad2deg(jnt) for jnt in jnts]
        return jnts

    def bxt_movejnts(self, jnt_angles=[], speed=.5, armname="rgt"):
        self.stub.bxt_movejnts(bxtsp.Jnt_angles_armname(jnt_angles=np.array(jnt_angles, dtype="float").tobytes(), speed=speed, armname=armname))

    def bxt_movejnts_cont(self, jnt_angles_list=[], speed=.2, armname="rgt"):
        self.stub.bxt_movejnts_cont(bxtsp.Jnt_angles_armname(jnt_angles=np.array(jnt_angles_list, dtype="float").tobytes(), speed=speed, armname=armname))

    def bxt_get_force(self, armname):
        return np.frombuffer(self.stub.bxt_get_force(bxtsp.Armname(armname=armname)).list).tolist()

    def bxt_get_image(self, camera_name):
        image = self.stub.bxt_get_image(bxtsp.Camera_name(name=camera_name)).list
        image = np.frombuffer(image)
        image = np.reshape(image, (200, 320, 3)).astype("uint8")
        # image = image[:,:,1]
        return image


if __name__ == "__main__":
    import time
    bc = BaxterClient(host="10.1.0.24:18300")
    # tic = time.time()
    # imgx = hcc.getimgbytes()
    # toc = time.time()
    # td = toc-tic
    # tic = time.time()
    # imgxs = hcc.getimgstr()
    # toc = time.time()
    # td2 = toc-tic
    # print(td, td2)
    angle_rgt = bc.bxt_get_jnts("rgt")
    # print angle_rgt
    # print(angle_rgt[-1])
    #
    #
    # angle_rgt[-1] = angle_rgt[-1] - 50.0
    #
    # bc.bxt_movejnts(angle_rgt)
    print(bc.bxt_get_jnts(armname="rgt"))
    print(bc.bxt_get_jnts(armname="lft"))
    import cv2 as cv
    cv.imshow("w", bc.bxt_get_image("head_camera"))
    cv.waitKey(0)
    # print bc.bxt_get_jnts("rgt")
    # print(eval("a="+bc.bxt_get_jnts())) | 38.842857 | 154 | 0.668996 | 397 | 2,719 | 4.34005 | 0.261965 | 0.048752 | 0.04469 | 0.034823 | 0.306442 | 0.262914 | 0.190366 | 0.107951 | 0.107951 | 0.053395 | 0 | 0.025367 | 0.173593 | 2,719 | 70 | 155 | 38.842857 | 0.741433 | 0.128724 | 0 | 0 | 0 | 0 | 0.08383 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.205128 | false | 0 | 0.179487 | 0.051282 | 0.512821 | 0.051282 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
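A short, hedged usage sketch for BaxterClient beyond the __main__ block above (server address and gripper value are illustrative):

bc = BaxterClient(host="10.1.0.24:18300")
bc.bxt_set_gripper(pos=0, armname="rgt")  # close the right gripper
print(bc.bxt_get_force("rgt"))            # force reading as a plain Python list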
82e4981e82370f4b216afc9af7f4136625ccd93f | 3,644 | py | Python | fit1d/common/fit1d.py | michael-amat/fit1d | 0cd42874e3eba4353c564809c317510b626dee25 | [
"BSD-2-Clause"
] | null | null | null | fit1d/common/fit1d.py | michael-amat/fit1d | 0cd42874e3eba4353c564809c317510b626dee25 | [
"BSD-2-Clause"
] | null | null | null | fit1d/common/fit1d.py | michael-amat/fit1d | 0cd42874e3eba4353c564809c317510b626dee25 | [
"BSD-2-Clause"
] | 9 | 2019-02-24T12:51:28.000Z | 2019-03-22T09:25:45.000Z | """
fit1d package is designed to provide an organized toolbox for different types of
1D fits that can be performed.
It is easy to add new fits and other functionalities
"""
from abc import ABC, abstractmethod
import numpy as np
from typing import List,Tuple
from fit1d.common.model import Model, ModelMock
from fit1d.common.outlier import OutLier
from fit1d.common.fit_data import FitData
class Fit1D(ABC):
    """
    This is the main class of the fit1d package. It is used to allow the user to execute
    fit and eval methods, in addition to calc_RMS and calc_error static services.
    The properties of this class are the _model and _outlier objects and a _use_remove_outliers
    boolean
    """
    _outlier: OutLier
    _use_remove_outliers: bool
    _fit_data: FitData

    # interface methods
    def fit(self, x: np.ndarray, y: np.ndarray) -> FitData:
        self._fit_data.x = x
        self._fit_data.y = y
        if self._use_remove_outliers:
            self._remove_outlier()
        else:
            self._calc_fit_and_update_fit_data()
        return self._fit_data

    def eval(self, x: np.ndarray = None, model: Model = None) -> np.ndarray:
        if x is not None:
            self._fit_data.x = x
        if model is not None:
            self._fit_data.model = model
        self._calc_eval()
        return self._fit_data.y_fit

    def calc_error(self):
        """
        calc error vector , update _fit_data
        :return:
        """
        if self._fit_data.y is not None and self._fit_data.y_fit is not None:
            self._fit_data.error_vector = self._fit_data.y - self._fit_data.y_fit

    def calc_rms(self):
        if self._fit_data.error_vector is not None:
            self._fit_data.rms = (sum(self._fit_data.error_vector ** 2) / len(self._fit_data.error_vector)) ** 0.5

    def get_fit_data(self) -> FitData:
        return self._fit_data

    # abstract methods
    @abstractmethod
    def _calc_fit(self):
        """
        abstractmethod:
        run fit calculation of the data update model in _fit_data.model
        :return: Null
        """
        pass

    @abstractmethod
    def _calc_eval(self):
        """
        abstractmethod:
        subclass calculate model eval for inner x and model
        update _fit_data.y_fit
        :return: Void
        """
        pass

    # internal methods
    def _update_fit_data(self):
        self._calc_eval()
        self.calc_error()
        self.calc_rms()

    def _remove_outlier(self):
        while True:
            self._calc_fit_and_update_fit_data()
            indexes_to_remove = self._outlier.find_outliers(self._fit_data.error_vector)
            if len(indexes_to_remove) == 0:
                break
            else:
                self._remove_indexes(indexes_to_remove)

    def _remove_indexes(self, ind):
        self._fit_data.x = np.delete(self._fit_data.x, ind)
        self._fit_data.y = np.delete(self._fit_data.y, ind)

    def _calc_fit_and_update_fit_data(self):
        self._calc_fit()
        self._update_fit_data()


class Fit1DMock(Fit1D):
    """ Mock class. Used only for tests """
    def __init__(self, outlier: OutLier, remove_outliers: bool):
        self._fit_data = FitData()
        self._outlier = outlier
        self._use_remove_outliers = remove_outliers

    def _calc_fit(self):
        self._fit_data.model = ModelMock({"param1": 5.5})

    def _calc_eval(self) -> np.ndarray:
        if self._fit_data.y is None or len(self._fit_data.y) == 4:
            self._fit_data.y_fit = np.array([11, 22, 33, 44])
        else:
            self._fit_data.y_fit = np.array([11, 33, 44])
| 30.366667 | 114 | 0.638035 | 518 | 3,644 | 4.183398 | 0.227799 | 0.12275 | 0.137056 | 0.066451 | 0.223812 | 0.146747 | 0.067374 | 0.02215 | 0 | 0 | 0 | 0.011751 | 0.27607 | 3,644 | 119 | 115 | 30.621849 | 0.809704 | 0.208013 | 0 | 0.246377 | 0 | 0 | 0.002202 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.202899 | false | 0.028986 | 0.086957 | 0.014493 | 0.405797 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
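A minimal, hedged sketch of driving the mock above; SomeOutLier stands in for any OutLier subclass implementing find_outliers():

outlier = SomeOutLier()  # hypothetical OutLier implementation
fit = Fit1DMock(outlier, remove_outliers=False)
fit_data = fit.fit(np.array([1., 2., 3., 4.]), np.array([11., 22., 33., 44.]))
print(fit_data.rms)  # 0.0, since the mock's y_fit matches y exactly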
7d53f22522d63caa5e1b6eeef4ed280bfe59205b | 5,646 | py | Python | tests/unit/test_crypt.py | oba11/salt | ddc0286d57c5ce864b60bf43e5bc3007bf7c2549 | [
"Apache-2.0"
] | null | null | null | tests/unit/test_crypt.py | oba11/salt | ddc0286d57c5ce864b60bf43e5bc3007bf7c2549 | [
"Apache-2.0"
] | null | null | null | tests/unit/test_crypt.py | oba11/salt | ddc0286d57c5ce864b60bf43e5bc3007bf7c2549 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
# python libs
from __future__ import absolute_import
import os
# salt testing libs
from tests.support.unit import TestCase, skipIf
from tests.support.mock import patch, call, mock_open, NO_MOCK, NO_MOCK_REASON, MagicMock
# salt libs
import salt.utils
import salt.utils.files
from salt import crypt
# third-party libs
try:
    from Cryptodome.PublicKey import RSA  # pylint: disable=unused-import
    HAS_PYCRYPTO_RSA = True
except ImportError:
    HAS_PYCRYPTO_RSA = False

if not HAS_PYCRYPTO_RSA:
    try:
        from Crypto.PublicKey import RSA
        HAS_PYCRYPTO_RSA = True
    except ImportError:
        HAS_PYCRYPTO_RSA = False
PRIVKEY_DATA = (
'-----BEGIN RSA PRIVATE KEY-----\n'
'MIIEpAIBAAKCAQEA75GR6ZTv5JOv90Vq8tKhKC7YQnhDIo2hM0HVziTEk5R4UQBW\n'
'a0CKytFMbTONY2msEDwX9iA0x7F5Lgj0X8eD4ZMsYqLzqjWMekLC8bjhxc+EuPo9\n'
'Dygu3mJ2VgRC7XhlFpmdo5NN8J2E7B/CNB3R4hOcMMZNZdi0xLtFoTfwU61UPfFX\n'
'14mV2laqLbvDEfQLJhUTDeFFV8EN5Z4H1ttLP3sMXJvc3EvM0JiDVj4l1TWFUHHz\n'
'eFgCA1Im0lv8i7PFrgW7nyMfK9uDSsUmIp7k6ai4tVzwkTmV5PsriP1ju88Lo3MB\n'
'4/sUmDv/JmlZ9YyzTO3Po8Uz3Aeq9HJWyBWHAQIDAQABAoIBAGOzBzBYZUWRGOgl\n'
'IY8QjTT12dY/ymC05GM6gMobjxuD7FZ5d32HDLu/QrknfS3kKlFPUQGDAbQhbbb0\n'
'zw6VL5NO9mfOPO2W/3FaG1sRgBQcerWonoSSSn8OJwVBHMFLG3a+U1Zh1UvPoiPK\n'
'S734swIM+zFpNYivGPvOm/muF/waFf8tF/47t1cwt/JGXYQnkG/P7z0vp47Irpsb\n'
'Yjw7vPe4BnbY6SppSxscW3KoV7GtJLFKIxAXbxsuJMF/rYe3O3w2VKJ1Sug1VDJl\n'
'/GytwAkSUer84WwP2b07Wn4c5pCnmLslMgXCLkENgi1NnJMhYVOnckxGDZk54hqP\n'
'9RbLnkkCgYEA/yKuWEvgdzYRYkqpzB0l9ka7Y00CV4Dha9Of6GjQi9i4VCJ/UFVr\n'
'UlhTo5y0ZzpcDAPcoZf5CFZsD90a/BpQ3YTtdln2MMCL/Kr3QFmetkmDrt+3wYnX\n'
'sKESfsa2nZdOATRpl1antpwyD4RzsAeOPwBiACj4fkq5iZJBSI0bxrMCgYEA8GFi\n'
'qAjgKh81/Uai6KWTOW2kX02LEMVRrnZLQ9VPPLGid4KZDDk1/dEfxjjkcyOxX1Ux\n'
'Klu4W8ZEdZyzPcJrfk7PdopfGOfrhWzkREK9C40H7ou/1jUecq/STPfSOmxh3Y+D\n'
'ifMNO6z4sQAHx8VaHaxVsJ7SGR/spr0pkZL+NXsCgYEA84rIgBKWB1W+TGRXJzdf\n'
'yHIGaCjXpm2pQMN3LmP3RrcuZWm0vBt94dHcrR5l+u/zc6iwEDTAjJvqdU4rdyEr\n'
'tfkwr7v6TNlQB3WvpWanIPyVzfVSNFX/ZWSsAgZvxYjr9ixw6vzWBXOeOb/Gqu7b\n'
'cvpLkjmJ0wxDhbXtyXKhZA8CgYBZyvcQb+hUs732M4mtQBSD0kohc5TsGdlOQ1AQ\n'
'McFcmbpnzDghkclyW8jzwdLMk9uxEeDAwuxWE/UEvhlSi6qdzxC+Zifp5NBc0fVe\n'
'7lMx2mfJGxj5CnSqQLVdHQHB4zSXkAGB6XHbBd0MOUeuvzDPfs2voVQ4IG3FR0oc\n'
'3/znuwKBgQChZGH3McQcxmLA28aUwOVbWssfXKdDCsiJO+PEXXlL0maO3SbnFn+Q\n'
'Tyf8oHI5cdP7AbwDSx9bUfRPjg9dKKmATBFr2bn216pjGxK0OjYOCntFTVr0psRB\n'
'CrKg52Qrq71/2l4V2NLQZU40Dr1bN9V+Ftd9L0pvpCAEAWpIbLXGDw==\n'
'-----END RSA PRIVATE KEY-----')
PUBKEY_DATA = (
'-----BEGIN PUBLIC KEY-----\n'
'MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA75GR6ZTv5JOv90Vq8tKh\n'
'KC7YQnhDIo2hM0HVziTEk5R4UQBWa0CKytFMbTONY2msEDwX9iA0x7F5Lgj0X8eD\n'
'4ZMsYqLzqjWMekLC8bjhxc+EuPo9Dygu3mJ2VgRC7XhlFpmdo5NN8J2E7B/CNB3R\n'
'4hOcMMZNZdi0xLtFoTfwU61UPfFX14mV2laqLbvDEfQLJhUTDeFFV8EN5Z4H1ttL\n'
'P3sMXJvc3EvM0JiDVj4l1TWFUHHzeFgCA1Im0lv8i7PFrgW7nyMfK9uDSsUmIp7k\n'
'6ai4tVzwkTmV5PsriP1ju88Lo3MB4/sUmDv/JmlZ9YyzTO3Po8Uz3Aeq9HJWyBWH\n'
'AQIDAQAB\n'
'-----END PUBLIC KEY-----')
MSG = b'It\'s me, Mario'
SIG = (
b'\x07\xf3\xb1\xe7\xdb\x06\xf4_\xe2\xdc\xcb!F\xfb\xbex{W\x1d\xe4E'
b'\xd3\r\xc5\x90\xca(\x05\x1d\x99\x8b\x1aug\x9f\x95>\x94\x7f\xe3+'
b'\x12\xfa\x9c\xd4\xb8\x02]\x0e\xa5\xa3LL\xc3\xa2\x8f+\x83Z\x1b\x17'
b'\xbfT\xd3\xc7\xfd\x0b\xf4\xd7J\xfe^\x86q"I\xa3x\xbc\xd3$\xe9M<\xe1'
b'\x07\xad\xf2_\x9f\xfa\xf7g(~\xd8\xf5\xe7\xda-\xa3Ko\xfc.\x99\xcf'
b'\x9b\xb9\xc1U\x97\x82\'\xcb\xc6\x08\xaa\xa0\xe4\xd0\xc1+\xfc\x86'
b'\r\xe4y\xb1#\xd3\x1dS\x96D28\xc4\xd5\r\xd4\x98\x1a44"\xd7\xc2\xb4'
b']\xa7\x0f\xa7Db\x85G\x8c\xd6\x94!\x8af1O\xf6g\xd7\x03\xfd\xb3\xbc'
b'\xce\x9f\xe7\x015\xb8\x1d]AHK\xa0\x14m\xda=O\xa7\xde\xf2\xff\x9b'
b'\x8e\x83\xc8j\x11\x1a\x98\x85\xde\xc5\x91\x07\x84!\x12^4\xcb\xa8'
b'\x98\x8a\x8a&#\xb9(#?\x80\x15\x9eW\xb5\x12\xd1\x95S\xf2<G\xeb\xf1'
b'\x14H\xb2\xc4>\xc3A\xed\x86x~\xcfU\xd5Q\xfe~\x10\xd2\x9b')
@skipIf(NO_MOCK, NO_MOCK_REASON)
@skipIf(not HAS_PYCRYPTO_RSA, 'pycrypto >= 2.6 is not available')
class CryptTestCase(TestCase):
    def test_gen_keys(self):
        with patch.multiple(os, umask=MagicMock(), chmod=MagicMock(), chown=MagicMock,
                            access=MagicMock(return_value=True)):
            with patch('salt.utils.files.fopen', mock_open()):
                open_priv_wb = call('/keydir/keyname.pem', 'wb+')
                open_pub_wb = call('/keydir/keyname.pub', 'wb+')
                with patch('os.path.isfile', return_value=True):
                    self.assertEqual(crypt.gen_keys('/keydir', 'keyname', 2048), '/keydir/keyname.pem')
                    self.assertNotIn(open_priv_wb, salt.utils.files.fopen.mock_calls)
                    self.assertNotIn(open_pub_wb, salt.utils.files.fopen.mock_calls)
                with patch('os.path.isfile', return_value=False):
                    with patch('salt.utils.files.fopen', mock_open()):
                        crypt.gen_keys('/keydir', 'keyname', 2048)
                        salt.utils.files.fopen.assert_has_calls([open_priv_wb, open_pub_wb], any_order=True)

    def test_sign_message(self):
        key = RSA.importKey(PRIVKEY_DATA)
        with patch('salt.crypt._get_rsa_key', return_value=key):
            self.assertEqual(SIG, salt.crypt.sign_message('/keydir/keyname.pem', MSG))

    def test_verify_signature(self):
        with patch('salt.utils.files.fopen', mock_open(read_data=PUBKEY_DATA)):
            self.assertTrue(crypt.verify_signature('/keydir/keyname.pub', MSG, SIG)) | 49.526316 | 108 | 0.732554 | 620 | 5,646 | 6.56129 | 0.504839 | 0.017699 | 0.02409 | 0.028024 | 0.106686 | 0.097837 | 0.083579 | 0.053097 | 0.026549 | 0.026549 | 0 | 0.10547 | 0.145236 | 5,646 | 113 | 109 | 49.964602 | 0.737464 | 0.017712 | 0 | 0.106383 | 0 | 0.031915 | 0.571403 | 0.514353 | 0 | 0 | 0 | 0 | 0.06383 | 1 | 0.031915 | false | 0 | 0.12766 | 0 | 0.170213 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
7d55cd544a02e7f8eda686f396f1e614dce7adb0 | 11,660 | py | Python | msg/tools/genmsg/test/test_genmsg_msgs.py | sikuner/Firmware_Marine | 80411dc4eb5aa9dc8eb3ca8ff6d59d1cf081a010 | [
"BSD-3-Clause"
] | 17 | 2020-03-13T00:10:28.000Z | 2021-09-06T17:13:17.000Z | msg/tools/genmsg/test/test_genmsg_msgs.py | sikuner/Firmware_Marine | 80411dc4eb5aa9dc8eb3ca8ff6d59d1cf081a010 | [
"BSD-3-Clause"
] | 1 | 2020-08-24T03:28:49.000Z | 2020-08-24T03:28:49.000Z | msg/tools/genmsg/test/test_genmsg_msgs.py | sikuner/Firmware_Marine | 80411dc4eb5aa9dc8eb3ca8ff6d59d1cf081a010 | [
"BSD-3-Clause"
] | 2 | 2020-03-13T09:05:32.000Z | 2021-08-13T08:28:14.000Z | # Software License Agreement (BSD License)
#
# Copyright (c) 2009, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import os
import sys
import random
def test_bare_msg_type():
import genmsg.msgs
tests = [(None, None), ('String', 'String'), ('std_msgs/String', 'std_msgs/String'),
('String[10]', 'String'), ('string[10]', 'string'), ('std_msgs/String[10]', 'std_msgs/String'),
]
for val, res in tests:
assert res == genmsg.msgs.bare_msg_type(val)
PKG = 'genmsg'
def test_resolve_type():
from genmsg.msgs import resolve_type, bare_msg_type
for t in ['string', 'string[]', 'string[14]', 'int32', 'int32[]']:
bt = bare_msg_type(t)
        assert t == resolve_type(t, PKG)
assert 'foo/string' == resolve_type('foo/string', PKG)
assert 'std_msgs/Header' == resolve_type('Header', 'roslib')
assert 'std_msgs/Header' == resolve_type('std_msgs/Header', 'roslib')
assert 'std_msgs/Header' == resolve_type('Header', 'stereo_msgs')
assert 'std_msgs/String' == resolve_type('String', 'std_msgs')
assert 'std_msgs/String' == resolve_type('std_msgs/String', 'std_msgs')
assert 'std_msgs/String' == resolve_type('std_msgs/String', PKG)
assert 'std_msgs/String[]' == resolve_type('std_msgs/String[]', PKG)
def test_parse_type():
import genmsg.msgs
tests = [
('a', ('a', False, None)),
('int8', ('int8', False, None)),
('std_msgs/String', ('std_msgs/String', False, None)),
('a[]', ('a', True, None)),
('int8[]', ('int8', True, None)),
('std_msgs/String[]', ('std_msgs/String', True, None)),
('a[1]', ('a', True, 1)),
('int8[1]', ('int8', True, 1)),
('std_msgs/String[1]', ('std_msgs/String', True, 1)),
('a[11]', ('a', True, 11)),
('int8[11]', ('int8', True, 11)),
('std_msgs/String[11]', ('std_msgs/String', True, 11)),
]
for val, res in tests:
assert res == genmsg.msgs.parse_type(val)
fail = ['a[1][2]', 'a[][]', '', None, 'a[', 'a[[1]', 'a[1]]']
for f in fail:
try:
genmsg.msgs.parse_type(f)
assert False, "should have failed on %s"%f
except ValueError as e:
pass
def test_Constant():
import genmsg.msgs
vals = [random.randint(0, 1000) for i in range(0, 3)]
type_, name, val = [str(x) for x in vals]
x = genmsg.msgs.Constant(type_, name, val, str(val))
assert type_ == x.type
assert name == x.name
assert val == x.val
assert x == genmsg.msgs.Constant(type_, name, val, str(val))
assert x != 1
assert not x == 1
assert x != genmsg.msgs.Constant('baz', name, val, str(val))
assert x != genmsg.msgs.Constant(type_, 'foo', val, str(val))
assert x != genmsg.msgs.Constant(type_, name, 'foo', 'foo')
# tripwire
assert repr(x)
assert str(x)
try:
genmsg.msgs.Constant(None, name, val, str(val))
assert False, "should have raised"
except: pass
try:
genmsg.msgs.Constant(type_, None, val, str(val))
assert False, "should have raised"
except: pass
try:
genmsg.msgs.Constant(type_, name, None, 'None')
assert False, "should have raised"
except: pass
try:
genmsg.msgs.Constant(type_, name, val, None)
assert False, "should have raised"
except: pass
try:
x.foo = 'bar'
assert False, 'Constant should not allow arbitrary attr assignment'
except: pass
def test_MsgSpec():
def sub_test_MsgSpec(types, names, constants, text, full_name, has_header):
m = MsgSpec(types, names, constants, text, full_name)
assert m.types == types
assert m.names == names
assert m.text == text
assert has_header == m.has_header()
assert m.constants == constants
assert list(zip(types, names)) == m.fields()
assert m == MsgSpec(types, names, constants, text, full_name)
return m
from genmsg import MsgSpec, InvalidMsgSpec
from genmsg.msgs import Field
# don't allow duplicate fields
try:
MsgSpec(['int32', 'int64'], ['x', 'x'], [], 'int32 x\nint64 x', 'x/DupFields')
assert False, "should have raised"
except InvalidMsgSpec:
pass
# don't allow invalid fields
try:
MsgSpec(['string['], ['x'], [], 'int32 x\nint64 x', 'x/InvalidFields')
assert False, "should have raised"
except InvalidMsgSpec:
pass
# allow empty msg
empty = sub_test_MsgSpec([], [], [], '', 'x/Nothing', False)
assert [] == empty.fields()
assert [] == empty.parsed_fields()
assert 'x/Nothing' == empty.full_name
assert 'x' == empty.package
assert 'Nothing' == empty.short_name
# one-field
one_field = sub_test_MsgSpec(['int32'], ['x'], [], 'int32 x', 'x/OneInt', False)
# make sure that equals tests every declared field
assert one_field == MsgSpec(['int32'], ['x'], [], 'int32 x', 'x/OneInt')
assert one_field != MsgSpec(['uint32'], ['x'], [], 'int32 x', 'x/OneInt')
assert one_field != MsgSpec(['int32'], ['y'], [], 'int32 x', 'x/OneInt')
assert one_field != MsgSpec(['int32'], ['x'], [], 'uint32 x', 'x/OneInt')
assert one_field != MsgSpec(['int32'], ['x'], [], 'int32 x', 'x/OneIntBad')
# test against __ne__ as well
assert one_field != MsgSpec(['int32'], ['x'], [], 'uint32 x', 'x/OneInt')
assert [Field('x', 'int32')] == one_field.parsed_fields(), "%s vs %s"%([Field('x', 'int32')], one_field.parsed_fields())
#test str
assert "int32 x" == str(one_field).strip()
# test variations of multiple fields and headers
two_fields = sub_test_MsgSpec(['int32', 'string'], ['x', 'str'], [], 'int32 x\nstring str', 'x/TwoFields', False)
assert [Field('x', 'int32'), Field('str', 'string')] == two_fields.parsed_fields()
one_header = sub_test_MsgSpec(['std_msgs/Header'], ['header'], [], 'Header header', 'x/OneHeader', True)
header_and_fields = sub_test_MsgSpec(['std_msgs/Header', 'int32', 'string'], ['header', 'x', 'str'], [], 'Header header\nint32 x\nstring str', 'x/HeaderAndFields', True)
embed_types = sub_test_MsgSpec(['std_msgs/Header', 'std_msgs/Int32', 'string'], ['header', 'x', 'str'], [], 'Header header\nstd_msgs/Int32 x\nstring str', 'x/EmbedTypes', True)
#test strify
assert "int32 x\nstring str" == str(two_fields).strip()
# types and names mismatch
try:
        MsgSpec(['int32', 'int32'], ['intval'], [], 'int32 intval\nint32 y', 'x/Mismatch')
assert False, "types and names must align"
except: pass
# test (not) equals against non msgspec
assert not (one_field == 1)
assert one_field != 1
# test constants
from genmsg.msgs import Constant
msgspec = MsgSpec(['int32'], ['x'], [Constant('int8', 'c', 1, '1')], 'int8 c=1\nuint32 x', 'x/Constants')
assert msgspec.constants == [Constant('int8', 'c', 1, '1')]
# tripwire
str(msgspec)
repr(msgspec)
# test that repr doesn't throw an error
[repr(x) for x in [empty, one_field, one_header, two_fields, embed_types]]
def test_Field():
from genmsg.msgs import Field
field = Field('foo', 'string')
assert field == Field('foo', 'string')
assert field != Field('bar', 'string')
assert field != Field('foo', 'int32')
assert field != 1
assert not field == 1
assert field.name == 'foo'
assert field.type == 'string'
assert field.base_type == 'string'
assert field.is_array == False
assert field.array_len == None
assert field.is_header == False
assert field.is_builtin == True
field = Field('foo', 'std_msgs/String')
assert field.type == 'std_msgs/String'
assert field.base_type == 'std_msgs/String'
assert field.is_array == False
assert field.array_len == None
assert field.is_header == False
assert field.is_builtin == False
field = Field('foo', 'std_msgs/String[5]')
assert field.type == 'std_msgs/String[5]'
assert field.base_type == 'std_msgs/String'
assert field.is_array == True
assert field.array_len == 5
assert field.is_header == False
assert field.is_builtin == False
field = Field('foo', 'std_msgs/String[]')
assert field.type == 'std_msgs/String[]'
assert field.base_type == 'std_msgs/String'
assert field.is_array == True
assert field.array_len == None
assert field.is_header == False
assert field.is_builtin == False
field = Field('foo', 'std_msgs/Header')
assert field.type == 'std_msgs/Header'
assert field.is_header == True
assert field.is_builtin == False
field = Field('foo', 'std_msgs/Header[]')
assert field.type == 'std_msgs/Header[]'
assert field.is_header == False
#tripwire
repr(field)
def test_is_valid_msg_type():
import genmsg.msgs
vals = [
#basic
'F', 'f', 'Foo', 'Foo1',
'std_msgs/String',
# arrays
'Foo[]', 'Foo[1]', 'Foo[10]',
]
for v in vals:
assert genmsg.msgs.is_valid_msg_type(v), "genmsg.msgs.is_valid_msg_type should have returned True for '%s'"%v
# bad cases
vals = [None, '', '#', '%', 'Foo%', 'Woo Woo',
'/', '/String',
'Foo[f]', 'Foo[1d]', 'Foo[-1]', 'Foo[1:10]', 'Foo[', 'Foo]', 'Foo[]Bar']
for v in vals:
assert not genmsg.msgs.is_valid_msg_type(v), "genmsg.msgs.is_valid_msg_type should have returned False for '%s'"%v
def test_is_valid_constant_type():
import genmsg.msgs
valid = ['int8', 'uint8', 'int16', 'uint16', 'int32', 'uint32', 'int64', \
'uint64', 'float32', 'float64', 'char', 'byte', 'string']
invalid = [
'std_msgs/String', '/', 'String',
'time', 'duration','header',
]
for v in valid:
assert genmsg.msgs.is_valid_constant_type(v), "genmsg.msgs.is_valid_constant_type should have returned True for '%s'"%v
for v in invalid:
assert not genmsg.msgs.is_valid_constant_type(v), "genmsg.msgs.is_valid_constant_type should have returned False for '%s'"%v
| 38.996656 | 180 | 0.620583 | 1,567 | 11,660 | 4.495852 | 0.178685 | 0.043719 | 0.055358 | 0.021718 | 0.473243 | 0.421718 | 0.394322 | 0.340099 | 0.295387 | 0.247693 | 0 | 0.019167 | 0.225901 | 11,660 | 298 | 181 | 39.127517 | 0.761356 | 0.165523 | 0 | 0.274882 | 0 | 0 | 0.235896 | 0.015292 | 0 | 0 | 0 | 0 | 0.445498 | 1 | 0.042654 | false | 0.042654 | 0.061611 | 0 | 0.109005 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
7d56e588d7a6fdb0c64b6925b9b5823ebec11f36 | 4,547 | py | Python | tests/tests.py | arck1/aio-counter | ffff58bf14ca2f155be5a54c9385481fce5ee58c | [
"MIT"
] | null | null | null | tests/tests.py | arck1/aio-counter | ffff58bf14ca2f155be5a54c9385481fce5ee58c | [
"MIT"
] | null | null | null | tests/tests.py | arck1/aio-counter | ffff58bf14ca2f155be5a54c9385481fce5ee58c | [
"MIT"
] | null | null | null | import unittest
from asyncio import sleep
from async_unittest import TestCase
from aio_counter import AioCounter
from aio_counter.exceptions import AioCounterException
class TestAioCounter(TestCase):
TIK = float(0.3)
TAK = float(0.6)
TTL = int(1)
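    # timing constants in seconds: TIK/TAK schedule delayed callbacks,
    # TTL is the time-to-live passed to inc()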
@classmethod
def setUpClass(cls) -> None:
super().setUpClass()
cls.counter = AioCounter(loop=cls.loop)
@classmethod
def tearDownClass(cls) -> None:
super().tearDownClass()
cls.counter.close()
def setUp(self) -> None:
self.counter._count = 0
self.counter._incs.clear()
self.counter._decs.clear()
# close all handlers
self.counter.close()
self.counter._handlers.clear()
def tearDown(self) -> None:
self.counter.close()
async def test_dec(self):
assert self.counter.empty()
self.counter._loop.call_later(self.TIK, self.counter.inc_nowait)
assert self.counter.count == 0
        # wait until the delayed inc_nowait increments the counter
count = await self.counter.dec()
assert count == 0
async def test_inc(self):
assert self.counter.empty()
# fill counter
self.counter._count = self.counter.max_count
assert self.counter.count == self.counter.max_count
self.counter._loop.call_later(self.TIK, self.counter.dec_nowait)
assert self.counter.count == self.counter.max_count
        # wait until the delayed dec_nowait decrements the counter
count = await self.counter.inc()
assert count == self.counter.max_count
def test_dec_nowait(self):
assert self.counter.empty()
try:
self.counter.dec_nowait()
except AioCounterException as e:
assert e
else:
assert False
count = self.counter.inc_nowait()
assert count == 1
assert self.counter.count == 1
count = self.counter.dec_nowait()
assert count == 0
assert self.counter.count == 0
def test_inc_nowait(self):
assert self.counter.empty()
count = self.counter.inc_nowait()
assert count == 1
assert self.counter.count == 1
# fill counter
self.counter._count = self.counter.max_count
try:
self.counter.inc_nowait()
except AioCounterException as e:
assert e
else:
assert False
async def test_ttl_inc(self):
assert self.counter.empty()
# inc with ttl = TTL
await self.counter.inc(self.TTL)
assert self.counter.count == 1
# sleep and inc() should run in one loop
await sleep(self.TTL, loop=self.loop)
# check if count was dec
assert self.counter.count == 0
async def test_bulk_inc(self):
"""
        inc() with value > 1 should succeed only if the counter can be
        increased by the full value in one step
:return:
"""
assert self.counter.empty()
# fill counter
self.counter._count = self.counter.max_count - 1
assert self.counter.count == self.counter.max_count - 1
def delayed_check(counter):
assert counter.count == counter.max_count - 1
self.counter._loop.call_later(self.TIK, delayed_check, self.counter)
self.counter._loop.call_later(self.TTL, self.counter.dec_nowait)
assert self.counter.count == self.counter.max_count - 1
await self.counter.inc(value=2)
assert self.counter.count == self.counter.max_count
async def test_bulk_dec(self):
"""
        dec() with value > 1 should succeed only if the counter can be
        decreased by the full value in one step
:return:
"""
assert self.counter.empty()
await self.counter.inc()
assert self.counter.count == 1
def delayed_check(counter):
assert counter.count == 1
self.counter._loop.call_later(self.TIK, delayed_check, self.counter)
self.counter._loop.call_later(self.TTL, self.counter.inc_nowait)
assert self.counter.count == 1
await self.counter.dec(value=2)
assert self.counter.empty()
async def test_ttl_after_dec(self):
assert self.counter.empty()
await self.counter.inc(self.TTL)
assert self.counter.count == 1
count = self.counter.dec_nowait()
assert count == 0
assert self.counter.count == 0
await sleep(self.TTL, loop=self.loop)
if __name__ == '__main__':
unittest.main()
| 25.544944 | 96 | 0.61667 | 569 | 4,547 | 4.803163 | 0.147627 | 0.269667 | 0.149287 | 0.120746 | 0.69667 | 0.615441 | 0.565679 | 0.544457 | 0.4764 | 0.387486 | 0 | 0.009861 | 0.286343 | 4,547 | 177 | 97 | 25.689266 | 0.832357 | 0.051463 | 0 | 0.553398 | 0 | 0 | 0.001978 | 0 | 0 | 0 | 0 | 0 | 0.349515 | 1 | 0.07767 | false | 0 | 0.048544 | 0 | 0.165049 | 0 | 0 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
7d68c3cd5ebdfbe4a4f33c56583ea1d144745710 | 915 | py | Python | chess/pythonchess/docs/conf.py | mahakbansal/ChessAlphaZero | 2b3f823fdc252d7fd32de0b5e4e53aece9082dd5 | [
"MIT"
] | 2 | 2021-02-22T21:53:58.000Z | 2021-04-03T16:40:52.000Z | chess/pythonchess/docs/conf.py | mahakbansal/ChessAlphaZero | 2b3f823fdc252d7fd32de0b5e4e53aece9082dd5 | [
"MIT"
] | 1 | 2018-09-26T03:38:57.000Z | 2018-09-26T03:38:57.000Z | chess/pythonchess/docs/conf.py | mahakbansal/ChessAlphaZero | 2b3f823fdc252d7fd32de0b5e4e53aece9082dd5 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import sys
import os
# Import the chess module.
sys.path.insert(0, os.path.abspath('..'))
import chess
# Autodoc.
extensions = ["sphinx.ext.autodoc"]
autodoc_member_order = 'bysource'
# The suffix of source filenames.
source_suffix = ".rst"
# The master toctree document.
master_doc = "index"
# General information about the project.
project = "python-chess"
copyright = "2014–2018, Niklas Fiekas"
# The version.
version = chess.__version__
release = chess.__version__
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["_build"]
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "default"
| 22.875 | 74 | 0.747541 | 128 | 915 | 5.1875 | 0.617188 | 0.036145 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.012953 | 0.156284 | 915 | 39 | 75 | 23.461538 | 0.845855 | 0.491803 | 0 | 0 | 0 | 0 | 0.20354 | 0 | 0 | 0 | 0 | 0.025641 | 0 | 1 | 0 | false | 0 | 0.25 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
7d6f707bec1ef6f1945e2739232de8ac3b5e6c3e | 1,953 | py | Python | samples/unsharp/unsharp.py | hj424/heterocl | e51b8f7f65ae6ad55c0c2426ab7192c3d8f6702b | [
"Apache-2.0"
] | 7 | 2019-08-20T02:43:44.000Z | 2019-12-13T14:26:05.000Z | samples/unsharp/unsharp.py | hj424/heterocl | e51b8f7f65ae6ad55c0c2426ab7192c3d8f6702b | [
"Apache-2.0"
] | null | null | null | samples/unsharp/unsharp.py | hj424/heterocl | e51b8f7f65ae6ad55c0c2426ab7192c3d8f6702b | [
"Apache-2.0"
] | 1 | 2019-07-25T21:46:50.000Z | 2019-07-25T21:46:50.000Z | import heterocl as hcl
from math import sqrt
hcl.config.init_dtype = hcl.Float()
input_image = hcl.placeholder((480, 640, 3), name = "input")
output_image = hcl.placeholder((480, 640, 3), name = "output")
def unsharp(input_image, output_image):
"""
Helper Functions
"""
def clamp(val, min_, max_):
local = hcl.local(val)
with hcl.if_(val < min_):
local[0] = min_
with hcl.elif_(val > max_):
local[0] = max_
return local[0]
def clamp2D(tensor, min_, max_):
return hcl.compute(tensor.shape, lambda x, y: clamp(tensor[x, y], min_, max_), name = "clamped_" + tensor.name)
def clamp3D(tensor, min_, max_):
return hcl.compute(tensor.shape, lambda x, y, c: clamp(tensor[x, y, c], min_, max_), name = "clamped_" + tensor.name)
def kernel_f(x):
return hcl.exp(-(x * x) / (2 * 1.5 * 1.5)) / sqrt(2 * 3.14159 * 1.5)
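    # normalize the Gaussian taps so that a 9-tap window (offsets -4..4)
    # applied to a constant-255 image sums back to roughly 255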
def kernel(x):
return kernel_f(x) * 255 / (kernel_f(0) + kernel_f(1) * 2 + kernel_f(2) * 2 + kernel_f(3) * 2 + kernel_f(4) * 2)
rx = hcl.reduce_axis(-4, 5, "rx")
ry = hcl.reduce_axis(-4, 5, "ry")
my = hcl.reduce_axis(0, 640, "my")
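    # fixed-point RGB-to-luma: the (77, 150, 29) weights over 256 (>> 8)
    # approximate the standard (0.299, 0.587, 0.114) coefficients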
gray = hcl.compute((480, 640), lambda x, y: (input_image[x, y, 0] * 77 + input_image[x, y, 1] * 150 + input_image[x, y, 2] * 29) >> 8, name = "gray")
blur = hcl.compute(gray.shape, lambda x, y: hcl.sum(gray[rx+x, ry+y] * kernel(rx) * kernel(ry), axis = [rx, ry]), name = "blur")
sharpen = clamp2D(hcl.compute(gray.shape, lambda x, y: gray[x, y] * 2 - blur[x, y], name = "sharpen"), 0, 255)
ratio = clamp2D(hcl.compute(gray.shape, lambda x, y: sharpen[x, y] * 32 / hcl.max(gray[x, my], axis = my), name = "ratio"), 0, 255)
out = clamp3D(hcl.compute(output_image.shape, lambda x, y, c: ratio[x, y] * input_image[x, y, c] >> 5, name = "out"), 0, 255)
U = hcl.update(output_image, lambda x, y, c: out[x, y, c])
return U
s = hcl.make_schedule([input_image, output_image], unsharp)
print(hcl.lower(s, [input_image, output_image]))
| 39.06 | 151 | 0.620072 | 337 | 1,953 | 3.462908 | 0.222552 | 0.032562 | 0.054841 | 0.066838 | 0.327335 | 0.288775 | 0.264781 | 0.138817 | 0.080548 | 0.080548 | 0 | 0.056329 | 0.190988 | 1,953 | 49 | 152 | 39.857143 | 0.682278 | 0 | 0 | 0 | 0 | 0 | 0.029091 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.060606 | null | null | 0.030303 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
7d762e8385c0a3df789a5bd08064a714cdafb006 | 2,420 | py | Python | woke/woke/a_config/data_model.py | Ackee-Blockchain/woke | 0d27de25720142beb9619a89619b7a94c3556af1 | [
"ISC"
] | 7 | 2022-01-28T06:50:00.000Z | 2022-02-14T11:34:32.000Z | woke/woke/a_config/data_model.py | Ackee-Blockchain/woke | 0d27de25720142beb9619a89619b7a94c3556af1 | [
"ISC"
] | 30 | 2022-01-26T17:54:48.000Z | 2022-03-21T12:33:53.000Z | woke/woke/a_config/data_model.py | Ackee-Blockchain/woke | 0d27de25720142beb9619a89619b7a94c3556af1 | [
"ISC"
] | null | null | null | from typing import Optional, List
from pathlib import Path
from dataclasses import astuple
import re
from pydantic import BaseModel, Field, Extra, validator
from pydantic.dataclasses import dataclass
from woke.core.enums import EvmVersionEnum
from woke.c_regex_parsing.solidity_version import SolidityVersion
class WokeConfigModel(BaseModel):
class Config:
allow_mutation = False
json_encoders = {
SolidityVersion: str,
}
extra = Extra.forbid
@dataclass
class SolcRemapping:
context: Optional[str]
prefix: str
target: Optional[str]
def __iter__(self):
return iter(astuple(self))
def __str__(self):
return f"{self.context or ''}:{self.prefix}={self.target or ''}"
class SolcWokeConfig(WokeConfigModel):
allow_paths: List[Path] = []
"""Woke should set solc `--allow-paths` automatically. This option allows to specify additional allowed paths."""
evm_version: Optional[EvmVersionEnum] = None
"""Version of the EVM to compile for. Leave unset to let the solc decide."""
include_paths: List[Path] = []
remappings: List[SolcRemapping] = []
target_version: Optional[SolidityVersion] = None
@validator("allow_paths", pre=True, each_item=True)
def set_allow_path(cls, v):
return Path(v).resolve()
@validator("include_paths", pre=True, each_item=True)
def set_include_path(cls, v):
return Path(v).resolve()
@validator("remappings", pre=True, each_item=True)
def set_remapping(cls, v):
if isinstance(v, SolcRemapping):
return v
remapping_re = re.compile(
r"(?:(?P<context>[^:\s]+)?:)?(?P<prefix>[^\s=]+)=(?P<target>[^\s]+)?"
)
match = remapping_re.match(v)
assert match, f"`{v}` is not a valid solc remapping."
groupdict = match.groupdict()
context = groupdict["context"]
prefix = groupdict["prefix"]
target = groupdict["target"]
return SolcRemapping(context, prefix, target)
class CompilerWokeConfig(WokeConfigModel):
solc: SolcWokeConfig = Field(default_factory=SolcWokeConfig)
class TopLevelWokeConfig(WokeConfigModel):
subconfigs: List[Path] = []
compiler: CompilerWokeConfig = Field(default_factory=CompilerWokeConfig)
@validator("subconfigs", pre=True, each_item=True)
def set_subconfig(cls, v):
return Path(v).resolve()
| 30.25 | 117 | 0.673554 | 278 | 2,420 | 5.744604 | 0.352518 | 0.017533 | 0.027552 | 0.03757 | 0.126487 | 0.126487 | 0.112711 | 0.081403 | 0 | 0 | 0 | 0 | 0.209917 | 2,420 | 79 | 118 | 30.632911 | 0.835251 | 0 | 0 | 0.051724 | 0 | 0 | 0.098162 | 0.04303 | 0 | 0 | 0 | 0 | 0.017241 | 1 | 0.103448 | false | 0 | 0.137931 | 0.086207 | 0.655172 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
7d77a393017f4de426158a54d01130a88642e6af | 34,661 | py | Python | market_sim/_agents/risk_model.py | quanttrade/rl_trading | f4168c69f44fe5a11a06461387d4591426a43735 | [
"Apache-2.0"
] | 247 | 2017-09-14T03:26:39.000Z | 2022-03-30T10:23:02.000Z | market_sim/_agents/risk_model.py | Deeptradingfx/rl_trading | f4168c69f44fe5a11a06461387d4591426a43735 | [
"Apache-2.0"
] | null | null | null | market_sim/_agents/risk_model.py | Deeptradingfx/rl_trading | f4168c69f44fe5a11a06461387d4591426a43735 | [
"Apache-2.0"
] | 111 | 2017-10-18T07:47:07.000Z | 2022-03-30T10:18:49.000Z | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Implement different methods to hedge positions and measure the risk of a Zero
cupon bond portfolio
REFERENCE: Nawalkha, S. K; Soto, G. M.; Beliaeva, N. A., "Interest Rate Risk
Modeling, the fixed Income Valuation course". Wiley, 2005
@author: ucaiado
Created on 12/22/2016
"""
import numpy as np
import math
import pandas as pd
import pprint
'''
Begin help functions
'''
'''
End help functions
'''
def update_maxmin(f_price, a):
    '''
    Update maximum and minimum price observed by the agent while positioned
    :param f_price: float. the price just observed
    :param a: agent object.
    '''
    if f_price > a.current_max_price:
        a.current_max_price = f_price
    if f_price < a.current_min_price:
        a.current_min_price = f_price
class RiskModel(object):
'''
A basic risk model representation for a fixed income strategy that measures
the loss potential and the immunization needs
'''
def __init__(self, env, f_portfolio_value=10**6):
'''
Initiate a RiskModel object. Save all parameters as attributes
:param env: Environment object. the environment that uses this object
        :param f_portfolio_value*: float. The total portfolio value
'''
self.env = env
self.l_hedging_instr = env.l_hedge
self.s_main = env.s_main_intrument
self.l_ratios = []
self.d_dv01 = {}
self.na_pu = None
self.na_du = None
self.f_portfolio_value = f_portfolio_value
self.s_risk_model = 'BasicModel'
self.b_stop_trading = False
self.price_stop_buy = None
self.price_stop_sell = None
def reset(self):
'''
reset risk model parameters to use in a new simulation
'''
self.current_price = None
self.b_stop_trading = False
self.price_stop_buy = None
self.price_stop_sell = None
self.l_ratios = []
self.na_pu = None
self.na_du = None
def set_ratios(self):
'''
        Set the DV01 ratios between the main instrument and the others
        available for hedging
'''
# calculate the dv01 for each instrument
d_aux = {}
l_rtn = []
l_du = []
for s_key, idx in self.env.order_matching.d_map_book_list.iteritems():
book_obj = self.env.order_matching.l_order_books[idx]
f_du = self.env.l_du[self.env.order_matching.idx][idx]/252.
f_price, f_qty = book_obj.best_bid
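            # DV01 of a zero-coupon contract: PU = 10^5/(1+y)^t gives
            # dPU/dy = -t*10^5/(1+y)^(t+1); per basis point (1/10^4)
            # that reduces to the 10*t/(1+y)^(1+t) used here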
f_dv01 = (f_du*10.)/(1. + f_price/100.)**(1. + f_du)
d_aux[s_key] = f_dv01
l_du.append(f_du)
        # calculate the ratio in relation to the main instrument
self.d_dv01 = d_aux
for s_instr in self.l_hedging_instr:
l_rtn.append(d_aux[s_instr]/d_aux[self.s_main])
self.l_du = l_du
return l_rtn
def portfolio_duration(self, d_position):
'''
Return the duration of a portfolio
:param d_position: dictionary. portfolio to be hedged
'''
l_pu = []
l_pos = []
l_du = []
self.last_pu = {}
self.last_pos = {}
self.last_du = {}
for s_key, idx in self.env.order_matching.d_map_book_list.iteritems():
book_obj = self.env.order_matching.l_order_books[idx]
f_du = self.env.l_du[self.env.order_matching.idx][idx]
f_price, f_qty = book_obj.best_bid
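            # PU (unit price) of a zero-coupon contract with f_du business
            # days to maturity, discounted at the best-bid rate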
f_pu = 10.**5/(1. + f_price/100.)**(f_du/252.)
            f_pos = -d_position[s_key]['qBid']  # invert sign to get qty in PU?
f_pos -= -d_position[s_key]['qAsk']
self.last_du[s_key] = f_du
l_du.append(f_du)
self.last_pos[s_key] = f_pos
l_pos.append(f_pos)
self.last_pu[s_key] = f_pu
l_pu.append(f_pu)
return self._get_duration(l_pu, l_du, l_pos)
def _get_duration(self, l_pu, l_du, l_pos):
'''
Calculate the duration for a given position
        :param l_pu: list. the PU of each instrument
        :param l_du: list. business days to maturity of each instrument
:param l_pos: list. final position in each instrument traded
'''
na_weight = self._get_weights(l_pu, l_pos)
return sum(np.array(l_du)/252. * na_weight)
def _get_weights(self, l_pu, l_pos):
'''
Return the positions as portfolio weights
:param l_pu: list. the PU of each instrument
:param l_pos: list. final position in each instrument traded (in PU)
'''
na_weight = np.array(l_pu) * np.array(l_pos)
na_weight /= self.f_portfolio_value
return na_weight
def get_instruments_to_hedge(self, agent):
'''
Return a list of tuples with the instruments and quantities that can be
used to hedge a given portfolio
:param agent: Agent object. agent that need to hedge
'''
d_position = agent.position
return self._get_instruments_to_hedge(d_position)
def _get_instruments_to_hedge(self, d_position):
'''
Return a list of tuples with the instruments and quantities that can be
used to hedge a given portfolio
:param d_position: dictionary. portfolio in qty of contracts
'''
# check the ratios just once
if not self.l_ratios:
self.l_ratios = self.set_ratios()
f_current_duration = self.portfolio_duration(d_position)
        # check where to hedge and in what quantity
f_main_pos = -d_position[self.s_main]['qBid']
f_main_pos -= -d_position[self.s_main]['qAsk']
l_hedged_position = []
l_pos = [f_main_pos]
l_du = [self.last_du[self.s_main]]
l_pu = [self.last_pu[self.s_main]]
for s_instr, f_ratio in zip(self.l_hedging_instr, self.l_ratios):
if s_instr == self.s_main:
s_action = 'BUY'
if f_main_pos < 0:
s_action = 'SELL'
if f_main_pos == 0:
return []
return [(s_action, s_instr, f_main_pos)]
f_aux_pos = -d_position[s_instr]['qBid']
f_aux_pos -= -d_position[s_instr]['qAsk']
l_hedged_position.append(f_aux_pos*f_ratio)
l_pos.append(f_aux_pos)
l_du.append(self.last_du[s_instr])
l_pu.append(self.last_pu[s_instr])
f_main_position = f_main_pos + sum(np.array(l_hedged_position))
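        # convert the residual main-instrument exposure into hedge
        # quantities, rounded to the 5-contract minimum lot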
na_to_hedge = np.array([f_main_position] * len(l_hedged_position))
na_to_hedge /= np.array(self.l_ratios)
na_sign = np.sign(na_to_hedge)
na_mult = 5 * na_sign
if sum((abs(na_to_hedge)/5) < 1) != 0:
na_to_hedge = np.ceil(abs(na_to_hedge)/5).astype(int) * na_mult
else:
na_to_hedge = np.round(abs(na_to_hedge)/5).astype(int) * na_mult
l_to_hedge = list(na_to_hedge)
l_rtn = []
for idx, s_instr in enumerate(self.l_hedging_instr):
i_qty = -l_to_hedge[idx]
if i_qty != 0:
l_pos_aux = l_pos[:]
l_pos_aux[idx+1] += i_qty
f_future_duration = self._get_duration(l_pu, l_du, l_pos_aux)
f_abs_dur = abs(f_future_duration)
                # if qty is not enough to decrease the duration, increase it
if f_abs_dur > 1.2 and f_abs_dur < 3.:
i_qty *= 2
elif f_abs_dur >= 3.:
i_qty *= 3
l_pos_aux = l_pos[:]
l_pos_aux[idx+1] += i_qty
f_future_duration = self._get_duration(l_pu, l_du, l_pos_aux)
# recalculate all
if abs(f_future_duration) < abs(f_current_duration):
# change to rate quantity
s_action = 'BUY'
if -i_qty < 0:
s_action = 'SELL'
l_rtn.append((s_action, s_instr, -i_qty))
return l_rtn
class KRDModel(RiskModel):
'''
A key rate duration model representation that uses the KRDs selected to
decide what instruments sould be used in the immunization of a portfolio
'''
def __init__(self, env, l_krd, f_portfolio_value=10**6, s_kind='trava'):
'''
Initiate a KRDModel object. Save all parameters as attributes
:param env: Environment object. the environment that uses this object
:param l_krd: list. maturity of the key rates used, in years
        :param f_portfolio_value*: float. The total portfolio value
        :param s_kind*: string. the kind of KRD target used ('trava' or 'fly')
'''
super(KRDModel, self).__init__(env, f_portfolio_value)
self.s_risk_model = 'KRDModel_{}'.format(s_kind)
self.l_krd = l_krd
self.df_ratios = None
self.l_cmm_target = ['DI1F19', 'DI1F21', 'DI1F23']
self.s_kind = s_kind
def portfolio_krd(self, d_position):
'''
Return a tuple with the key rate durations of a portfolio and all
information needed to recalculate it
:param d_position: dictionary. portfolio to be hedged
'''
# recover variables
f_facevalue = 10.**5
l_rates = []
l_pos = []
l_maturity = []
l_instrument = []
for s_key, idx in self.env.order_matching.d_map_book_list.iteritems():
book_obj = self.env.order_matching.l_order_books[idx]
l_instrument.append(book_obj.s_instrument)
f_du = self.env.l_du[self.env.order_matching.idx][idx]
f_price, f_qty = book_obj.best_bid
            f_pos = -d_position[s_key]['qBid']  # invert sign to get qty in PU?
f_pos -= -d_position[s_key]['qAsk']
l_maturity.append(f_du/252.)
l_pos.append(f_pos)
l_rates.append(f_price)
# get the key rate duration matrix
l_exp_pu = [f_facevalue * np.exp(-f_rate/100 * f_mat)
for f_rate, f_mat in zip(l_rates, l_maturity)]
l_pu = [f_facevalue * (1.+f_rate/100)**(-f_mat)
for f_rate, f_mat in zip(l_rates, l_maturity)]
l_dPdYP = [f_facevalue * f_mat * np.exp(-f_rate/100 * f_mat)
for f_rate, f_mat in zip(l_rates, l_maturity)]
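        # each key rate duration is the linear contribution s(t, t_i)
        # scaled by the instrument's (dP/dy)/P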
df_krd = self.key_rates(l_dPdYP, l_exp_pu)
na_weights = self._get_weights(l_pu, l_pos)
df_exposure = self._get_krd_exposure(df_krd, na_weights)
t_rtn = (df_krd, na_weights, df_exposure, l_maturity, l_pos, l_pu,
l_instrument)
return t_rtn
def _get_krd_exposure(self, df_krd, na_weights):
'''
Return the exposure in KRDs based on krds passed and weights
:param df_krd: data frame. KRD of the instruments traded
        :param na_weights: numpy array. the portfolio weight of each instrument
'''
df_exposure = pd.Series(df_krd.T.dot(na_weights))
df_exposure.index = self.l_krd
return df_exposure
def key_rates(self, l_dPdYP, l_pu):
'''
Return the matrix of key rates durations for the instruments traded
in the environment
        :param l_dPdYP: list. $-\frac{dP}{dy} = t \cdot P$ of each contract
        :param l_pu: list. PU of each contract
'''
# add up the linear contributions $s(t, t_i)\$ for $i=1, 2, ..., m$ to
# obtain the change in the given zero-coupon rate $\Delta y(t)$
if isinstance(self.df_ratios, type(None)):
self._set_linear_contributions()
df = self.df_ratios
return df.apply(lambda x: x * np.array(l_dPdYP) / np.array(l_pu),
axis=0)
def get_target_krds(self, l_cmm, d_data, df_krd, s_kind='fly'):
'''
        Return the target KRDs pandas series, matching those of a butterfly
        :param l_cmm: list. instruments used in the butterfly, ordered by maturity
        :param d_data: dictionary. maturity and PU of each instrument
        :param df_krd: data frame. KRD of the instruments traded
        :param s_kind*: string. the kind of target to return
'''
# calculate positions
if s_kind == 'fly':
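            # butterfly: split the middle leg between the wings in the
            # maturity-interpolated proportion f_alpha, weighted by PU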
f_Qm = 1. # quantity at the middle of the structure
f_alpha = (d_data[l_cmm[2]][1] * 1. - d_data[l_cmm[1]][1])
f_alpha /= (d_data[l_cmm[2]][1] / 1. - d_data[l_cmm[0]][1])
f_Qs = (f_Qm * f_alpha * d_data[l_cmm[1]][0]) / d_data[l_cmm[0]][0]
f_Ql = (f_Qm * (1 - f_alpha) * d_data[l_cmm[1]][0])
f_Ql /= d_data[l_cmm[2]][0]
l_pos = [-f_Qs, f_Qm, -f_Ql]
elif s_kind == 'trava':
l_pu = [d_data[s_key][0] for s_key in l_cmm]
l_mat = [d_data[s_key][1] for s_key in l_cmm]
l_pos = [0., 10, 0.]
na_weights = self._get_weights(l_pu, l_pos)
f_curr_duration = sum(np.array(l_mat) * na_weights)
l_pos_aux = []
for s_key in self.l_hedging_instr:
f_pu = d_data[s_key][0]
f_matr = d_data[s_key][1]
f_dur_aux = 5. * f_pu / self.f_portfolio_value * f_matr
f_unt = -f_curr_duration / f_dur_aux * 5.
l_pos_aux.append(f_unt)
l_pos = [l_pos_aux[0]/20.] + [1.] + [l_pos_aux[1]/20.]
        # calculate target
l_p = [d_data[l_cmm[0]][0], d_data[l_cmm[1]][0], d_data[l_cmm[2]][0]]
na_weights = self._get_weights(l_p, l_pos)
df_target = pd.Series(df_krd.T.dot(na_weights))
df_target.index = self.l_krd
return df_target
def _set_linear_contributions(self):
'''
Define the linear contribution $s(t, t_i)$ made by the change in the
ith key rate, $\Delta y(t_i)$, to the change in a given zero-coupon
rate $\Delta y(t)$, according to Nawalkha, 266
'''
l_maturity = []
l_krd = self.l_krd
# recover data from books
for s_key, idx in self.env.order_matching.d_map_book_list.iteritems():
f_du = self.env.l_du[self.env.order_matching.idx][idx]
l_maturity.append(f_du/252.)
# create the $s(t, t_i)$ matrix, according to Nawalkha, 266
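        # each row is a piecewise-linear 'tent': weight 1.0 at the nearest
        # key rate, falling to 0.0 at the adjacent key rates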
l = []
i_last_idx = len(l_krd) - 1
for i_list, f_mat in enumerate(l_maturity):
l.append([])
for idx in xrange(len(l_krd)):
f_krd = l_krd[idx]
if idx == 0:
f_krd1 = l_krd[idx+1]
if f_mat < f_krd:
l[i_list].append(1.)
elif f_mat > f_krd1:
l[i_list].append(0.)
else:
l[i_list].append((f_krd1 - f_mat)/(f_krd1-f_krd))
elif idx == i_last_idx:
f_krd_1 = l_krd[idx-1]
if f_mat > f_krd:
l[i_list].append(1.)
elif f_mat < f_krd_1:
l[i_list].append(0.)
else:
l[i_list].append((f_mat - f_krd_1)/(f_krd-f_krd_1))
else:
f_krd1 = l_krd[idx+1]
f_krd_1 = l_krd[idx-1]
if (f_mat >= f_krd_1) & (f_mat <= f_krd):
l[i_list].append((f_mat - f_krd_1)/(f_krd-f_krd_1))
elif (f_mat >= f_krd) & (f_mat <= f_krd1):
l[i_list].append((f_krd1 - f_mat)/(f_krd1-f_krd))
elif (f_mat < f_krd_1) | (f_mat > f_krd1):
l[i_list].append(0.)
else:
l[i_list].append(0.)
self.df_ratios = pd.DataFrame(l)
def _get_instruments_to_hedge(self, d_position):
'''
Return a list of tuples with the instruments and quantities that can be
used to hedge a given portfolio (in rate, not PU)
:param d_position: dictionary. portfolio in qty of contracts
'''
# measure the KRDs of the current portfolios
f_portfolio_value = self.f_portfolio_value
t_rtn = self.portfolio_krd(d_position)
df_krd, na_weights, df_expos, l_mat, l_pos, l_pu, l_instr = t_rtn
d_aux = dict(zip(l_instr, zip(l_pu, l_mat,
np.cumsum(len(l_instr) * [1])-1)))
df_target = self.get_target_krds(self.l_cmm_target, d_aux, df_krd,
s_kind=self.s_kind)
        # NOTE: why is the sign inverted here? unclear -- maybe
        # something related to positions being in PU vs. rates
df_target *= (l_pos[d_aux[self.l_cmm_target[1]][2]])
# calculate the current duration and distance for the target in
# absolute percentage
f_curr_duration = sum(np.array(l_mat) * na_weights)
f_curr_abs_target = sum(abs((df_expos-df_target)/df_target))
# check which hedge will drive the strategy closer to the target
f_min_abs_target = f_curr_abs_target
l_rtn = []
for idx, s_key in enumerate(self.l_hedging_instr):
f_pu = d_aux[s_key][0]
f_matr = d_aux[s_key][1]
f_dur_aux = 5. * f_pu / f_portfolio_value * f_matr
f_unt = np.round(-f_curr_duration / f_dur_aux)
if abs(f_unt) > 10e-6:
s_debug = '\t{}: {:0.2f}, {:0.2f}'
# limit the number of contracts that can be traded at each time
i_qty = float(f_unt*5)
if f_unt > 3.:
i_qty = 15.
elif f_unt < -3.:
i_qty = -15.
# simulate how would be the measures doing the hedge
# recalculate all
idx = d_aux[s_key][2]
l_pos_aux = l_pos[:]
l_pos_aux[idx] += i_qty
na_weights_aux = self._get_weights(l_pu, l_pos_aux)
f_aux_duration = sum(np.array(l_mat) * na_weights_aux)
df_expos_aux = self._get_krd_exposure(df_krd, na_weights_aux)
f_aux_abs_target = sum(abs((df_expos_aux-df_target)/df_target))
# === DEBUG ===
# print s_debug.format(s_key, f_aux_duration, f_aux_abs_target)
# =============
# check the hedge instrument that will drive down the krd most
if abs(f_aux_duration) < abs(f_curr_duration):
if f_aux_abs_target < f_min_abs_target:
f_min_abs_target = f_aux_abs_target
                        # the quantity is in PU, so convert to rate
s_action = 'BUY'
if -i_qty < 0:
s_action = 'SELL'
l_rtn = [(s_action, s_key, -i_qty)]
return l_rtn
class SingleHedgeModel(RiskModel):
'''
A SingleHedgeModel model representation that immunize portfolio using just
one instrument
'''
def __init__(self, env, f_portfolio_value=10**6, s_instrument='DI1F19'):
'''
        Initiate a SingleHedgeModel object. Save all parameters as attributes
        :param env: Environment object. the environment that uses this object
        :param s_instrument*: string. the single instrument used to hedge
        :param f_portfolio_value*: float. The total portfolio value
'''
super(SingleHedgeModel, self).__init__(env, f_portfolio_value)
self.s_risk_model = 'SingleHedgeModel'
self.l_hedging_instr = [s_instrument]
class GreedyHedgeModel(RiskModel):
'''
    A GreedyHedgeModel checks if the market is offering a good deal to
    hedge the agent's position. The immunization is done using a
    duration-neutral strategy that uses just one instrument. The 'good
    deal' notion should be implemented as something related to price,
    time or even fair-priceness quant stuff
'''
def __init__(self, env, f_value=10**6, s_instrument='DI1F19',
s_fairness='spread'):
'''
Initiate a GreedyHedgeModel object. Save all parameters as attributes
:param env: Environment object. the environment that uses this object
        :param s_instrument*: string. the instrument used to hedge
        :param s_fairness*: string. the fair price notion of the agent
:param f_value*: float. The total value available
'''
super(GreedyHedgeModel, self).__init__(env, f_value)
self.s_fairness = s_fairness
if s_fairness == 'spread':
self.func_fair_price = self._compare_to_spread
elif s_fairness == 'closeout':
# closeout also should include stoploss?
self.func_fair_price = self._compare_to_closeout
s_instrument = env.s_main_intrument
self.s_risk_model = 'GreedyHedge_{}'.format(s_fairness)
self.l_hedging_instr = [s_instrument]
self.main_hedge = s_instrument
self.f_target = 0.03 # could be smaller when closeout (2 bps?)
self.f_stop = 0.03
self.last_txt = ''
self.current_price = None
self.f_last_gain = None
self.f_last_loss = None
self.price_stop_buy = None
self.price_stop_sell = None
def set_gain_loss(self, f_gain, f_loss):
'''
Set a target to the agent stop trading on the session
        :param f_gain: float. PnL level at which to stop with a gain
        :param f_loss: float. PnL level at which to stop with a loss
'''
self.f_last_gain = f_gain
self.f_last_loss = f_loss
def can_open_position(self, s_side, agent):
'''
Check the positions limits of an agent
:param s_side: string. Side of the trade to check the limit
:param agent: Agent object. agent that need to hedge
'''
if not self.l_ratios:
self.l_ratios = self.set_ratios()
# recover position limits
s_instr = self.env.s_main_intrument
f_max_pos = agent.max_pos
f_max_disclosed = agent.max_disclosed_pos
# calculate the current position
f_pos = agent.position[s_instr]['qBid']
f_pos -= agent.position[s_instr]['qAsk']
f_pos_discl = f_pos + agent.disclosed_position[s_instr]['qBid']
f_pos_discl -= agent.disclosed_position[s_instr]['qAsk']
f_pnlt = 0.
# check if can open position to a specific side
if s_side == 'ASK':
if f_pos <= f_max_pos * -1:
return False
elif f_pos_discl <= f_max_disclosed * -1:
return False
elif s_side == 'BID':
if f_pos >= f_max_pos:
return False
elif f_pos_discl >= f_max_disclosed:
return False
return True
def should_open_at_current_price(self, s_side, agent):
        '''
        Check if the agent should open a new position at the current
        prices, given its recent stops, previous fills and current PnL
        :param s_side: string. Side of the trade to check
        :param agent: Agent object. agent that wants to trade
        '''
# recover position limits
s_instr = self.env.s_main_intrument
f_pnlt = 0.
if agent.f_pnl < -1500.:
f_pnlt = self.f_stop / 3. * 3.
elif agent.f_pnl < -1000.:
f_pnlt = self.f_stop / 3. * 2
elif agent.f_pnl < -500.:
f_pnlt = self.f_stop / 3. * 1.
# calculate the current position
f_pos = agent.position[s_instr]['qBid']
f_pos -= agent.position[s_instr]['qAsk']
f_pos_discl = f_pos + agent.disclosed_position[s_instr]['qBid']
f_pos_discl -= agent.disclosed_position[s_instr]['qAsk']
# recover prices
book_obj = agent.env.get_order_book(s_instr)
f_current_bid, i_qbid = book_obj.best_bid
f_current_ask, i_qask = book_obj.best_ask
f_bidask_spread = (f_current_ask - f_current_bid)
        # check if there is something weird in the prices
if (f_bidask_spread <= 0.005) or (f_bidask_spread > 0.04):
# print 'wierd bid-ask spread', f_bidask_spread
return False
# check if can open position based on the last stop
if self.price_stop_sell and s_side == 'ASK':
f_check = self.price_stop_sell
if f_current_ask >= f_check - f_pnlt:
if f_current_ask <= f_check + f_pnlt:
# print 'last time of stop at ask', f_check
return False
if self.price_stop_buy and s_side == 'BID':
f_check = self.price_stop_buy
if f_current_bid >= f_check - f_pnlt:
if f_current_bid <= f_check + f_pnlt:
# print 'last time of stop at bid', f_check
return False
# check if can open positions based on the last price traded
if f_pos < 0 and s_side == 'ASK':
l_agent_prices = [f_p for f_p, f_q, d_tob in
agent.d_trades[s_instr][s_side]]
f_min = min(l_agent_prices) - f_pnlt
f_max = max(l_agent_prices) + f_pnlt
if f_current_ask >= f_min and f_current_ask <= f_max:
# print 'same prices at ask', f_current_ask, f_max, f_min
return False
elif f_pos > 0 and s_side == 'BID':
l_agent_prices = [f_p for f_p, f_q, d_tob in
agent.d_trades[s_instr][s_side]]
f_min = min(l_agent_prices) - f_pnlt
f_max = max(l_agent_prices) + f_pnlt
if f_current_bid >= f_min and f_current_bid <= f_max:
# print 'same prices at bid', f_current_bid, f_max, f_min
return False
elif f_pos_discl > 0 and s_side == 'ASK':
f_agent_price = agent.current_open_price
if abs(f_current_ask - f_agent_price) < 0.005:
# print 'too low at ask', f_current_ask, f_agent_price
return False
elif f_pos_discl < 0 and s_side == 'BID':
f_agent_price = agent.current_open_price
if abs(f_current_bid - f_agent_price) < 0.005:
# print 'too low at bid', f_current_bid, f_agent_price
return False
return True
def should_hedge_open_position(self, agent):
'''
Check if the current open position should be hedged
:param agent: Agent object. agent that need to hedge
'''
# recover position limits
s_instr = self.env.s_main_intrument
f_pos = agent.position[s_instr]['qBid']
f_pos -= agent.position[s_instr]['qAsk']
f_pos_discl = f_pos + agent.disclosed_position[s_instr]['qBid']
f_pos_discl -= agent.disclosed_position[s_instr]['qAsk']
# recover price from hedging instrument
obj_book = self.env.get_order_book(self.main_hedge)
if f_pos_discl < 0:
f_price, f_qty = obj_book.best_ask
elif f_pos_discl > 0:
f_price, f_qty = obj_book.best_bid
# check if is fair to mound a spread
if f_pos_discl != 0 and f_pos != 0:
s_side = 'ASK'
if f_pos > 0:
s_side = 'BID'
if not self.func_fair_price(f_price, f_pos_discl, agent, s_side):
return False
print '.',
# close out open positions by the current mid
if s_instr != self.main_hedge:
obj_book = self.env.get_order_book(s_instr)
f_ask, f_qty = obj_book.best_ask
f_bid, f_qty = obj_book.best_bid
f_mid = (f_ask + f_bid)/2.
if f_pos_discl < 0:
f_qty = abs(f_pos_discl)
f_vol = f_qty * f_mid
agent.disclosed_position[s_instr]['qBid'] += f_qty
agent.disclosed_position[s_instr]['Bid'] += f_vol
elif f_pos_discl > 0:
f_qty = abs(f_pos_discl)
f_vol = f_qty * f_mid
agent.disclosed_position[s_instr]['qAsk'] += f_qty
agent.disclosed_position[s_instr]['Ask'] += f_vol
return True
def get_instruments_to_hedge(self, agent):
'''
Return a list of tuples with the instruments and quantities that can be
used to hedge a given portfolio
:param agent: Agent object. agent that need to hedge
'''
# TODO: if s_fairness==closeout, should "hedge" on the main instrument
d_position = agent.position
return self._get_instruments_to_hedge(d_position)
def should_stop_disclosed(self, agent):
'''
Return if the agent should stop the current disclosed position or not
:param agent: Agent object. agent that need to hedge
'''
s_instr = self.env.s_main_intrument
# calculate the current position
f_pos = agent.position[s_instr]['qBid']
f_pos -= agent.position[s_instr]['qAsk']
f_pos_discl = f_pos + agent.disclosed_position[s_instr]['qBid']
f_pos_discl -= agent.disclosed_position[s_instr]['qAsk']
f_agent_price = agent.current_open_price
if not f_agent_price or f_pos_discl == 0.:
if self.b_stop_trading:
agent.done = True
return False
f_ref_price = f_agent_price
# recover prices
book_obj = agent.env.get_order_book(s_instr)
f_current_bid, i_qbid = book_obj.best_bid
f_current_ask, i_qask = book_obj.best_ask
f_bidask_spread = (f_current_ask - f_current_bid)
# check if there is something weird with the spread
if (f_bidask_spread <= 0.005) or (f_bidask_spread > 0.03):
return False
# check if should stop to trade
if self.b_stop_trading:
return True
if self.f_last_gain:
f_pnl = agent.f_pnl - 40. # due to MtM
if f_pnl > self.f_last_gain:
self.b_stop_trading = True
return True
elif f_pnl < self.f_last_loss:
self.b_stop_trading = True
return True
# check if should execute the stop gain
if f_pos_discl > 0:
update_maxmin(f_current_bid, agent)
f_ref_price = max(agent.current_max_price, f_ref_price)
f_loss = f_ref_price - self.f_stop
if f_current_bid < f_loss:
if i_qbid <= 600.:
return True
return f_current_bid < f_loss - self.f_stop/2.
elif f_pos_discl < 0:
update_maxmin(f_current_ask, agent)
f_ref_price = min(agent.current_min_price, f_ref_price)
f_loss = f_ref_price + self.f_stop
if f_current_ask > f_loss:
if i_qask <= 600.:
return True
return f_current_ask > f_loss + self.f_stop/2.
return False
def _compare_to_spread(self, f_current_price, f_open_pos, agent, s_side):
'''
        Check if the current price of the hedging instrument is at least as
        good as the fair price implied by the average spread captured when
        the disclosed position was opened, shifted by the profit target
        :param f_current_price: float. The current price in the hedging instr
        :param f_open_pos: float. the current disclosed position
        :param agent: Agent object. agent that need to hedge
        :param s_side: string. side of the disclosed trades
'''
# short_current_price >= (long_avg_price-avg_spread_price + param)
if f_open_pos > 0:
f_param = self.f_target # NOTE: hard coded
elif f_open_pos < 0:
f_param = -self.f_target # NOTE: hard coded
s_instr = self.env.s_main_intrument
s_hedge = self.main_hedge
# s_side = 'ASK'
# if f_open_pos > 0:
# s_side = 'BID'
# implement the prices accountability
idx = int(abs(f_open_pos/agent.order_size))
l_disclosed = agent.d_trades[s_instr][s_side][-idx:]
if len(l_disclosed) == 0:
print 'no disclosed position'
print '--open'
pprint.pprint(agent.d_trades)
print '--position'
pprint.pprint(agent.position)
print '--disclosed'
print agent.disclosed_position
print '--param'
print s_side, f_open_pos
raise NotImplementedError
f_long_avg_price = 0.
f_avg_spread = 0.
f_qtot = 0.
for f_p, f_q, d_tob in l_disclosed:
f_long_avg_price += f_p*f_q
f_qtot += f_q
f_aux = (d_tob[s_instr]['Ask'] + d_tob[s_instr]['Bid'])/2.
f_aux -= (d_tob[s_hedge]['Ask'] + d_tob[s_hedge]['Bid'])/2.
f_avg_spread += f_aux * f_q
f_long_avg_price /= f_qtot
f_avg_spread /= f_qtot
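        # fair hedge price: average open price minus the average spread
        # captured at each fill, shifted by the profit target f_param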
f_fair_price = (f_long_avg_price - f_avg_spread + f_param)
# keep the price into memory of the agent
agent.current_open_price = f_long_avg_price
s_err = 'PRICE: {}, DISCL: {}, AVG SPREAD: {}, MY PRICE: {}'
s_err += ', CURRNT: {}'
s_err = s_err.format(f_fair_price, f_open_pos, f_avg_spread,
f_long_avg_price, f_current_price)
if self.last_txt != s_err:
# print s_err
self.last_txt = s_err
if f_open_pos > 0:
return f_current_price >= f_fair_price
elif f_open_pos < 0:
return f_current_price <= f_fair_price
def _compare_to_closeout(self, f_current_price, f_open_pos, agent, s_side):
        '''
        Check if the current price hits the stop-gain target measured from
        the average open price of the disclosed position
        :param f_current_price: float. The current price in the instrument
        :param f_open_pos: float. the current disclosed position
        :param agent: Agent object. agent that need to hedge
        :param s_side: string. side of the disclosed trades
        '''
# short_current_price >= (long_avg_price-avg_spread_price + param)
s_instr = self.env.s_main_intrument
idx = int(abs(f_open_pos/agent.order_size))
l_disclosed = agent.d_trades[s_instr][s_side][-idx:]
f_long_avg_price = 0.
f_avg_spread = 0.
f_qtot = 0.
for f_p, f_q, d_tob in l_disclosed:
f_long_avg_price += f_p*f_q
f_qtot += f_q
f_long_avg_price /= f_qtot
f_avg_spread /= f_qtot
f_fair_price = (f_long_avg_price + self.f_target)
# keep the price into memory of the agent
agent.current_open_price = f_long_avg_price
s_err = 'POS: {}, MY PRICE: {}, CURRNT: {}, MAX: {}, MIN: {}'
s_err = s_err.format(f_open_pos, f_long_avg_price, f_current_price,
agent.current_max_price, agent.current_min_price)
if self.last_txt != s_err:
# print s_err + '\n'
self.last_txt = s_err
# recover prices
book_obj = agent.env.get_order_book(s_instr)
f_current_bid, i_qbid = book_obj.best_bid
f_current_ask, i_qask = book_obj.best_ask
f_bidask_spread = (f_current_ask - f_current_bid)
        # check if there is something weird in the prices
if (f_bidask_spread <= 0.005) or (f_bidask_spread > 0.04):
return False
# check if should execute the stop gain
if f_open_pos > 0:
f_gain = f_long_avg_price + self.f_target
if f_current_bid >= f_gain:
if i_qbid <= 400.:
return True
return f_current_bid > f_gain + self.f_target/2.
elif f_open_pos < 0:
f_gain = f_long_avg_price - self.f_target
if f_current_ask <= f_gain:
if i_qask <= 400.:
return True
return f_current_ask < f_gain - self.f_target/2.
return False
| 40.72973 | 79 | 0.578979 | 5,070 | 34,661 | 3.634122 | 0.08856 | 0.015631 | 0.011235 | 0.009118 | 0.579647 | 0.52863 | 0.468765 | 0.420624 | 0.390285 | 0.345346 | 0 | 0.013919 | 0.330487 | 34,661 | 850 | 80 | 40.777647 | 0.780057 | 0.084014 | 0 | 0.40868 | 0 | 0 | 0.018672 | 0 | 0 | 0 | 0 | 0.001176 | 0 | 0 | null | null | 0 | 0.007233 | null | null | 0.019892 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
7d803a9aa0c5e2c7510ceac09d326b16dcb098e1 | 9,946 | py | Python | PP4E/Examples/PP4E/Ai/ExpertSystem/holmes/holmes2/forward.py | BeacherHou/Python-_Markdown- | 015d79a02d32f49395b80ca10919b3a09b72c4df | [
"MIT"
] | null | null | null | PP4E/Examples/PP4E/Ai/ExpertSystem/holmes/holmes2/forward.py | BeacherHou/Python-_Markdown- | 015d79a02d32f49395b80ca10919b3a09b72c4df | [
"MIT"
] | null | null | null | PP4E/Examples/PP4E/Ai/ExpertSystem/holmes/holmes2/forward.py | BeacherHou/Python-_Markdown- | 015d79a02d32f49395b80ca10919b3a09b72c4df | [
"MIT"
] | null | null | null | #
# module forward.py
#
# forward chaining inference engine
# see holmes/forward.py and holmes.doc for more info;
#
# optimization: uses known fact and rule 'if' indexes to avoid:
# a) exhaustive fact list search when matching an 'if'
# b) exhaustive fact list scan when seeing if fact redundant
# c) exhaustive fact list scan when seeing if should ask user
# d) reselecting and refiring rule/binding on each iteration
#
# only tries rules suggested (triggered) by facts added
# during the last iteration (restarts from top again);
#
# could be made slightly faster by using '(x,y)' tree rep
# for lists (proof list, etc.), but the gain would be minor
# compared to the index tree improvement;
#
# known fact list is now an index tree (members() generates
# the old list, but it is no longer in deduction-order);
###########################################################################
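#
# data representation (as used below): a fact is a list of tokens,
# e.g. ['parent', 'mel', 'ann']; a rule is a dictionary with keys
# 'rule' (name), 'if' (list of fact patterns) and 'then' (list of
# conclusions); 'ask' goals query the user, 'delete' conclusions
# retract known facts, and 'not' only matches explicitly asserted
# negations;
#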
from match import *
from index import Index
from kbase import external, internal
from time import time
class stop_chaining(Exception):
    pass    # raised to let the user halt forward chaining early
def forward(rules, facts, *pmode):
time1 = time()
global kbase # avoid extra args
kbase = rules
known = initialize(facts, kbase)
try:
chain(facts+[['true']], known, kbase) # adds to 'known'
except stop_chaining: pass # user can stop it
return report(known, pmode, time1)
def chain(newfacts, known, kbase):
global user_answers # avoid extra args
while 1:
user_answers = 0
rules = triggered(newfacts, kbase) # if part in new
if not rules:
break
solns = bindings(rules, known) # all 'if's matched
if not solns and not user_answers:
break
newfacts = fire(solns, known) # add 'then' to known
if not newfacts and not user_answers:
break # no new facts added, or
# ask_user added no facts
#######################################################
# create fact index and init iteration counts;
# store_unique would remove redundant initial facts;
#######################################################
def initialize(facts, kbase):
known = Index().init()
for fact in facts:
known.store(fact, (fact, 'initial')) # fact, proof
known.store(['true'], (['true'], 'atomic')) # if true then...
for rule in kbase.rules:
rule['trigger'] = 0
return known
#################################################
# add 'then' parts of matched rules/bindings
# store_unique() might speed finding duplicates;
#################################################
def fire(solns, known):
added = []
for (rule, dict, proof) in solns:
for then in rule['then']:
fact = substitute(then, dict)
if fact[0] == 'delete':
if known.search_unique(fact[1:]):
known.delete(fact[1:])
added.append(['not'] + fact)
else:
if not known.search_unique(fact):
known.store(fact, (fact, (rule['rule'], proof)) )
added.append(fact)
return added
#############################################
# pick rules with matched 'if' parts;
# returns list with no redundant rules;
#############################################
trigger_id = 1
def triggered(newfacts, kbase):
global trigger_id
res = []
for fact in newfacts:
for rule in kbase.match_if(fact):
if rule['trigger'] != trigger_id:
res.append(rule)
rule['trigger'] = trigger_id
trigger_id = trigger_id + 1
return res
#####################################################
# generate bindings for rule's 'if' conjunction,
# for all rules triggered by latest deductions;
# note: 'not' goals must match explicitly asserted
# 'not' facts: we just match the whole 'not';
#####################################################
def bindings(triggered, known):
solns = []
for rule in triggered:
for (dict, proof) in conjunct(rule['if'], known, {}, rule['rule']):
solns.append((rule, dict, proof))
return solns
def conjunct(ifs, known, dict, why):
if ifs == []:
return [(copy_dict(dict), [])]
res = []
head, tail = ifs[0], ifs[1:]
if head[0] == 'ask':
term = substitute(head[1:], dict)
if ask_user(term, known, why):
for (dict2, proof2) in conjunct(tail, known, dict, why):
res.append((dict2, [(term, 'told')] + proof2))
else:
for (fact, proof) in known.search(head, dict):
matched, changes = match(head, fact, dict, {})
if matched:
for (dict2, proof2) in conjunct(tail, known, dict, why):
res.append((dict2, [(fact, proof)] + proof2))
for (var, env) in changes:
env[var] = '?'
return res
########################################################
# assorted stuff; dictionary copies should be built-in,
# since dictionary assignment 'shares' the same object;
########################################################
def copy_dict(dict):
res = {}
for f in dict.keys(): res[f] = dict[f]
return res
##########################################################
# the 'why' explanation in forward chaining just lists
# the rule containing the asked goal;
##########################################################
def ask_user(fact, known, why):
global user_answers
if known.search_unique(fact):
return 1
elif known.search_unique(['not'] + fact):
return 0
user_answers = 1
while 1:
ans = raw_input('is this true: ' + external([fact]) + ' ?')
if ans in ['y','Y','yes','YES']:
known.store(fact, (fact, 'told'))
return 1
elif ans in ['n','N','no','NO']:
known.store(['not']+fact, (['not']+fact, 'told'))
return 0
elif ans == 'why':
print 'to see if rule', why, 'can be applied'
elif ans == 'where':
print_solns(known, None)
elif ans == 'browse':
kbase.browse_pattern(raw_input('enter browse pattern: '))
elif ans == 'stop':
raise stop_chaining
else:
print 'what? ',
print '(expecting "y", "n", "why", "where", "browse", or "stop")'
######################################################
# 'how' explanations require us to construct proof
# trees for each fact added to the known facts list;
######################################################

def report(known, pmode, time1):
    filter = None
    if pmode:
        if pmode[0] == None:
            return known
        else:
            filter = pmode[0]
    time2 = time() - time1
    print_solns(known, filter)
    print 'time: ', time2
    show_proofs(known)

def print_solns(known, filter):
    sources = {'rule': [], 'told': [], 'init': [], 'atom': []}
    for (fact, proof) in known.members():
        if not filter or match(filter, fact, {}, {})[0]:
            if type(proof) == type(()):
                sources['rule'].append((fact, proof))        # deduced
            elif proof == 'told' or proof == 'not':
                sources['told'].append(fact)
            elif proof == 'initial':
                sources['init'].append(fact)
            elif proof == 'atomic':
                sources['atom'].append(fact)
    if not sources['rule']:
        print 'I have not deduced any new facts.'
    else:
        print 'I deduced these facts...'
        for (fact, proof) in sources['rule']:
            print '  ', external([fact])    # , '(by rule', proof[0] + ')'
    if sources['told']:
        print 'You told me these facts...'
        for fact in sources['told']:
            print '  ', external([fact])
    if sources['init']:
        print 'I started with these facts...'
        for fact in sources['init']:
            print '  ', external([fact])
    # ignore sources['atom']
def show_proofs(known):
    while 1:
        print
        ans = raw_input('show proofs? ')
        if ans in ['y', 'Y', 'yes', 'YES']:
            [patt] = internal(raw_input('enter deductions pattern: '))
            for (fact, proof) in known.members():
                if match(patt, fact, {}, {})[0]:
                    trace_tree((fact, proof), 0)
        elif ans in ['n', 'N', 'no', 'NO']:
            break
        elif ans == 'where':
            print_solns(known, None)
        elif ans == 'browse':
            kbase.browse_pattern(raw_input('enter browse pattern: '))
        else:
            print 'what? (expecting "y", "n", "where", or "browse")'

def trace_tree((fact, proof), level):
    print ' ' * level,
    print '"' + external([fact]) + '"',
    if proof == 'told':
        print 'was your answer'
    elif proof == 'initial':
        print 'was on your initial facts list'
    elif proof == 'atomic':
        print 'is an absolute truth'
    elif proof == 'not':
        print 'was a negative answer, or was omitted'
    else:
        rule, subproof = proof
        print 'was deduced by firing rule', rule
        for branch in subproof:
            trace_tree(branch, level + 3)
| 28.096045 | 81 | 0.478082 | 1,058 | 9,946 | 4.452741 | 0.236295 | 0.017194 | 0.007642 | 0.011887 | 0.133517 | 0.11038 | 0.099342 | 0.060284 | 0.060284 | 0.060284 | 0 | 0.006026 | 0.332596 | 9,946 | 353 | 82 | 28.175637 | 0.703676 | 0.177961 | 0 | 0.27027 | 0 | 0.005405 | 0.108555 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0.005405 | 0.021622 | null | null | 0.12973 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
7d8c2a23670b05afd3505faf37ad0aff75f308fd | 5,073 | py | Python | vcommand/libs/crypto.py | virink/vCommand | 328dd5a8bc9390c5edde80f5544d797f54690f91 | [
"MIT"
] | 7 | 2019-08-01T14:57:34.000Z | 2019-11-26T12:12:17.000Z | vcommand/libs/crypto.py | virink/vCommand | 328dd5a8bc9390c5edde80f5544d797f54690f91 | [
"MIT"
] | null | null | null | vcommand/libs/crypto.py | virink/vCommand | 328dd5a8bc9390c5edde80f5544d797f54690f91 | [
"MIT"
] | 2 | 2019-08-16T04:52:50.000Z | 2019-11-26T12:12:25.000Z | #!/usr/bin/env python3
# -*- coding:utf-8 -*-
"""
Author : Virink <virink@outlook.com>
Date   : 2019/04/18, 14:49
"""
import string
import re

L = string.ascii_lowercase
U = string.ascii_uppercase
A = string.ascii_letters

def func_atbash(*args):
    """Atbash cipher decode"""
    arg = args[0]
    arg = arg.lower().replace(' ', 'vvvzzzvvv')
    res = [L[25 - j] for i in arg for j in range(26) if i == L[j]]
    return ''.join(res).replace('eeeaaaeee', ' ')

def __caesar(offset, arg):
    """Caesar cipher (internal helper)"""
    result = ""
    for ch in arg:
        if ch.isupper():
            result += U[((U.index(ch) + offset) % 26)]
        elif ch.islower():
            result += L[((L.index(ch) + offset) % 26)]
        else:
            # digits and punctuation pass through unchanged
            result += ch
    return result
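# Aside (a property of the shift cipher, not an extra API): decoding a
# Caesar shift is just shifting by the complement, e.g.
#   __caesar(26 - k, __caesar(k, text)) == text   for any 0 <= k < 26;
# func_caesar below simply brute-forces all 26 shifts.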
def func_caesar(*args):
    """Caesar cipher (brute-force all 26 offsets)"""
    res = []
    for offset in range(26):
        res.append("[+] offset : %d\tresult : %s" %
                   (offset, __caesar(offset, args[0])))
    return "\r\n".join(res)

def func_rot13(*args):
    """ROT13"""
    return __caesar(13, args[0])

def func_mpkc(*args):
    """Mobile Phone Keyboard Cipher"""
    T = {
        'A': 21, 'B': 22, 'C': 23, 'D': 31, 'E': 32, 'F': 33,
        'G': 41, 'H': 42, 'I': 43, 'J': 51, 'K': 52, 'L': 53,
        'M': 61, 'N': 62, 'O': 63, 'P': 71, 'Q': 72, 'R': 73, 'S': 74,
        'T': 81, 'U': 82, 'V': 83, 'W': 91, 'X': 92, 'Y': 93, 'Z': 94
    }
    arg = args[0].upper()
    if arg[0] in U:
        return ','.join([str(T.get(i, i)) for i in arg])
    else:
        T = {str(T[k]): k for k in T}
        if ',' in arg:
            arg = arg.split(',')
        elif ' ' in arg:
            arg = arg.split(' ')
        return ''.join([T.get(i, i) for i in arg])
def func_morse(*args):
    """Morse code"""
    T = {
        'A': '.-', 'B': '-...', 'C': '-.-.',
        'D': '-..', 'E': '.', 'F': '..-.',
        'G': '--.', 'H': '....', 'I': '..',
        'J': '.---', 'K': '-.-', 'L': '.-..',
        'M': '--', 'N': '-.', 'O': '---',
        'P': '.--.', 'Q': '--.-', 'R': '.-.',
        'S': '...', 'T': '-', 'U': '..-',
        'V': '...-', 'W': '.--', 'X': '-..-',
        'Y': '-.--', 'Z': '--..',
        '0': '-----', '1': '.----', '2': '..---',
        '3': '...--', '4': '....-', '5': '.....',
        '6': '-....', '7': '--...', '8': '---..',
        '9': '----.',
        ',': '--..--', '.': '.-.-.-', ':': '---...', ';': '-.-.-.',
        '?': '..--..', '=': '-...-', "'": '.----.', '/': '-..-.',
        '!': '-.-.--', '-': '-....-', '_': '..--.-', '(': '-.--.',
        ')': '-.--.-', '$': '...-..-', '&': '. . . .', '@': '.--.-.',
        '{': '----.--', '}': '-----.-'
    }
    arg = args[0]
    if re.match(r'^[\.\-\/ ]+$', arg):
        T = {str(T[k]): k for k in T}
        if len(args) > 1:
            arg = ' '.join(args)
        arg = arg.replace('/', ' ').split(' ')
        # TODO: Morse auto-decode when the input has no separators
        # (draft kept from the original, commented out):
        # p = 0
        # res = ''
        # d = 5
        # while p < (len(arg)+7) and d > 0:
        #     print("[D] len : %d p : %d" % (len(arg), p))
        #     for j in [6, 5, 4, 3, 2, 1, 0]:
        #         tmp = T.get(arg[p:p+j], None)
        #         print("[D] tmp = arg[%d:%s] = %s => %s" %
        #               (p, j, arg[p:p+j], tmp))
        #         if tmp:
        #             p = p+j
        #             res += tmp
        #             break
        #         # p = p+j-1
        #         # break
        #     d -= 1
        # print("[D] Result : %s" % res)
        return ''.join([T.get(i) for i in arg])
    else:
        return '/'.join([str(T.get(i, '?')) for i in arg.upper()])
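# A minimal sketch for the TODO above (not part of the original module):
# decoding unseparated Morse is ambiguous, so a greedy longest-match pass
# is not enough; this helper backtracks over all splits instead. It assumes
# 'table' is the inverted code->letter mapping built inside func_morse.
def _morse_decode_unseparated(code, table):
    """Return all decodings of a run of '.'/'-' with no separators."""
    if not code:
        return ['']
    results = []
    # codewords in the table above are at most 7 symbols long
    for j in range(1, min(7, len(code)) + 1):
        ch = table.get(code[:j])
        if ch:
            results += [ch + rest
                        for rest in _morse_decode_unseparated(code[j:], table)]
    return results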
def func_peigen(*args):
    """Bacon cipher"""
    T = {
        'H': 'aabbb', 'G': 'aabba', 'R': 'baaab', 'Q': 'baaaa',
        'Z': 'bbaab', 'Y': 'bbaaa', 'N': 'abbab', 'M': 'abbaa',
        'U': 'babaa', 'V': 'babab', 'I': 'abaaa', 'J': 'abaab',
        'F': 'aabab', 'E': 'aabaa', 'A': 'aaaaa', 'B': 'aaaab',
        'T': 'baabb', 'S': 'baaba', 'C': 'aaaba', 'D': 'aaabb',
        'P': 'abbbb', 'O': 'abbba', 'K': 'ababa', 'L': 'ababb',
        'W': 'babba', 'X': 'babbb'
    }
    arg = args[0]
    if re.match(r'^[ab]+$', arg):
        T = {str(T[k]): k for k in T}
        return ''.join([T.get(arg[i:i+5]) for i in range(0, len(arg), 5)])
    else:
        return ''.join([T.get(i.upper()) for i in arg])

def __vigenere(s, key='virink', de=0):
    """Vigenère cipher (encodes by default; de=1 decodes)"""
    s = str(s).replace(" ", "").upper()
    key = str(key).replace(" ", "").upper()
    res = ''
    i = 0
    while i < len(s):
        j = i % len(key)
        k = U.index(key[j])
        m = U.index(s[i])
        if de:
            if m < k:
                m += 26
            res += U[m - k]
        else:
            res += U[(m + k) % 26]
        i += 1
    return res

def func_vigenere(*args):
    """Vigenère cipher"""
    if len(args) < 2:
        return '[-] Vigenere Usage : command key text [isdecode]'
    return __vigenere(args[1], args[0], 1 if len(args) >= 3 else 0)
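# Usage sketch (guarded so importing this module is unaffected):
if __name__ == '__main__':
    # round trip: decoding with the same key recovers the plaintext
    ct = __vigenere('ATTACKATDAWN', 'LEMON')   # -> 'LXFOPVEFRNHR'
    assert __vigenere(ct, 'LEMON', de=1) == 'ATTACKATDAWN'
    print(func_caesar('khoor zruog'))          # offset 23 yields 'hello world'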
| 30.196429 | 74 | 0.350089 | 628 | 5,073 | 2.794586 | 0.294586 | 0.025641 | 0.023932 | 0.030769 | 0.173219 | 0.126496 | 0.083191 | 0.046724 | 0.02963 | 0.02963 | 0 | 0.039138 | 0.350286 | 5,073 | 167 | 75 | 30.377246 | 0.493325 | 0.130298 | 0 | 0.140351 | 0 | 0 | 0.142032 | 0 | 0 | 0 | 0 | 0.005988 | 0 | 1 | 0.078947 | false | 0 | 0.017544 | 0 | 0.210526 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
7d953acfe0d26007513dac6a05f6317497155128 | 712 | py | Python | backend/streetsignup/migrations/0002_auto_20200901_1758.py | nicoepp/the-prayer-walk | 6c8217c33f399cfe46dc23075e13ca9464079cae | [
"MIT"
] | null | null | null | backend/streetsignup/migrations/0002_auto_20200901_1758.py | nicoepp/the-prayer-walk | 6c8217c33f399cfe46dc23075e13ca9464079cae | [
"MIT"
] | null | null | null | backend/streetsignup/migrations/0002_auto_20200901_1758.py | nicoepp/the-prayer-walk | 6c8217c33f399cfe46dc23075e13ca9464079cae | [
"MIT"
] | null | null | null | # Generated by Django 3.1.1 on 2020-09-01 17:58

from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

    dependencies = [
        ('streetsignup', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='segment',
            name='street',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='segments', to='streetsignup.street'),
        ),
        migrations.AlterField(
            model_name='subscription',
            name='street',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='streetsignup.street'),
        ),
    ]
| 28.48 | 132 | 0.634831 | 75 | 712 | 5.946667 | 0.506667 | 0.071749 | 0.09417 | 0.147982 | 0.273543 | 0.273543 | 0.273543 | 0.273543 | 0.273543 | 0.273543 | 0 | 0.03525 | 0.242978 | 712 | 24 | 133 | 29.666667 | 0.792208 | 0.063202 | 0 | 0.333333 | 1 | 0 | 0.15188 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.111111 | 0 | 0.277778 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
7d9767476bcf26c64a3560357db2dd0c005504a9 | 9,830 | py | Python | deepchem/feat/molecule_featurizers/coulomb_matrices.py | deloragaskins/deepchem | 234ab699cdb997e5963966a8b6926cb2cda7c064 | [
"MIT"
] | 3,782 | 2016-02-21T03:53:11.000Z | 2022-03-31T16:10:26.000Z | deepchem/feat/molecule_featurizers/coulomb_matrices.py | deloragaskins/deepchem | 234ab699cdb997e5963966a8b6926cb2cda7c064 | [
"MIT"
] | 2,666 | 2016-02-11T01:54:54.000Z | 2022-03-31T11:14:33.000Z | deepchem/feat/molecule_featurizers/coulomb_matrices.py | deloragaskins/deepchem | 234ab699cdb997e5963966a8b6926cb2cda7c064 | [
"MIT"
] | 1,597 | 2016-02-21T03:10:08.000Z | 2022-03-30T13:21:28.000Z | """
Generate coulomb matrices for molecules.

See Montavon et al., _New Journal of Physics_ __15__ (2013) 095003.
"""
import numpy as np
from typing import Any, List, Optional

from deepchem.utils.typing import RDKitMol
from deepchem.utils.data_utils import pad_array
from deepchem.feat.base_classes import MolecularFeaturizer


class CoulombMatrix(MolecularFeaturizer):
  """Calculate Coulomb matrices for molecules.

  Coulomb matrices provide a representation of the electronic structure of
  a molecule. For a molecule with `N` atoms, the Coulomb matrix is a
  `N X N` matrix where each element gives the strength of the
  electrostatic interaction between two atoms. The method is described
  in more detail in [1]_.

  Examples
  --------
  >>> import deepchem as dc
  >>> featurizers = dc.feat.CoulombMatrix(max_atoms=23)
  >>> input_file = 'deepchem/feat/tests/data/water.sdf'  # really backed by water.sdf.csv
  >>> tasks = ["atomization_energy"]
  >>> loader = dc.data.SDFLoader(tasks, featurizer=featurizers)
  >>> dataset = loader.create_dataset(input_file)

  References
  ----------
  .. [1] Montavon, Grégoire, et al. "Learning invariant representations of
     molecules for atomization energy prediction." Advances in neural information
     processing systems. 2012.

  Note
  ----
  This class requires RDKit to be installed.
  """
  def __init__(self,
               max_atoms: int,
               remove_hydrogens: bool = False,
               randomize: bool = False,
               upper_tri: bool = False,
               n_samples: int = 1,
               seed: Optional[int] = None):
    """Initialize this featurizer.

    Parameters
    ----------
    max_atoms: int
      The maximum number of atoms expected for molecules this featurizer will
      process.
    remove_hydrogens: bool, optional (default False)
      If True, remove hydrogens before processing them.
    randomize: bool, optional (default False)
      If True, use method `randomize_coulomb_matrices` to randomize Coulomb matrices.
    upper_tri: bool, optional (default False)
      Generate only upper triangle part of Coulomb matrices.
    n_samples: int, optional (default 1)
      If `randomize` is set to True, the number of random samples to draw.
    seed: int, optional (default None)
      Random seed to use.
    """
    self.max_atoms = int(max_atoms)
    self.remove_hydrogens = remove_hydrogens
    self.randomize = randomize
    self.upper_tri = upper_tri
    self.n_samples = n_samples
    if seed is not None:
      seed = int(seed)
    self.seed = seed

  def _featurize(self, datapoint: RDKitMol, **kwargs) -> np.ndarray:
    """
    Calculate Coulomb matrices for molecules. If extra randomized
    matrices are generated, they are treated as if they are features
    for additional conformers.

    Since Coulomb matrices are symmetric, only the (flattened) upper
    triangular portion is returned.

    Parameters
    ----------
    datapoint: rdkit.Chem.rdchem.Mol
      RDKit Mol object

    Returns
    -------
    np.ndarray
      The coulomb matrices of the given molecule.
      The default shape is `(num_confs, max_atoms, max_atoms)`.
      If num_confs == 1, the shape is `(max_atoms, max_atoms)`.
    """
    if 'mol' in kwargs:
      datapoint = kwargs.get("mol")
      raise DeprecationWarning(
          'Mol is being phased out as a parameter, please pass "datapoint" instead.'
      )

    features = self.coulomb_matrix(datapoint)
    if self.upper_tri:
      features = [f[np.triu_indices_from(f)] for f in features]
    features = np.asarray(features)
    if features.shape[0] == 1:
      # `(1, max_atoms, max_atoms)` -> `(max_atoms, max_atoms)`
      features = np.squeeze(features, axis=0)
    return features
  def coulomb_matrix(self, mol: RDKitMol) -> np.ndarray:
    """
    Generate Coulomb matrices for each conformer of the given molecule.

    Parameters
    ----------
    mol: rdkit.Chem.rdchem.Mol
      RDKit Mol object

    Returns
    -------
    np.ndarray
      The coulomb matrices of the given molecule
    """
    try:
      from rdkit import Chem
      from rdkit.Chem import AllChem
    except ModuleNotFoundError:
      raise ImportError("This class requires RDKit to be installed.")

    # Check whether num_confs >= 1 or not
    num_confs = len(mol.GetConformers())
    if num_confs == 0:
      mol = Chem.AddHs(mol)
      AllChem.EmbedMolecule(mol, AllChem.ETKDG())

    if self.remove_hydrogens:
      mol = Chem.RemoveHs(mol)
    n_atoms = mol.GetNumAtoms()
    z = [atom.GetAtomicNum() for atom in mol.GetAtoms()]
    rval = []
    for conf in mol.GetConformers():
      d = self.get_interatomic_distances(conf)
      m = np.outer(z, z) / d
      m[range(n_atoms), range(n_atoms)] = 0.5 * np.array(z)**2.4
      if self.randomize:
        for random_m in self.randomize_coulomb_matrix(m):
          random_m = pad_array(random_m, self.max_atoms)
          rval.append(random_m)
      else:
        m = pad_array(m, self.max_atoms)
        rval.append(m)
    return np.asarray(rval)
  def randomize_coulomb_matrix(self, m: np.ndarray) -> List[np.ndarray]:
    """Randomize a Coulomb matrix as described in [1]_:

    1. Compute row norms for M in a vector row_norms.
    2. Sample a zero-mean unit-variance noise vector e with dimension
       equal to row_norms.
    3. Permute the rows and columns of M with the permutation that
       sorts row_norms + e.

    Parameters
    ----------
    m: np.ndarray
      Coulomb matrix.

    Returns
    -------
    List[np.ndarray]
      List of the random coulomb matrix

    References
    ----------
    .. [1] Montavon et al., New Journal of Physics, 15, (2013), 095003
    """
    rval = []
    row_norms = np.asarray([np.linalg.norm(row) for row in m], dtype=float)
    rng = np.random.RandomState(self.seed)
    for i in range(self.n_samples):
      e = rng.normal(size=row_norms.size)
      p = np.argsort(row_norms + e)
      new = m[p][:, p]  # permute rows first, then columns
      rval.append(new)
    return rval
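  # Aside (editor's sketch, not part of deepchem): a simultaneous row/column
  # permutation is a similarity transform, so it preserves eigenvalues; that
  # is what makes CoulombMatrixEig (below) permutation-invariant. For any
  # symmetric m and permutation p:
  #
  #   np.allclose(np.sort(np.linalg.eigvalsh(m[p][:, p])),
  #               np.sort(np.linalg.eigvalsh(m)))   # -> True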
  @staticmethod
  def get_interatomic_distances(conf: Any) -> np.ndarray:
    """
    Get interatomic distances for atoms in a molecular conformer.

    Parameters
    ----------
    conf: rdkit.Chem.rdchem.Conformer
      Molecule conformer.

    Returns
    -------
    np.ndarray
      The distances matrix for all atoms in a molecule
    """
    n_atoms = conf.GetNumAtoms()
    coords = [
        # Convert AtomPositions from Angstrom to bohr (atomic units)
        conf.GetAtomPosition(i).__idiv__(0.52917721092) for i in range(n_atoms)
    ]
    d = np.zeros((n_atoms, n_atoms), dtype=float)
    for i in range(n_atoms):
      for j in range(i):
        d[i, j] = coords[i].Distance(coords[j])
        d[j, i] = d[i, j]
    return d
class CoulombMatrixEig(CoulombMatrix):
  """Calculate the eigenvalues of Coulomb matrices for molecules.

  This featurizer computes the eigenvalues of the Coulomb matrices for provided
  molecules. Coulomb matrices are described in [1]_.

  Examples
  --------
  >>> import deepchem as dc
  >>> featurizers = dc.feat.CoulombMatrixEig(max_atoms=23)
  >>> input_file = 'deepchem/feat/tests/data/water.sdf'  # really backed by water.sdf.csv
  >>> tasks = ["atomization_energy"]
  >>> loader = dc.data.SDFLoader(tasks, featurizer=featurizers)
  >>> dataset = loader.create_dataset(input_file)

  References
  ----------
  .. [1] Montavon, Grégoire, et al. "Learning invariant representations of
     molecules for atomization energy prediction." Advances in neural information
     processing systems. 2012.
  """

  def __init__(self,
               max_atoms: int,
               remove_hydrogens: bool = False,
               randomize: bool = False,
               n_samples: int = 1,
               seed: Optional[int] = None):
    """Initialize this featurizer.

    Parameters
    ----------
    max_atoms: int
      The maximum number of atoms expected for molecules this featurizer will
      process.
    remove_hydrogens: bool, optional (default False)
      If True, remove hydrogens before processing them.
    randomize: bool, optional (default False)
      If True, use method `randomize_coulomb_matrices` to randomize Coulomb matrices.
    n_samples: int, optional (default 1)
      If `randomize` is set to True, the number of random samples to draw.
    seed: int, optional (default None)
      Random seed to use.
    """
    self.max_atoms = int(max_atoms)
    self.remove_hydrogens = remove_hydrogens
    self.randomize = randomize
    self.n_samples = n_samples
    if seed is not None:
      seed = int(seed)
    self.seed = seed

  def _featurize(self, datapoint: RDKitMol, **kwargs) -> np.ndarray:
    """
    Calculate eigenvalues of Coulomb matrix for molecules. Eigenvalues
    are returned sorted by absolute value in descending order and padded
    by max_atoms.

    Parameters
    ----------
    datapoint: rdkit.Chem.rdchem.Mol
      RDKit Mol object

    Returns
    -------
    np.ndarray
      The eigenvalues of Coulomb matrix for molecules.
      The default shape is `(num_confs, max_atoms)`.
      If num_confs == 1, the shape is `(max_atoms,)`.
    """
    if 'mol' in kwargs:
      datapoint = kwargs.get("mol")
      raise DeprecationWarning(
          'Mol is being phased out as a parameter, please pass "datapoint" instead.'
      )

    cmat = self.coulomb_matrix(datapoint)
    features_list = []
    for f in cmat:
      w, v = np.linalg.eig(f)
      w_abs = np.abs(w)
      sortidx = np.argsort(w_abs)
      sortidx = sortidx[::-1]
      w = w[sortidx]
      f = pad_array(w, self.max_atoms)
      features_list.append(f)
    features = np.asarray(features_list)
    if features.shape[0] == 1:
      # `(1, max_atoms)` -> `(max_atoms,)`
      features = np.squeeze(features, axis=0)
    return features
| 31.812298 | 88 | 0.653713 | 1,271 | 9,830 | 4.95122 | 0.225806 | 0.033053 | 0.013348 | 0.015255 | 0.532338 | 0.522326 | 0.497537 | 0.486413 | 0.477197 | 0.477197 | 0 | 0.010481 | 0.24293 | 9,830 | 308 | 89 | 31.915584 | 0.835125 | 0.509563 | 0 | 0.376068 | 1 | 0 | 0.046186 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.059829 | false | 0.017094 | 0.068376 | 0 | 0.188034 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
7d9a43e7079b4241b2e56a68cd01b2edf6c43289 | 1,697 | py | Python | data_utils/dataset/kodak_dataset.py | hieu1999210/image_compression | 3faf90d704782e1d6a186b0c8ea7fb1e2ec97a2c | [
"Apache-2.0"
] | null | null | null | data_utils/dataset/kodak_dataset.py | hieu1999210/image_compression | 3faf90d704782e1d6a186b0c8ea7fb1e2ec97a2c | [
"Apache-2.0"
] | null | null | null | data_utils/dataset/kodak_dataset.py | hieu1999210/image_compression | 3faf90d704782e1d6a186b0c8ea7fb1e2ec97a2c | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 Hieu Nguyen
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import os
from glob import glob

from PIL import Image
from torch.utils.data import Dataset

from ..transforms import get_transforms
from .build import DATASET_REGISTRY


@DATASET_REGISTRY.register()
class KodakDataset(Dataset):
    def __init__(self, data_folder, mode, cfg, **kwargs):
        """Dataset of images found directly under `data_folder`."""
        super().__init__()
        self.cfg = cfg
        self.paths = sorted(glob(f"{data_folder}/*"))
        print(f"There are {len(self)} images in the {mode} dataset")
        self.transforms = get_transforms(cfg, mode)

    def __len__(self):
        return len(self.paths)

    def __getitem__(self, idx):
        """Return (image_id, transformed image) for the given index."""
        path = self.paths[idx]
        image_id = os.path.split(path)[-1].replace(".png", "")
        img = self._load_img(idx)
        img = self.transforms(img)
        return image_id, img

    def _load_img(self, idx):
        """
        args: index into self.paths
        return: PIL image (RGB)
        """
        image = Image.open(self.paths[idx]).convert('RGB')
        return image
| 26.936508 | 80 | 0.61815 | 216 | 1,697 | 4.726852 | 0.513889 | 0.058766 | 0.025465 | 0.031342 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.006944 | 0.236299 | 1,697 | 62 | 81 | 27.370968 | 0.780864 | 0.392457 | 0 | 0 | 0 | 0 | 0.070157 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.16 | false | 0 | 0.24 | 0.04 | 0.56 | 0.04 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
7da3966430bc2a6549730b528f313eb6f4d29793 | 7,990 | py | Python | zp_database/make_zp/create_hard_xray_zp.py | sajid-ali-nu/zone_plate_testing | c50afd575a6e733fce265db2ab8cc1c7b21cfe69 | [
"MIT"
] | null | null | null | zp_database/make_zp/create_hard_xray_zp.py | sajid-ali-nu/zone_plate_testing | c50afd575a6e733fce265db2ab8cc1c7b21cfe69 | [
"MIT"
] | null | null | null | zp_database/make_zp/create_hard_xray_zp.py | sajid-ali-nu/zone_plate_testing | c50afd575a6e733fce265db2ab8cc1c7b21cfe69 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# coding: utf-8

# This script generates a zone plate pattern (based on partial filling) given
# the material, energy, grid size and number of zones as input.

# In[1]:

import numpy as np
import matplotlib.pyplot as plt
from numba import njit
from joblib import Parallel, delayed
from tqdm import tqdm, trange
import urllib, os, pickle
from os.path import dirname as up

# Importing all the required libraries. Numba is used to optimize functions.

# In[2]:

def repeat_pattern(X, Y, Z):
    flag_ = np.where((X > 0) & (Y > 0))
    flag1 = np.where((X > 0) & (Y < 0))
    flag1 = tuple((flag1[0][::-1], flag1[1]))
    Z[flag1] = Z[flag_]
    flag2 = np.where((X < 0) & (Y > 0))
    flag2 = tuple((flag2[0], flag2[1][::-1]))
    Z[flag2] = Z[flag_]
    flag3 = np.where((X < 0) & (Y < 0))
    flag3 = tuple((flag3[0][::-1], flag3[1][::-1]))
    Z[flag3] = Z[flag_]
    return Z

# *repeat_pattern* : produces the zone plate pattern given the pattern in only one quadrant (X, Y > 0) as input.
# * *Inputs* : X and Y grid denoting the coordinates and Z containing the pattern in one quadrant.
# * *Outputs* : Z itself is modified to reflect the repetition.

# In[3]:
def get_property(mat, energy):
    url = "http://henke.lbl.gov/cgi-bin/pert_cgi.pl"
    data = {'Element': str(mat), 'Energy': str(energy), 'submit': 'Submit Query'}
    data = urllib.parse.urlencode(data)
    data = data.encode('utf-8')
    req = urllib.request.Request(url, data)
    resp = urllib.request.urlopen(req)
    respDat = resp.read()
    response = respDat.split()
    d = b'g/cm^3<li>Delta'
    i = response.index(d)
    delta = str(response[i+2])[:str(response[i+2]).index('<li>Beta')][2:]
    beta = str(response[i+4])[2:-1]
    return float(delta), float(beta)

# *get_property* : gets delta and beta for a given material at the specified energy from Henke et al.
# * *Inputs* : mat - material, energy - energy in eV
# * *Outputs* : delta, beta

# In[4]:

@njit  # equivalent to "jit(nopython=True)".
def partial_fill(x, y, step, r1, r2, n):
    x_ = np.linspace(x - step/2, x + step/2, n)
    y_ = np.linspace(y - step/2, y + step/2, n)
    cnts = 0
    for i in range(n):
        for j in range(n):
            z = (x_[i] * x_[i] + y_[j] * y_[j])
            if r1*r1 < z < r2*r2:
                cnts += 1
    fill_factor = cnts/(n*n)
    return fill_factor

# *partial_fill* : workhorse function for determining the fill pattern. This function is thus used in a loop. njit is used to optimize the function.
# * *Inputs* : x, y - coordinates of the point, step - step size, r1, r2 - inner and outer radii of ring, n - resolution
# * *Outputs* : fill_factor - value of the pixel based on the amount of ring passing through it

# In[5]:

# find the radius of the nth zone
def zone_radius(n, f, wavel):
    return np.sqrt(n*wavel*f + ((n*wavel)/2)**2)

# *zone_radius* : function to find the radius of a zone given the zone number and wavelength
# * *Inputs* : n - zone number, f - focal length, wavel - wavelength
# * *Outputs* : radius of the zone as specified by the inputs
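# Worked example (editor's addition, values rounded): at 10 keV the wavelength
# is wavel = 1239.84/10000 nm ~ 0.124 nm, so with f = 10 mm the first zone has
#   r_1 = sqrt(1 * 0.124e-9 * 10e-3 + (0.124e-9 / 2)**2) ~ 1.11 um;
# r_n grows roughly as sqrt(n), so the zone widths shrink toward the edge.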
# In[6]:

def make_quadrant(X, Y, flag, r1, r2, step, n, zone_number):
    z = np.zeros(np.shape(X))
    Z = np.sqrt(X**2 + Y**2)
    for l in range(len(flag[0])):
        i = flag[0][l]
        j = flag[1][l]
        if 0.75*r1 < Z[i][j] < 1.25*r2:
            x1 = X[i][j]
            y1 = Y[i][j]
            z[i][j] = partial_fill(x1, y1, step, r1, r2, n)
    z[tuple((flag[1], flag[0]))] = z[tuple((flag[0], flag[1]))]
    return z

# *make_quadrant* : function used to create a quadrant of a ring given the inner and outer radius and zone number.
# * *Inputs* : X, Y - grid, flag - specifies the quadrant to be filled (i.e. where X, Y > 0), r1, r2 - inner and outer radii, n - parameter for the partial_fill function
# * *Outputs* : z - output pattern with one quadrant filled.

# In[7]:

# 2D ZP
def make_ring(i):
    print(i)
    r1 = radius[i-1]
    r2 = radius[i]
    n = 250
    ring = make_quadrant(X, Y, flag, r1, r2, step_xy, n, zone_number=i)
    ring = repeat_pattern(X, Y, ring)
    ring_ = np.where(ring != 0)
    vals_ = ring[ring_]
    np.save('ring_locs_' + str(i) + '.npy', ring_)
    np.save('ring_vals_' + str(i) + '.npy', vals_)
    return

# *make_ring* : function used to create a ring given the relevant parameters.
# * *Inputs* : i - zone number, radius - array of radii, X, Y - grid, flag - specifies the quadrant to be filled (i.e. where X, Y > 0), n - parameter for the partial_fill function
# * *Outputs* : None. Saves the rings to disk.

# In[8]:

mat = 'Au'
energy = 10000  # energy in eV
f = 10e-3  # focal length in meters
wavel = (1239.84/energy)*10**(-9)  # wavelength in meters
delta, beta = get_property(mat, energy)
zones = 700  # number of zones
radius = np.zeros(zones)

# Setting up the parameters and initializing the variables.

# In[9]:

for k in range(zones):
    radius[k] = zone_radius(k, f, wavel)

# Filling the radius array with the radius of each zone, for later use in making the rings.

# In the next few code blocks, we check whether the simulation parameters make sense. First we print the input and output pixel sizes, assuming the 1FT propagator will be used. Then we check that the pixel sizes are small enough compared to the outermost zone width. Finally we check whether the focal spot stays inside the output window for the given tilt angle.
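# Editor's note on the check below: for a single-FT (1FT) Fresnel propagator
# the output window scales as L_out = wavel * f / dx_in = wavel * f * N / L_in,
# so the output pixel is dx_out = wavel * f / L_in; both dx_in and dx_out must
# stay well below the outermost zone width drn for adequate sampling.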
# In[10]:

grid_size = 55296
input_xrange = 262e-6
step_xy = input_xrange/grid_size
L_out = (1239.84/energy)*10**(-9)*f/(input_xrange/grid_size)
step_xy_output = L_out/grid_size
print(' Output L : ', L_out)
print(' output pixel size(nm) : ', step_xy_output*1e9)
print(' input pixel size(nm) : ', step_xy*1e9)

# In[11]:

drn = radius[-1] - radius[-2]
print(' maximum radius(um) : ', radius[-1]*1e6)
print(' outermost zone width(nm) :', drn*1e9)

# In[12]:

print(' max shift of focal spot(um) : ', (L_out/2)*1e6)
# Invert the following to get the maximum tilt allowance, after which the
# focal spot falls off the simulation plane:
# np.sin(theta*(np.pi/180))*f = (L_out/2)
theta_max = np.arcsin((L_out/2)*(1/f))*(180/np.pi)
print(' max wavefield aligned tilt(deg) : ', theta_max)

# In[13]:

if step_xy > 0.25*drn:
    print(' WARNING ! input pixel size too large')
    print(' ratio of input step size to outermost zone width', step_xy/drn)
if step_xy_output > 0.25*drn:
    print(' WARNING ! output pixel size too large')
    print(' ratio of output step size to outermost zone width', step_xy_output/drn)

# In[14]:

zones_to_fill = []
for i in range(zones):
    if i % 2 == 1:
        zones_to_fill.append(i)
zones_to_fill = np.array(zones_to_fill)

# Making a list of zones to fill. (Only alternate zones are filled in our case; this can be modified as per convenience.)

# In[ ]:

try:
    os.chdir(up(os.getcwd()) + str('/hard_xray_zp'))
except:
    os.mkdir(up(os.getcwd()) + str('/hard_xray_zp'))
    os.chdir(up(os.getcwd()) + str('/hard_xray_zp'))

# Store the location of each ring of the zone plate separately in a sub directory. This is more efficient than storing the whole zone plate array!

# In[ ]:

x1 = input_xrange/2
x = np.linspace(-x1, x1, grid_size)
step_xy = x[-1] - x[-2]
zp_coords = [-x1, x1, -x1, x1]

# In[ ]:

X, Y = np.meshgrid(x, x)
flag = np.where((X > 0) & (Y > 0) & (X >= Y))

# Creating the input 1D array and setting the parameters for use by the make_ring function.
# Note that X, Y, flag and step_xy will be read by multiple processes which we will spawn using joblib.

# In[ ]:

get_ipython().run_cell_magic('capture', '', 'from joblib import Parallel, delayed \nresults = Parallel(n_jobs=5)(delayed(make_ring)(i) for i in zones_to_fill)')

# Creating the rings! (Adjust the number of jobs depending on CPU cores.)

# In[ ]:

params = {'grid_size': grid_size, 'step_xy': step_xy, 'energy(in eV)': energy, 'wavelength in m': wavel, 'focal_length': f, 'zp_coords': zp_coords, 'delta': delta, 'beta': beta}
pickle.dump(params, open('parameters.pickle', 'wb'))

# Pickling and saving all the associated parameters along with the rings for use in simulation!
| 29.592593 | 359 | 0.659324 | 1,372 | 7,990 | 3.76312 | 0.253644 | 0.005423 | 0.012783 | 0.008716 | 0.148363 | 0.108851 | 0.096068 | 0.070502 | 0.03099 | 0.019369 | 0 | 0.031352 | 0.197622 | 7,990 | 269 | 360 | 29.702602 | 0.773982 | 0.431539 | 0 | 0.016807 | 1 | 0.008403 | 0.162946 | 0.008929 | 0 | 0 | 0 | 0 | 0 | 1 | 0.05042 | false | 0 | 0.067227 | 0.008403 | 0.168067 | 0.10084 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
7daef8b7f43d19ad4b4a4241d53911344a3bad74 | 675 | py | Python | ABNOOrchestrator/ABNOParameters.py | HPNLAB/ABNO-FUTEBOL | 3a1dbee11abd9a808d337a6bbdccba052671d33c | [
"Apache-2.0"
] | null | null | null | ABNOOrchestrator/ABNOParameters.py | HPNLAB/ABNO-FUTEBOL | 3a1dbee11abd9a808d337a6bbdccba052671d33c | [
"Apache-2.0"
] | null | null | null | ABNOOrchestrator/ABNOParameters.py | HPNLAB/ABNO-FUTEBOL | 3a1dbee11abd9a808d337a6bbdccba052671d33c | [
"Apache-2.0"
] | null | null | null | __author__ = 'alejandroaguado'

from xml.etree import ElementTree


class ABNOParameters:

    def __init__(self, filename):
        self.document = ElementTree.parse(filename)
        root = self.document.getroot()
        tag = self.document.find('abnoconfig')
        self.address = tag.attrib['address']
        self.port = int(tag.attrib['port'])
        tag = self.document.find('pceconfig')
        self.pceaddress = tag.attrib['address']
        self.pceport = int(tag.attrib['port'])
        tag = self.document.find('pmconfig')
        self.pmaddress = tag.attrib['address']
        self.pmport = int(tag.attrib['port'])
#tag = self.document.find('properties') | 35.526316 | 51 | 0.638519 | 75 | 675 | 5.64 | 0.413333 | 0.170213 | 0.141844 | 0.179669 | 0.248227 | 0.248227 | 0.248227 | 0.248227 | 0 | 0 | 0 | 0 | 0.219259 | 675 | 19 | 52 | 35.526316 | 0.802657 | 0.056296 | 0 | 0 | 0 | 0 | 0.117739 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.066667 | false | 0 | 0.066667 | 0 | 0.2 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
7dafc11fd8fb86ab44db99cb63fe8f3a5c118843 | 277 | py | Python | influencer-detection/src/api/influencers/api/v1.py | luisblazquezm/influencer-detection | bd8aec83cbd8e5fbb3231824b5e274c47f491501 | [
"Apache-2.0"
] | 4 | 2021-05-22T16:33:41.000Z | 2021-11-22T23:44:40.000Z | influencer-detection/src/api/influencers/api/v1.py | Alburrito/influencer-detection | bd8aec83cbd8e5fbb3231824b5e274c47f491501 | [
"Apache-2.0"
] | null | null | null | influencer-detection/src/api/influencers/api/v1.py | Alburrito/influencer-detection | bd8aec83cbd8e5fbb3231824b5e274c47f491501 | [
"Apache-2.0"
] | 2 | 2021-05-21T16:34:14.000Z | 2021-09-29T12:59:49.000Z | #!flask/bin/python

# Copyright 2021 Luis Blazquez Miñambres (@luisblazquezm)
# See LICENSE for details.

from flask_restx import Api

api = Api(version='1.0',
          title='Influencer Detection Project',
description="**PORBI Influencer Detection project's Flask RESTX API**") | 27.7 | 75 | 0.747292 | 36 | 277 | 5.722222 | 0.75 | 0.097087 | 0.252427 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.025316 | 0.144404 | 277 | 10 | 75 | 27.7 | 0.843882 | 0.353791 | 0 | 0 | 0 | 0 | 0.491525 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.25 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
7db2d15a3db81041f88feba1273d33752a9d0183 | 1,730 | py | Python | filestream.py | ziyua/filestream | b79e9dc550d39c6bd5685eb0311f11d3a63537d9 | [
"Apache-2.0"
] | null | null | null | filestream.py | ziyua/filestream | b79e9dc550d39c6bd5685eb0311f11d3a63537d9 | [
"Apache-2.0"
] | null | null | null | filestream.py | ziyua/filestream | b79e9dc550d39c6bd5685eb0311f11d3a63537d9 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
# -*- coding: gb2312 -*-
import fileinput
import os
class FileStream:
def __init__(self, filename, cutsize=2048):
self.filename = filename
self.cutsize = cutsize # 2048 byte
self.size = os.path.getsize(self.filename)
self.file = fileinput.input(filename)
self.Buff = ''
self.fileStream = self._filestream()
def cuttimes(self):
if self.lastsize() == 0:
return self.size / self.cutsize
elif self.lastsize() >= 0:
return self.size / self.cutsize + 1
def lastsize(self):
return self.size % self.cutsize
def _bytestream(self):
for line in self.file:
for byte in line:
yield byte
def _filestream(self):
bytestream = self._bytestream()
for k in range(self.size):
byte = bytestream.next()
self.Buff += byte
if len(self.Buff) == self.cutsize:
data = self.Buff
self.Buff = ''
yield data
else:
if len(self.Buff) != 0:
data = self.Buff
self.Buff = ''
yield data
def getstream(self):
# have not more content, return <type 'None'>.
try:
content = self.fileStream.next()
except StopIteration:
self.file.close()
return
else:
return content
if __name__ == '__main__':
fs = FileStream('1.txt', 1024)
print fs.cuttimes()
print fs.lastsize()
while 1:
fby = fs.getstream()
if fby is not None:
print '--------'
print fby, len(fby)
else:
break
| 25.441176 | 54 | 0.514451 | 186 | 1,730 | 4.698925 | 0.333333 | 0.073227 | 0.05492 | 0.061785 | 0.181922 | 0.153318 | 0.153318 | 0.086957 | 0 | 0 | 0 | 0.020484 | 0.379191 | 1,730 | 67 | 55 | 25.820896 | 0.793296 | 0.054335 | 0 | 0.185185 | 0 | 0 | 0.012868 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.037037 | null | null | 0.074074 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
7db6acccc13d73c452c9d80805e389c51f138158 | 346 | py | Python | Backend/linux.py | TheInvincibleLearner/simranquirky.github.io | 21a2524b321493b9ff82eb8b4fcc10af8f8face7 | [
"MIT"
] | null | null | null | Backend/linux.py | TheInvincibleLearner/simranquirky.github.io | 21a2524b321493b9ff82eb8b4fcc10af8f8face7 | [
"MIT"
] | 10 | 2021-09-29T13:25:21.000Z | 2021-10-05T13:51:36.000Z | Backend/linux.py | TheInvincibleLearner/simranquirky.github.io | 21a2524b321493b9ff82eb8b4fcc10af8f8face7 | [
"MIT"
] | 7 | 2021-09-22T13:26:35.000Z | 2021-10-05T03:07:43.000Z | #!/usr/bin/python3
print("content-type: text/html")
print()
import subprocess as sp
import cgi
fs = cgi.FieldStorage()
cmd = fs.getvalue("command")
output = sp.getoutput("sudo "+cmd)
print("<body style='padding: 40px;'>")
print('<h1 style="color:#df405a;" >Output</h1>')
print("<pre>{}</pre>".format(output))
print("</body>")
| 20.352941 | 49 | 0.635838 | 46 | 346 | 4.782609 | 0.652174 | 0.081818 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.026846 | 0.138728 | 346 | 16 | 50 | 21.625 | 0.711409 | 0.049133 | 0 | 0 | 0 | 0 | 0.394231 | 0.070513 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.181818 | 0 | 0.181818 | 0.545455 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 |
7dbc7331779b26c50f838cb805bfffb5e23cfa30 | 542 | py | Python | pytorch3dunet/unet3d/config.py | VolkerH/pytorch-3dunet | 01ee7d53ef1c8edb2bd45d76faf7df447144fb67 | [
"MIT"
] | null | null | null | pytorch3dunet/unet3d/config.py | VolkerH/pytorch-3dunet | 01ee7d53ef1c8edb2bd45d76faf7df447144fb67 | [
"MIT"
] | null | null | null | pytorch3dunet/unet3d/config.py | VolkerH/pytorch-3dunet | 01ee7d53ef1c8edb2bd45d76faf7df447144fb67 | [
"MIT"
] | null | null | null | import argparse

import torch
import yaml


def load_config():
    parser = argparse.ArgumentParser(description='UNet3D training')
    parser.add_argument('--config', type=str, help='Path to the YAML config file', required=True)
    args = parser.parse_args()
    config = _load_config_yaml(args.config)
    # Get a device to train on
    device = torch.device("cuda:0" if torch.cuda.is_available() else 'cpu')
    config['device'] = device
    return config
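# Usage sketch (the keys below are hypothetical, not a schema this module
# defines): invoked as `python train.py --config train_config.yml` with a
# YAML file such as
#
#   model:
#     name: UNet3D
#   loaders:
#     batch_size: 1
#
# load_config() returns that dict with config['device'] filled in.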
def _load_config_yaml(config_file):
    # yaml.load() without an explicit Loader is rejected by PyYAML >= 6;
    # safe_load parses plain config files without executing arbitrary tags.
    with open(config_file, 'r') as f:
        return yaml.safe_load(f)
| 27.1 | 97 | 0.714022 | 76 | 542 | 4.934211 | 0.526316 | 0.08 | 0.069333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.004444 | 0.169742 | 542 | 19 | 98 | 28.526316 | 0.828889 | 0.04428 | 0 | 0 | 0 | 0 | 0.129845 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.153846 | false | 0 | 0.230769 | 0.076923 | 0.538462 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
7dbeb142bc5611ae233fb17f68720f678cc9d5f9 | 2,031 | py | Python | client/src/proto3/socket_server.py | andrhahn/pi-spy | 04013565c83eb20db85688c0abb23d6f83d3fbaa | [
"MIT"
] | 1 | 2020-08-17T18:32:06.000Z | 2020-08-17T18:32:06.000Z | client/src/proto3/socket_server.py | andrhahn/pi-spy | 04013565c83eb20db85688c0abb23d6f83d3fbaa | [
"MIT"
] | null | null | null | client/src/proto3/socket_server.py | andrhahn/pi-spy | 04013565c83eb20db85688c0abb23d6f83d3fbaa | [
"MIT"
] | null | null | null | import SocketServer
import io
import logging
import struct
import threading

import PIL.Image
import pika

import config

logging.basicConfig(level=logging.INFO)


class RequestHandler(SocketServer.BaseRequestHandler):
    def handle(self):
        print 'Process socket connections thread:', threading.current_thread().name
        try:
            mf = self.request.makefile('rb')
            while True:
                image_len = struct.unpack('<L', mf.read(struct.calcsize('<L')))[0]
                image_bytes = mf.read(image_len)
                if not image_len:
                    break
                image_stream = io.BytesIO()
                image_stream.write(image_bytes)
                image_stream.seek(0)
                image = PIL.Image.open(image_stream)
                image.verify()
                print 'Image verified.'
                queue_channel = queue_connection.channel()
                queue_channel.exchange_declare(exchange='images', exchange_type='fanout')
                queue_channel.basic_publish(exchange='images', routing_key='', body=image_bytes)
                print 'Sent image.'
        finally:
            print 'Disconnected with client'


class ThreadedTCPServer(SocketServer.ThreadingMixIn, SocketServer.TCPServer):
    pass
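# Editor's sketch of the matching client side (hypothetical helper, not part
# of this project): each image is framed as a 4-byte little-endian length
# prefix ('<L') followed by the raw image bytes, mirroring handle() above.
def send_image_example(sock, image_bytes):
    sock.sendall(struct.pack('<L', len(image_bytes)))
    sock.sendall(image_bytes)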
if __name__ == "__main__":
    print 'Connecting to queue server'
    queue_connection = pika.BlockingConnection(
        pika.ConnectionParameters(host=config.get('queue_server_host'), port=int(config.get('queue_server_port'))))

    socket_server_port = int(config.get('socket_server_port'))
    print 'Starting socket server on port ', socket_server_port
    socket_server = ThreadedTCPServer((config.get('socket_server_host'), socket_server_port), RequestHandler)
    try:
        socket_server.serve_forever()
    except KeyboardInterrupt:
        pass

    print 'Closing queue connection'
    queue_connection.close()

    print 'Stopping socket server'
    socket_server.shutdown()
    socket_server.server_close()
| 24.46988 | 115 | 0.652388 | 215 | 2,031 | 5.934884 | 0.427907 | 0.103448 | 0.050157 | 0.031348 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.001326 | 0.257509 | 2,031 | 82 | 116 | 24.768293 | 0.844828 | 0 | 0 | 0.081633 | 0 | 0 | 0.142294 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0.040816 | 0.163265 | null | null | 0.163265 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
7dbf4c0c61fb56b588d550f32b9ba42ac0a71e93 | 3,506 | py | Python | Thirdparty/libpsd/build.py | stinvi/dava.engine | 2b396ca49cdf10cdc98ad8a9ffcf7768a05e285e | [
"BSD-3-Clause"
] | 26 | 2018-09-03T08:48:22.000Z | 2022-02-14T05:14:50.000Z | Thirdparty/libpsd/build.py | ANHELL-blitz/dava.engine | ed83624326f000866e29166c7f4cccfed1bb41d4 | [
"BSD-3-Clause"
] | null | null | null | Thirdparty/libpsd/build.py | ANHELL-blitz/dava.engine | ed83624326f000866e29166c7f4cccfed1bb41d4 | [
"BSD-3-Clause"
] | 45 | 2018-05-11T06:47:17.000Z | 2022-02-03T11:30:55.000Z | import os
import shutil

import build_utils


def get_supported_targets(platform):
    if platform == 'win32':
        return ['win32']
    elif platform == 'darwin':
        return ['macos']
    elif platform == 'linux':
        return ['linux']
    else:
        return []


def get_dependencies_for_target(target):
    if target == 'win32':
        return ['zlib']
    else:
        return []


def build_for_target(target, working_directory_path, root_project_path):
    if target == 'win32':
        _build_win32(working_directory_path, root_project_path)
    elif target == 'macos':
        _build_macos(working_directory_path, root_project_path)
    elif target == 'linux':
        _build_linux(working_directory_path, root_project_path)


def get_download_info():
    return 'https://sourceforge.net/projects/libpsd/files/libpsd/0.9/libpsd-0.9.zip'


def _download_and_extract(working_directory_path):
    source_folder_path = os.path.join(working_directory_path, 'libpsd_source')
    url = get_download_info()
    build_utils.download_and_extract(
        url,
        working_directory_path,
        source_folder_path,
        build_utils.get_url_file_name_no_ext(url))
    return source_folder_path


@build_utils.run_once
def _patch_sources(source_folder_path, working_directory_path):
    build_utils.apply_patch(
        os.path.abspath('patch_v0.9.diff'), working_directory_path)
    shutil.copyfile(
        'CMakeLists.txt', os.path.join(source_folder_path, 'CMakeLists.txt'))


def _build_win32(working_directory_path, root_project_path):
    source_folder_path = _download_and_extract(working_directory_path)
    _patch_sources(source_folder_path, working_directory_path)

    cmake_flags = ['-DZLIB_INCLUDE_DIR=' + os.path.join(working_directory_path, '../zlib/zlib_source/')]
    build_utils.build_and_copy_libraries_win32_cmake(
        os.path.join(working_directory_path, 'gen'),
        source_folder_path,
        root_project_path,
        'psd.sln', 'psd',
        'psd.lib', 'psd.lib',
        'libpsd.lib', 'libpsd.lib',
        'libpsd.lib', 'libpsd.lib',
        cmake_flags,
        static_runtime=False)

    _copy_headers(source_folder_path, root_project_path)


def _build_macos(working_directory_path, root_project_path):
    source_folder_path = _download_and_extract(working_directory_path)
    _patch_sources(source_folder_path, working_directory_path)

    build_utils.build_and_copy_libraries_macos_cmake(
        os.path.join(working_directory_path, 'gen'),
        source_folder_path,
        root_project_path,
        'psd.xcodeproj', 'psd',
        'libpsd.a',
        'libpsd.a')

    _copy_headers(source_folder_path, root_project_path)


def _build_linux(working_directory_path, root_project_path):
    source_folder_path = _download_and_extract(working_directory_path)
    _patch_sources(source_folder_path, working_directory_path)

    build_utils.build_and_copy_libraries_linux_cmake(
        gen_folder_path=os.path.join(working_directory_path, 'gen'),
        source_folder_path=source_folder_path,
        root_project_path=root_project_path,
        target="all",
        lib_name='libpsd.a')

    _copy_headers(source_folder_path, root_project_path)


def _copy_headers(source_folder_path, root_project_path):
    include_path = os.path.join(root_project_path, 'Libs/include/libpsd')
    build_utils.copy_files_by_name(
        os.path.join(source_folder_path, 'include'),
        include_path,
        ['libpsd.h', 'psd_color.h', 'psd_types.h'])
| 31.585586 | 104 | 0.72162 | 454 | 3,506 | 5.092511 | 0.180617 | 0.152249 | 0.190311 | 0.12327 | 0.643166 | 0.626298 | 0.514273 | 0.497405 | 0.38192 | 0.38192 | 0 | 0.006952 | 0.179407 | 3,506 | 110 | 105 | 31.872727 | 0.796663 | 0 | 0 | 0.289157 | 0 | 0.012048 | 0.112094 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.120482 | false | 0 | 0.036145 | 0.012048 | 0.253012 | 0 | 0 | 0 | 0 | null | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
7dc1969b2d44d9ad370f7f09a3b9e9919cb4e854 | 589 | py | Python | Combinatorialifier.py | Theta291/Partial-Application-in-Python | db503fbf7a1c173c01fca86a858875e38c41997a | [
"MIT"
] | null | null | null | Combinatorialifier.py | Theta291/Partial-Application-in-Python | db503fbf7a1c173c01fca86a858875e38c41997a | [
"MIT"
] | null | null | null | Combinatorialifier.py | Theta291/Partial-Application-in-Python | db503fbf7a1c173c01fca86a858875e38c41997a | [
"MIT"
] | null | null | null | # Exercise: Try to make a function that accepts a function of only positional
# arguments and returns a function that takes the same number of positional
# arguments and, given they are all iterables, attempts every combination of
# one argument from each iterable.
# Skills: Partial application, Iteration

papplycomboreverse = lambda fun, xiter: lambda *args: [fun(*args, x) for x in xiter]

def combo(fun):
    def returnfun(*args):
        currfun = fun
        for arg in reversed(args):
            currfun = papplycomboreverse(currfun, arg)
        return currfun()
    return returnfun
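# Usage sketch (guarded; 'add3' is a hypothetical example function):
if __name__ == '__main__':
    add3 = lambda a, b, c: a + b + c
    combos = combo(add3)([1, 2], [10], [100, 200])
    # One nesting level per argument: [[[111, 211]], [[112, 212]]]
    print(combos)
    # Note: the arguments must be re-iterable sequences (e.g. lists), since
    # inner levels are traversed once per outer combination, which would
    # exhaust a one-shot iterator.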
| 45.307692 | 259 | 0.726655 | 79 | 589 | 5.417722 | 0.620253 | 0.063084 | 0.060748 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.212224 | 589 | 12 | 260 | 49.083333 | 0.922414 | 0.502547 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.25 | false | 0 | 0 | 0 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
7dc490740f712aa8ee9b1a1e793a10bb7cab5ed9 | 27,885 | py | Python | trove-11.0.0/trove/guestagent/datastore/experimental/vertica/service.py | scottwedge/OpenStack-Stein | 7077d1f602031dace92916f14e36b124f474de15 | [
"Apache-2.0"
] | 1 | 2020-04-08T07:42:19.000Z | 2020-04-08T07:42:19.000Z | trove/guestagent/datastore/experimental/vertica/service.py | ttcong/trove | 1db2dc63fdd5409eafccebe79ff2900d0535ed13 | [
"Apache-2.0"
] | 5 | 2019-08-14T06:46:03.000Z | 2021-12-13T20:01:25.000Z | trove/guestagent/datastore/experimental/vertica/service.py | ttcong/trove | 1db2dc63fdd5409eafccebe79ff2900d0535ed13 | [
"Apache-2.0"
] | 2 | 2020-03-15T01:24:15.000Z | 2020-07-22T20:34:26.000Z | # Copyright [2015] Hewlett-Packard Development Company, L.P.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import subprocess
import tempfile
from oslo_log import log as logging
from oslo_utils import netutils
from six.moves import configparser
from trove.common import cfg
from trove.common.db import models
from trove.common import exception
from trove.common.i18n import _
from trove.common import instance as rd_instance
from trove.common.stream_codecs import PropertiesCodec
from trove.common import utils
from trove.guestagent.common.configuration import ConfigurationManager
from trove.guestagent.common.configuration import ImportOverrideStrategy
from trove.guestagent.common import guestagent_utils
from trove.guestagent.common import operating_system
from trove.guestagent.common.operating_system import FileMode
from trove.guestagent.datastore.experimental.vertica import system
from trove.guestagent.datastore import service
from trove.guestagent import pkg
from trove.guestagent import volume
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
packager = pkg.Package()
DB_NAME = 'db_srvr'
MOUNT_POINT = CONF.vertica.mount_point
# We will use a fake configuration file for the options managed through
# configuration groups that we apply directly with ALTER DB ... SET ...
FAKE_CFG = os.path.join(MOUNT_POINT, "vertica.cfg.fake")
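# Editor's illustration (the parameter name below is only an example): an
# override such as {'ActivePartitionCount': '3'} is persisted through the
# fake file's ConfigurationManager and then applied live via
# system.ALTER_DB_CFG, i.e. roughly
#   ALTER DATABASE db_srvr SET ActivePartitionCount = 3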
class VerticaAppStatus(service.BaseDbStatus):

    def _get_actual_db_status(self):
        """Get the status of dbaas and report it back."""
        try:
            out, err = system.shell_execute(system.STATUS_ACTIVE_DB,
                                            system.VERTICA_ADMIN)
            if out.strip() == DB_NAME:
                # UP status is confirmed
                LOG.info("Service Status is RUNNING.")
                return rd_instance.ServiceStatuses.RUNNING
            else:
                LOG.info("Service Status is SHUTDOWN.")
                return rd_instance.ServiceStatuses.SHUTDOWN
        except exception.ProcessExecutionError:
            LOG.exception("Failed to get database status.")
            return rd_instance.ServiceStatuses.CRASHED
class VerticaApp(object):
    """Prepares DBaaS on a Guest container."""

    def __init__(self, status):
        self.state_change_wait_time = CONF.state_change_wait_time
        self.status = status
        revision_dir = \
            guestagent_utils.build_file_path(
                os.path.join(MOUNT_POINT,
                             os.path.dirname(system.VERTICA_ADMIN)),
                ConfigurationManager.DEFAULT_STRATEGY_OVERRIDES_SUB_DIR)

        if not operating_system.exists(FAKE_CFG):
            operating_system.write_file(FAKE_CFG, '', as_root=True)
            operating_system.chown(FAKE_CFG, system.VERTICA_ADMIN,
                                   system.VERTICA_ADMIN_GRP, as_root=True)
            operating_system.chmod(FAKE_CFG, FileMode.ADD_GRP_RX_OTH_RX(),
                                   as_root=True)
        self.configuration_manager = \
            ConfigurationManager(FAKE_CFG, system.VERTICA_ADMIN,
                                 system.VERTICA_ADMIN_GRP,
                                 PropertiesCodec(delimiter='='),
                                 requires_root=True,
                                 override_strategy=ImportOverrideStrategy(
                                     revision_dir, "cnf"))

    def update_overrides(self, context, overrides, remove=False):
        if overrides:
            self.apply_overrides(overrides)

    def remove_overrides(self):
        config = self.configuration_manager.get_user_override()
        self._reset_config(config)
        self.configuration_manager.remove_user_override()

    def apply_overrides(self, overrides):
        self.configuration_manager.apply_user_override(overrides)
        self._apply_config(overrides)

    def _reset_config(self, config):
        try:
            db_password = self._get_database_password()
            for k, v in config.items():
                alter_db_cmd = system.ALTER_DB_RESET_CFG % (DB_NAME, str(k))
                out, err = system.exec_vsql_command(db_password, alter_db_cmd)
                if err:
                    if err.is_warning():
                        LOG.warning(err)
                    else:
                        LOG.error(err)
                        raise RuntimeError(_("Failed to remove config %s") % k)
        except Exception:
            LOG.exception("Vertica configuration remove failed.")
            raise RuntimeError(_("Vertica configuration remove failed."))
        LOG.info("Vertica configuration reset completed.")

    def _apply_config(self, config):
        try:
            db_password = self._get_database_password()
            for k, v in config.items():
                alter_db_cmd = system.ALTER_DB_CFG % (DB_NAME, str(k), str(v))
                out, err = system.exec_vsql_command(db_password, alter_db_cmd)
                if err:
                    if err.is_warning():
                        LOG.warning(err)
                    else:
                        LOG.error(err)
                        raise RuntimeError(_("Failed to apply config %s") % k)
        except Exception:
            LOG.exception("Vertica configuration apply failed")
            raise RuntimeError(_("Vertica configuration apply failed"))
        LOG.info("Vertica config apply completed.")
    def _enable_db_on_boot(self):
        try:
            command = ["sudo", "su", "-", system.VERTICA_ADMIN, "-c",
                       (system.SET_RESTART_POLICY % (DB_NAME, "always"))]
            subprocess.Popen(command)
            command = ["sudo", "su", "-", "root", "-c",
                       (system.VERTICA_AGENT_SERVICE_COMMAND % "enable")]
            subprocess.Popen(command)
        except Exception:
            LOG.exception("Failed to enable database on boot.")
            raise RuntimeError(_("Could not enable database on boot."))

    def _disable_db_on_boot(self):
        try:
            command = (system.SET_RESTART_POLICY % (DB_NAME, "never"))
            system.shell_execute(command, system.VERTICA_ADMIN)
            command = (system.VERTICA_AGENT_SERVICE_COMMAND % "disable")
            system.shell_execute(command)
        except exception.ProcessExecutionError:
            LOG.exception("Failed to disable database on boot.")
            raise RuntimeError(_("Could not disable database on boot."))

    def stop_db(self, update_db=False, do_not_start_on_reboot=False):
        """Stop the database."""
        LOG.info("Stopping Vertica.")
        if do_not_start_on_reboot:
            self._disable_db_on_boot()

        try:
            # Stop vertica-agent service
            command = (system.VERTICA_AGENT_SERVICE_COMMAND % "stop")
            system.shell_execute(command)
            # Using Vertica adminTools to stop db.
            db_password = self._get_database_password()
            stop_db_command = (system.STOP_DB % (DB_NAME, db_password))
            out, err = system.shell_execute(system.STATUS_ACTIVE_DB,
                                            system.VERTICA_ADMIN)
            if out.strip() == DB_NAME:
                system.shell_execute(stop_db_command, system.VERTICA_ADMIN)
                if not self.status._is_restarting:
                    if not self.status.wait_for_real_status_to_change_to(
                            rd_instance.ServiceStatuses.SHUTDOWN,
                            self.state_change_wait_time, update_db):
                        LOG.error("Could not stop Vertica.")
                        self.status.end_restart()
                        raise RuntimeError(_("Could not stop Vertica!"))
                LOG.debug("Database stopped.")
            else:
                LOG.debug("Database is not running.")
        except exception.ProcessExecutionError:
            LOG.exception("Failed to stop database.")
            raise RuntimeError(_("Could not stop database."))
    def start_db(self, update_db=False):
        """Start the database."""
        LOG.info("Starting Vertica.")
        try:
            self._enable_db_on_boot()
            # Start vertica-agent service
            command = ["sudo", "su", "-", "root", "-c",
                       (system.VERTICA_AGENT_SERVICE_COMMAND % "start")]
            subprocess.Popen(command)
            # Using Vertica adminTools to start db.
            db_password = self._get_database_password()
            start_db_command = ["sudo", "su", "-", system.VERTICA_ADMIN, "-c",
                                (system.START_DB % (DB_NAME, db_password))]
            subprocess.Popen(start_db_command)
            if not self.status._is_restarting:
                self.status.end_restart()
            LOG.debug("Database started.")
        except Exception as e:
            raise RuntimeError(_("Could not start Vertica due to %s") % e)

    def start_db_with_conf_changes(self, config_contents):
        """
        Currently all that this method does is to start Vertica. This method
        needs to be implemented to enable volume resize on guestagent side.
        """
        LOG.info("Starting Vertica with configuration changes.")
        if self.status.is_running:
            format = 'Cannot start_db_with_conf_changes because status is %s.'
            LOG.debug(format, self.status)
            raise RuntimeError(format % self.status)
        LOG.info("Initiating config.")
        self.configuration_manager.save_configuration(config_contents)
        self.start_db(True)

    def restart(self):
        """Restart the database."""
        try:
            self.status.begin_restart()
            self.stop_db()
            self.start_db()
        finally:
            self.status.end_restart()
def add_db_to_node(self, members=netutils.get_my_ipv4()):
"""Add db to host with admintools"""
LOG.info("Calling admintools to add DB to host")
try:
# Create db after install
db_password = self._get_database_password()
create_db_command = (system.ADD_DB_TO_NODE % (members,
DB_NAME,
db_password))
system.shell_execute(create_db_command, "dbadmin")
except exception.ProcessExecutionError:
# Give vertica some time to get the node up, won't be available
# by the time adminTools -t db_add_node completes
LOG.info("adminTools failed as expected - wait for node")
self.wait_for_node_status()
LOG.info("Vertica add db to host completed.")
def remove_db_from_node(self, members=netutils.get_my_ipv4()):
"""Remove db from node with admintools"""
LOG.info("Removing db from node")
try:
# Create db after install
db_password = self._get_database_password()
create_db_command = (system.REMOVE_DB_FROM_NODE % (members,
DB_NAME,
db_password))
system.shell_execute(create_db_command, "dbadmin")
except exception.ProcessExecutionError:
# Give vertica some time to get the node up, won't be available
# by the time adminTools -t db_add_node completes
LOG.info("adminTools failed as expected - wait for node")
# Give vertica some time to take the node down - it won't be available
# by the time adminTools -t db_add_node completes
self.wait_for_node_status()
LOG.info("Vertica remove host from db completed.")
def create_db(self, members=netutils.get_my_ipv4()):
"""Prepare the guest machine with a Vertica db creation."""
LOG.info("Creating database on Vertica host.")
try:
# Create db after install
db_password = self._get_database_password()
create_db_command = (system.CREATE_DB % (members, DB_NAME,
MOUNT_POINT, MOUNT_POINT,
db_password))
system.shell_execute(create_db_command, system.VERTICA_ADMIN)
except Exception:
LOG.exception("Vertica database create failed.")
raise RuntimeError(_("Vertica database create failed."))
LOG.info("Vertica database create completed.")
def install_vertica(self, members=netutils.get_my_ipv4()):
"""Prepare the guest machine with a Vertica db creation."""
LOG.info("Installing Vertica Server.")
try:
# Create db after install
install_vertica_cmd = (system.INSTALL_VERTICA % (members,
MOUNT_POINT))
system.shell_execute(install_vertica_cmd)
except exception.ProcessExecutionError:
LOG.exception("install_vertica failed.")
raise RuntimeError(_("install_vertica failed."))
self._generate_database_password()
LOG.info("install_vertica completed.")
def update_vertica(self, command, members=netutils.get_my_ipv4()):
LOG.info("Calling update_vertica with command %s", command)
try:
update_vertica_cmd = (system.UPDATE_VERTICA % (command, members,
MOUNT_POINT))
system.shell_execute(update_vertica_cmd)
except exception.ProcessExecutionError:
LOG.exception("update_vertica failed.")
raise RuntimeError(_("update_vertica failed."))
        # Note: unlike install_vertica, the database password is not
        # regenerated here; the existing credentials stay valid.
LOG.info("update_vertica completed.")
def add_udls(self):
"""Load the user defined load libraries into the database."""
LOG.info("Adding configured user defined load libraries.")
password = self._get_database_password()
loaded_udls = []
for lib in system.UDL_LIBS:
func_name = lib['func_name']
lib_name = lib['lib_name']
language = lib['language']
factory = lib['factory']
path = lib['path']
if os.path.isfile(path):
LOG.debug("Adding the %(func)s library as %(lib)s.",
{'func': func_name, 'lib': lib_name})
out, err = system.exec_vsql_command(
password,
system.CREATE_LIBRARY % (lib_name, path)
)
if err:
if err.is_warning():
LOG.warning(err)
else:
LOG.error(err)
raise RuntimeError(_("Failed to create library %s.")
% lib_name)
out, err = system.exec_vsql_command(
password,
system.CREATE_SOURCE % (func_name, language,
factory, lib_name)
)
if err:
if err.is_warning():
LOG.warning(err)
else:
LOG.error(err)
raise RuntimeError(_("Failed to create source %s.")
% func_name)
loaded_udls.append(func_name)
else:
LOG.warning("Skipping %(func)s as path %(path)s not "
"found.", {"func": func_name, "path": path})
LOG.info("The following UDL functions are available for use: %s",
loaded_udls)
def _generate_database_password(self):
"""Generate and write the password to vertica.cnf file."""
config = configparser.ConfigParser()
config.add_section('credentials')
config.set('credentials', 'dbadmin_password',
utils.generate_random_password())
self.write_config(config)
def write_config(self, config,
unlink_function=os.unlink,
temp_function=tempfile.NamedTemporaryFile):
"""Write the configuration contents to vertica.cnf file."""
LOG.debug('Defining config holder at %s.', system.VERTICA_CONF)
        # Write to a temporary file first, then install it with the correct
        # owner/permissions; clean the temporary file up either way.
        tmp_file = temp_function('w', delete=False)
        try:
            config.write(tmp_file)
            tmp_file.close()
            command = (("install -o root -g root -m 644 %(source)s %(target)s"
                        ) % {'source': tmp_file.name,
                             'target': system.VERTICA_CONF})
            system.shell_execute(command)
            unlink_function(tmp_file.name)
        except Exception:
            unlink_function(tmp_file.name)
            raise
def read_config(self):
"""Reads and returns the Vertica config."""
try:
config = configparser.ConfigParser()
config.read(system.VERTICA_CONF)
return config
except Exception:
LOG.exception("Failed to read config %s.", system.VERTICA_CONF)
            raise RuntimeError(
                _("Failed to read config %s.") % system.VERTICA_CONF)
def _get_database_password(self):
"""Read the password from vertica.cnf file and return it."""
return self.read_config().get('credentials', 'dbadmin_password')
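    # For reference, the vertica.cnf file written and read by the helpers
    # above is plain INI; under this module's assumptions it looks like:
    #
    #     [credentials]
    #     dbadmin_password = <randomly generated password>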
def install_if_needed(self, packages):
"""Install Vertica package if needed."""
LOG.info("Preparing Guest as Vertica Server.")
if not packager.pkg_is_installed(packages):
LOG.debug("Installing Vertica Package.")
packager.pkg_install(packages, None, system.INSTALL_TIMEOUT)
    def _set_readahead_for_disks(self):
        """Set the readahead size for disks as needed by Vertica."""
        device = volume.VolumeDevice(CONF.device_path)
        device.set_readahead_size(CONF.vertica.readahead_size)
        LOG.debug("Set readahead size as required by Vertica.")
def prepare_for_install_vertica(self):
"""This method executes preparatory methods before
executing install_vertica.
"""
command = ("VERT_DBA_USR=%s VERT_DBA_HOME=/home/dbadmin "
"VERT_DBA_GRP=%s /opt/vertica/oss/python/bin/python"
" -m vertica.local_coerce" %
(system.VERTICA_ADMIN, system.VERTICA_ADMIN_GRP))
try:
self._set_readahead_for_disks()
system.shell_execute(command)
except exception.ProcessExecutionError:
LOG.exception("Failed to prepare for install_vertica.")
raise
def mark_design_ksafe(self, k):
"""Wrapper for mark_design_ksafe function for setting k-safety """
LOG.info("Setting Vertica k-safety to %s", str(k))
out, err = system.exec_vsql_command(self._get_database_password(),
system.MARK_DESIGN_KSAFE % k)
# Only fail if we get an ERROR as opposed to a warning complaining
# about setting k = 0
if "ERROR" in err:
LOG.error(err)
raise RuntimeError(_("Failed to set k-safety level %s.") % k)
def _create_user(self, username, password, role=None):
"""Creates a user, granting and enabling the given role for it."""
LOG.info("Creating user in Vertica database.")
out, err = system.exec_vsql_command(self._get_database_password(),
system.CREATE_USER %
(username, password))
if err:
if err.is_warning():
LOG.warning(err)
else:
LOG.error(err)
raise RuntimeError(_("Failed to create user %s.") % username)
if role:
self._grant_role(username, role)
def _grant_role(self, username, role):
"""Grants a role to the user on the schema."""
out, err = system.exec_vsql_command(self._get_database_password(),
system.GRANT_TO_USER
% (role, username))
if err:
if err.is_warning():
LOG.warning(err)
else:
LOG.error(err)
raise RuntimeError(_("Failed to grant role %(r)s to user "
"%(u)s.")
% {'r': role, 'u': username})
out, err = system.exec_vsql_command(self._get_database_password(),
system.ENABLE_FOR_USER
% (username, role))
if err:
LOG.warning(err)
def enable_root(self, root_password=None):
"""Resets the root password."""
LOG.info("Enabling root.")
user = models.DatastoreUser.root(password=root_password)
if not self.is_root_enabled():
self._create_user(user.name, user.password, 'pseudosuperuser')
else:
LOG.debug("Updating %s password.", user.name)
try:
out, err = system.exec_vsql_command(
self._get_database_password(),
system.ALTER_USER_PASSWORD % (user.name, user.password))
if err:
if err.is_warning():
LOG.warning(err)
else:
LOG.error(err)
raise RuntimeError(_("Failed to update %s "
"password.") % user.name)
except exception.ProcessExecutionError:
LOG.error("Failed to update %s password.", user.name)
raise RuntimeError(_("Failed to update %s password.")
% user.name)
return user.serialize()
def is_root_enabled(self):
"""Return True if root access is enabled else False."""
LOG.debug("Checking is root enabled.")
try:
out, err = system.shell_execute(system.USER_EXISTS %
(self._get_database_password(),
'root'), system.VERTICA_ADMIN)
if err:
LOG.error(err)
raise RuntimeError(_("Failed to query for root user."))
except exception.ProcessExecutionError:
raise RuntimeError(_("Failed to query for root user."))
return out.rstrip() == "1"
def get_public_keys(self, user):
"""Generates key (if not found), and sends public key for user."""
LOG.debug("Public keys requested for user: %s.", user)
user_home_directory = os.path.expanduser('~' + user)
public_key_file_name = user_home_directory + '/.ssh/id_rsa.pub'
try:
key_generate_command = (system.SSH_KEY_GEN % user_home_directory)
system.shell_execute(key_generate_command, user)
except exception.ProcessExecutionError:
LOG.debug("Cannot generate key.")
try:
read_key_cmd = ("cat %(file)s" % {'file': public_key_file_name})
out, err = system.shell_execute(read_key_cmd)
except exception.ProcessExecutionError:
LOG.exception("Cannot read public key.")
raise
return out.strip()
def authorize_public_keys(self, user, public_keys):
"""Adds public key to authorized_keys for user."""
LOG.debug("public keys to be added for user: %s.", user)
user_home_directory = os.path.expanduser('~' + user)
authorized_file_name = user_home_directory + '/.ssh/authorized_keys'
try:
read_key_cmd = ("cat %(file)s" % {'file': authorized_file_name})
out, err = system.shell_execute(read_key_cmd)
public_keys.append(out.strip())
except exception.ProcessExecutionError:
LOG.debug("Cannot read authorized_keys.")
all_keys = '\n'.join(public_keys) + "\n"
try:
with tempfile.NamedTemporaryFile("w", delete=False) as tempkeyfile:
tempkeyfile.write(all_keys)
copy_key_cmd = (("install -o %(user)s -m 600 %(source)s %(target)s"
) % {'user': user, 'source': tempkeyfile.name,
'target': authorized_file_name})
system.shell_execute(copy_key_cmd)
os.remove(tempkeyfile.name)
except exception.ProcessExecutionError:
LOG.exception("Cannot install public keys.")
os.remove(tempkeyfile.name)
raise
def _export_conf_to_members(self, members):
"""This method exports conf files to other members."""
try:
for member in members:
                copy_cmd = (system.SEND_CONF_TO_SERVER % (system.VERTICA_CONF,
                                                          member,
                                                          system.VERTICA_CONF))
                system.shell_execute(copy_cmd)
except exception.ProcessExecutionError:
LOG.exception("Cannot export configuration.")
raise
def install_cluster(self, members):
"""Installs & configures cluster."""
cluster_members = ','.join(members)
LOG.debug("Installing cluster with members: %s.", cluster_members)
self.install_vertica(cluster_members)
self._export_conf_to_members(members)
LOG.debug("Creating database with members: %s.", cluster_members)
self.create_db(cluster_members)
LOG.debug("Cluster configured on members: %s.", cluster_members)
def grow_cluster(self, members):
"""Adds nodes to cluster."""
cluster_members = ','.join(members)
LOG.debug("Growing cluster with members: %s.", cluster_members)
self.update_vertica("--add-hosts", cluster_members)
self._export_conf_to_members(members)
LOG.debug("Creating database with members: %s.", cluster_members)
self.add_db_to_node(cluster_members)
LOG.debug("Cluster configured on members: %s.", cluster_members)
def shrink_cluster(self, members):
"""Removes nodes from cluster."""
cluster_members = ','.join(members)
LOG.debug("Shrinking cluster with members: %s.", cluster_members)
self.remove_db_from_node(cluster_members)
self.update_vertica("--remove-hosts", cluster_members)
def wait_for_node_status(self, status='UP'):
"""Wait until all nodes are the same status"""
# select node_state from nodes where node_state <> 'UP'
def _wait_for_node_status():
out, err = system.exec_vsql_command(self._get_database_password(),
system.NODE_STATUS % status)
LOG.debug("Polled vertica node states: %s", out)
if err:
LOG.error(err)
raise RuntimeError(_("Failed to query for root user."))
return "0 rows" in out
try:
utils.poll_until(_wait_for_node_status, time_out=600,
sleep_time=15)
except exception.PollTimeOut:
raise RuntimeError(_("Timed out waiting for cluster to "
"change to status %s") % status)
| 45.048465 | 79 | 0.58146 | 3,029 | 27,885 | 5.13932 | 0.146253 | 0.011691 | 0.02197 | 0.022162 | 0.419606 | 0.359286 | 0.309372 | 0.252329 | 0.22779 | 0.206848 | 0 | 0.001551 | 0.329281 | 27,885 | 618 | 80 | 45.121359 | 0.830776 | 0.103174 | 0 | 0.349594 | 0 | 0 | 0.134907 | 0.004358 | 0 | 0 | 0 | 0 | 0 | 1 | 0.079268 | false | 0.081301 | 0.046748 | 0 | 0.148374 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
7dcd9cbc95d9ac46a0346d6a8f8325d12f3bf6be | 681 | py | Python | setup.py | jacobschaer/qt_compat | 8121500c1fb6f95d3cfff033410e055a187a39c9 | [
"MIT"
] | null | null | null | setup.py | jacobschaer/qt_compat | 8121500c1fb6f95d3cfff033410e055a187a39c9 | [
"MIT"
] | null | null | null | setup.py | jacobschaer/qt_compat | 8121500c1fb6f95d3cfff033410e055a187a39c9 | [
"MIT"
] | null | null | null | from setuptools import setup, find_packages
setup(
name="QtCompat",
version="0.1",
packages=find_packages(),
scripts=[],
    # Runtime dependencies go here; none are currently required.
    install_requires=[],
package_data={
},
# metadata for upload to PyPI
author="Jacob Schaer",
author_email="",
description="PyQt4, 5 and Pyside Compatibility Library",
license="MIT",
keywords="pyqt4 pyqt5 pyside compatibility",
url="https://github.com/jacobschaer/qt_compat/", # project home page, if any
# could also include long_description, download_url, classifiers, etc.
) | 28.375 | 82 | 0.690162 | 81 | 681 | 5.703704 | 0.851852 | 0.051948 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.011091 | 0.20558 | 681 | 24 | 83 | 28.375 | 0.842884 | 0.33627 | 0 | 0 | 0 | 0 | 0.313199 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.0625 | 0 | 0.0625 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
7dcea3fbbfd1ee77dfca864ce3a07a6ca9ff127e | 389 | py | Python | annotations/filters.py | acdh-oeaw/ner-annotator | ee8f72248669b848eb273644d80ad52dc495a07c | [
"MIT"
] | 1 | 2019-01-02T15:05:30.000Z | 2019-01-02T15:05:30.000Z | annotations/filters.py | acdh-oeaw/ner-annotator | ee8f72248669b848eb273644d80ad52dc495a07c | [
"MIT"
] | 8 | 2020-02-11T23:02:04.000Z | 2021-06-10T20:39:58.000Z | annotations/filters.py | acdh-oeaw/ner-annotator | ee8f72248669b848eb273644d80ad52dc495a07c | [
"MIT"
] | 1 | 2019-01-02T15:05:31.000Z | 2019-01-02T15:05:31.000Z | import django_filters
from . models import NerSample
class NerSampleListFilter(django_filters.FilterSet):
text = django_filters.CharFilter(
lookup_expr='icontains',
help_text=NerSample._meta.get_field('text').help_text,
label=NerSample._meta.get_field('text').verbose_name
)
class Meta:
model = NerSample
fields = ['text', 'id']
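# A minimal usage sketch (hypothetical shell/view code, relying on standard
# django-filter behaviour):
#
#     qs = NerSampleListFilter({'text': 'Berlin'},
#                              queryset=NerSample.objects.all()).qs
#     # -> NerSample rows whose text icontains "Berlin"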
| 24.3125 | 62 | 0.678663 | 43 | 389 | 5.883721 | 0.55814 | 0.15415 | 0.126482 | 0.166008 | 0.197628 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.218509 | 389 | 15 | 63 | 25.933333 | 0.832237 | 0 | 0 | 0 | 0 | 0 | 0.059126 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.181818 | 0 | 0.454545 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
7dd13c6ad4dc8afcb18c82aeecd32fc176c29e34 | 1,261 | py | Python | apps/user/migrations/0005_auto_20190804_1443.py | tiger-fight-tonight/E-Server | 3939bc3f8c090441cc2af17f4e6cb777642fb792 | [
"Apache-2.0"
] | 6 | 2019-07-18T16:21:17.000Z | 2020-11-19T04:47:02.000Z | apps/user/migrations/0005_auto_20190804_1443.py | tiger-fight-tonight/E-Server | 3939bc3f8c090441cc2af17f4e6cb777642fb792 | [
"Apache-2.0"
] | null | null | null | apps/user/migrations/0005_auto_20190804_1443.py | tiger-fight-tonight/E-Server | 3939bc3f8c090441cc2af17f4e6cb777642fb792 | [
"Apache-2.0"
] | null | null | null | # Generated by Django 2.1.7 on 2019-08-04 06:43
import datetime
from django.db import migrations, models
import uuid
class Migration(migrations.Migration):
dependencies = [
('user', '0004_auto_20190804_1438'),
]
operations = [
migrations.AlterField(
model_name='subjectinfo',
name='subject_id',
field=models.CharField(default=uuid.UUID('6c50ec1b-f1b5-426f-8365-7e1962074900'), editable=False, max_length=50, primary_key=True, serialize=False, verbose_name='科目ID'),
),
migrations.AlterField(
model_name='userprofile',
name='create_time',
field=models.DateTimeField(default=datetime.datetime(2019, 8, 4, 14, 43, 45, 491036), verbose_name='创建时间'),
),
migrations.AlterField(
model_name='userprofile',
name='update_time',
field=models.DateTimeField(auto_now=True, verbose_name='更新时间'),
),
migrations.AlterField(
model_name='userprofile',
name='user_id',
field=models.CharField(default=uuid.UUID('ea94d36f-ada5-4e0a-bfbf-e6df269b18de'), editable=False, max_length=50, primary_key=True, serialize=False, verbose_name='用户ID'),
),
]
| 35.027778 | 181 | 0.634417 | 138 | 1,261 | 5.652174 | 0.521739 | 0.102564 | 0.128205 | 0.148718 | 0.425641 | 0.425641 | 0.25641 | 0.161538 | 0.161538 | 0.161538 | 0 | 0.094044 | 0.241079 | 1,261 | 35 | 182 | 36.028571 | 0.721003 | 0.035686 | 0 | 0.37931 | 1 | 0 | 0.163097 | 0.078254 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.103448 | 0 | 0.206897 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
7dd3f523efb7218a00299577b756498b0e6e336c | 508 | py | Python | submissions/mirror-reflection/solution.py | Wattyyy/LeetCode | 13a9be056d0a0c38c2f8c8222b11dc02cb25a935 | [
"MIT"
] | null | null | null | submissions/mirror-reflection/solution.py | Wattyyy/LeetCode | 13a9be056d0a0c38c2f8c8222b11dc02cb25a935 | [
"MIT"
] | 1 | 2022-03-04T20:24:32.000Z | 2022-03-04T20:31:58.000Z | submissions/mirror-reflection/solution.py | Wattyyy/LeetCode | 13a9be056d0a0c38c2f8c8222b11dc02cb25a935 | [
"MIT"
] | null | null | null | # https://leetcode.com/problems/mirror-reflection
class Solution:
def mirrorReflection(self, p, q):
if q == 0:
return 0
i = 0
val = 0
while True:
val += q
i += 1
if (i % 2 == 0) and (val % p == 0):
return 2
elif (i % 2 == 1) and (val % (2 * p) == 0):
return 0
elif (i % 2 == 1) and (val % p == 0):
return 1
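# Worked example of the simulation above: for p = 2, q = 1 the ray gains q = 1
# unit of height per bounce, so (i, val) goes (1, 1) -- 1 % 2 != 0, keep
# going -- then (2, 2); i is even and val % p == 0, so receptor 2 is hit,
# matching the expected answer for mirrorReflection(2, 1).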
| 24.190476 | 55 | 0.36811 | 61 | 508 | 3.065574 | 0.42623 | 0.149733 | 0.128342 | 0.085562 | 0.256684 | 0.139037 | 0 | 0 | 0 | 0 | 0 | 0.072581 | 0.511811 | 508 | 20 | 56 | 25.4 | 0.681452 | 0.09252 | 0 | 0.117647 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.058824 | false | 0 | 0 | 0 | 0.352941 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
7dd470fef059403a7425a058aa8ed792b44ec169 | 4,290 | py | Python | sdk/python/kulado_azure/batch/get_account.py | kulado/kulado-azure | f3a408fa0405fe6ae93e0049b2ae0f0e266f1cf6 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | sdk/python/kulado_azure/batch/get_account.py | kulado/kulado-azure | f3a408fa0405fe6ae93e0049b2ae0f0e266f1cf6 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | sdk/python/kulado_azure/batch/get_account.py | kulado/kulado-azure | f3a408fa0405fe6ae93e0049b2ae0f0e266f1cf6 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Kulado Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import json
import warnings
import kulado
import kulado.runtime
from .. import utilities, tables
class GetAccountResult:
"""
A collection of values returned by getAccount.
"""
def __init__(__self__, account_endpoint=None, location=None, name=None, pool_allocation_mode=None, primary_access_key=None, resource_group_name=None, secondary_access_key=None, storage_account_id=None, tags=None, id=None):
if account_endpoint and not isinstance(account_endpoint, str):
raise TypeError("Expected argument 'account_endpoint' to be a str")
__self__.account_endpoint = account_endpoint
"""
The account endpoint used to interact with the Batch service.
"""
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
__self__.location = location
"""
The Azure Region in which this Batch account exists.
"""
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
__self__.name = name
"""
The Batch account name.
"""
if pool_allocation_mode and not isinstance(pool_allocation_mode, str):
raise TypeError("Expected argument 'pool_allocation_mode' to be a str")
__self__.pool_allocation_mode = pool_allocation_mode
"""
The pool allocation mode configured for this Batch account.
"""
if primary_access_key and not isinstance(primary_access_key, str):
raise TypeError("Expected argument 'primary_access_key' to be a str")
__self__.primary_access_key = primary_access_key
"""
The Batch account primary access key.
"""
if resource_group_name and not isinstance(resource_group_name, str):
raise TypeError("Expected argument 'resource_group_name' to be a str")
__self__.resource_group_name = resource_group_name
if secondary_access_key and not isinstance(secondary_access_key, str):
raise TypeError("Expected argument 'secondary_access_key' to be a str")
__self__.secondary_access_key = secondary_access_key
"""
The Batch account secondary access key.
"""
if storage_account_id and not isinstance(storage_account_id, str):
raise TypeError("Expected argument 'storage_account_id' to be a str")
__self__.storage_account_id = storage_account_id
"""
The ID of the Storage Account used for this Batch account.
"""
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
__self__.tags = tags
"""
A map of tags assigned to the Batch account.
"""
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
__self__.id = id
"""
id is the provider-assigned unique ID for this managed resource.
"""
async def get_account(name=None,resource_group_name=None,opts=None):
"""
Use this data source to access information about an existing Batch Account.
> This content is derived from https://github.com/terraform-providers/terraform-provider-azurerm/blob/master/website/docs/d/batch_account.html.markdown.
"""
__args__ = dict()
__args__['name'] = name
__args__['resourceGroupName'] = resource_group_name
__ret__ = await kulado.runtime.invoke('azure:batch/getAccount:getAccount', __args__, opts=opts)
return GetAccountResult(
account_endpoint=__ret__.get('accountEndpoint'),
location=__ret__.get('location'),
name=__ret__.get('name'),
pool_allocation_mode=__ret__.get('poolAllocationMode'),
primary_access_key=__ret__.get('primaryAccessKey'),
resource_group_name=__ret__.get('resourceGroupName'),
secondary_access_key=__ret__.get('secondaryAccessKey'),
storage_account_id=__ret__.get('storageAccountId'),
tags=__ret__.get('tags'),
id=__ret__.get('id'))
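# A minimal usage sketch (hypothetical resource names; since get_account is a
# coroutine it must be awaited inside a Kulado program):
#
#     acct = await get_account(name="examplebatch",
#                              resource_group_name="example-resources")
#     kulado.export("batch_account_endpoint", acct.account_endpoint)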
| 44.226804 | 226 | 0.675991 | 527 | 4,290 | 5.140417 | 0.239089 | 0.053156 | 0.059062 | 0.110742 | 0.241787 | 0.121816 | 0.046512 | 0 | 0 | 0 | 0 | 0.000306 | 0.237063 | 4,290 | 96 | 227 | 44.6875 | 0.827376 | 0.052448 | 0 | 0 | 1 | 0 | 0.197774 | 0.031161 | 0 | 0 | 0 | 0 | 0 | 1 | 0.018868 | false | 0 | 0.09434 | 0 | 0.150943 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
7dd4c10b342878f52f717eef146ce0ddd5328f2c | 1,988 | py | Python | run/run_fd_tgv_conv.py | huppd/PINTimpact | 766b2ef4d2fa9e6727965e48a3fba7b752074850 | [
"MIT"
] | null | null | null | run/run_fd_tgv_conv.py | huppd/PINTimpact | 766b2ef4d2fa9e6727965e48a3fba7b752074850 | [
"MIT"
] | null | null | null | run/run_fd_tgv_conv.py | huppd/PINTimpact | 766b2ef4d2fa9e6727965e48a3fba7b752074850 | [
"MIT"
] | null | null | null | """ running converferce for finite differences and Taylor-Green vortex """
import os
from math import pi
import xml.etree.ElementTree as ET
import platform_paths as pp
import manipulator as ma
# load parameter file
ma.set_ids('../XML/parameterTGVTime.xml')
TREE = ET.parse('../XML/parameterTGVTime.xml')
ROOT = TREE.getroot()
ma.set_parameter(ROOT, 'withoutput', 1)
ma.set_parameter(ROOT, 'initial guess', 'zero')
# ma.set_parameter( ROOT, 'refinement level', 1 )
# make executable ready
EXE = 'peri_navier3DTime'
os.chdir(pp.EXE_PATH)
os.system('make '+EXE+' -j4')
CASE_PATH = ['']*4
RUNS = range(1)
RES = [10]
STS = [0.1, 10., 1.]
NFS = [72]
ma.set_parameter(ROOT, 'nx', 65)
ma.set_parameter(ROOT, 'ny', 65)
ma.set_parameter(ROOT, 'nz', 5)
CASE_PATH[0] = pp.DATA_PATH + '/FDTGV_conv2'
pp.mkdir(CASE_PATH, 0)
for re in RES:
CASE_PATH[1] = '/re_'+str(re)
pp.mkdir(CASE_PATH, 1)
for st in STS:
CASE_PATH[2] = '/a2_'+str(st)
pp.mkdir(CASE_PATH, 2)
for nf in NFS:
CASE_PATH[3] = '/nt_'+str(nf)
pp.mkdir(CASE_PATH, 3)
#
pp.chdir(CASE_PATH, 3)
#
ma.set_parameter(ROOT, 'Re', re)
ma.set_parameter(ROOT, 'alpha2', 2.*pi*st*re)
ma.set_parameter(ROOT, 'nf', nf)
ma.set_parameter(ROOT, 'npx', 1)
ma.set_parameter(ROOT, 'npy', 1)
ma.set_parameter(ROOT, 'npz', 1)
ma.set_parameter(ROOT, 'npf', 12)
TREE.write('parameter3D.xml')
# nptot = npx[i]*npy[i]*npf[i]
nptot = 12
mem = int(max(1024, 60*1024/nptot))
for run in RUNS:
print()
print(CASE_PATH)
exeString = \
pp.exe_pre(nptot, ' -N -R "rusage[mem=' +
str(mem) + ']" -W 6:00', run) + \
pp.EXE_PATH+'/'+EXE
print(exeString)
os.system(exeString)
| 27.611111 | 74 | 0.551308 | 277 | 1,988 | 3.830325 | 0.375451 | 0.065975 | 0.171536 | 0.220547 | 0.147031 | 0 | 0 | 0 | 0 | 0 | 0 | 0.038654 | 0.297284 | 1,988 | 71 | 75 | 28 | 0.72083 | 0.094064 | 0 | 0 | 0 | 0 | 0.114589 | 0.030184 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.096154 | 0 | 0.096154 | 0.057692 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
7de18177bc8f9c705a1427b2d13f1d6f74890139 | 1,308 | py | Python | test/test_message.py | Smac01/Stego | 0bcf94642871e611b6731676591a571ff40ce4a0 | [
"MIT"
] | null | null | null | test/test_message.py | Smac01/Stego | 0bcf94642871e611b6731676591a571ff40ce4a0 | [
"MIT"
] | null | null | null | test/test_message.py | Smac01/Stego | 0bcf94642871e611b6731676591a571ff40ce4a0 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import unittest
import sys
sys.path.insert(0, '.')
from random import choice
from PIL import Image
from stego.encoder import embed
from stego.decoder import extract, _decompress, IncorrectPassword
from stego.base import make_array, as_string, extract_metadata
images = ['test/rgba.png', 'test/cmyk.tiff', 'test/greyscale.bmp']
image = choice(images)
message = b'Pixels -> smallest unit (small colored square) that constitutes an image.'
key = b'my_secret_key'
def test_embed(message, password):
imageobj = Image.open(image)
embed(imageobj, message, password)
def test_extract(password):
imageobj = Image.open(image)
img_data = make_array(imageobj.getdata())
exif = extract_metadata(img_data)
content = as_string(img_data[slice(24, exif.size)])
if password:
content = _decompress(content, key=password)
else:
content = _decompress(content)
return content
class SampleTestMessage(unittest.TestCase):
def test_message(self):
test_embed(message, None)
content = test_extract(None)
self.assertEqual(message, content)
def test_message_with_encryption(self):
test_embed(message,key)
content = test_extract(key)
self.assertEqual(message, content)
self.assertRaises(IncorrectPassword,test_extract, b'random')
if __name__ == '__main__':
unittest.main() | 25.647059 | 86 | 0.769113 | 176 | 1,308 | 5.528409 | 0.431818 | 0.028777 | 0.049332 | 0.051387 | 0.061665 | 0 | 0 | 0 | 0 | 0 | 0 | 0.003481 | 0.12156 | 1,308 | 51 | 87 | 25.647059 | 0.843342 | 0.016055 | 0 | 0.108108 | 0 | 0 | 0.113442 | 0 | 0 | 0 | 0 | 0 | 0.081081 | 1 | 0.108108 | false | 0.189189 | 0.189189 | 0 | 0.351351 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
7de74902240dafd5d3ece0f149442d4593ed9d43 | 1,091 | py | Python | tests/test_dashboard_generator_generate_widget.py | phelewski/aws-codepipeline-dashboard | c32fbfb01b383be9b5f813fac4ed36074e3ddc7e | [
"MIT"
] | null | null | null | tests/test_dashboard_generator_generate_widget.py | phelewski/aws-codepipeline-dashboard | c32fbfb01b383be9b5f813fac4ed36074e3ddc7e | [
"MIT"
] | 5 | 2021-04-02T18:12:58.000Z | 2021-05-21T12:15:30.000Z | tests/test_dashboard_generator_generate_widget.py | phelewski/aws-codepipeline-dashboard | c32fbfb01b383be9b5f813fac4ed36074e3ddc7e | [
"MIT"
] | null | null | null | import os
import pytest
from dashboard_generator import DashboardGenerator
def test_generate_widget_ensure_return_value_is_dict(env_variables):
response = DashboardGenerator()._generate_widget(y=1, period=60, pipeline='foo')
    assert isinstance(response, dict)
def test_generate_widget_ensure_values_are_used_properly_in_widget(env_variables):
y = 1
period = 60
pipeline = 'foo'
dimension = 'PipelineName'
response = DashboardGenerator()._generate_widget(y, period, pipeline)
for metric in response['properties']['metrics']:
if 'SuccessCount' in metric:
assert metric == [
'Pipeline',
'SuccessCount',
dimension,
pipeline,
{
'color': '#000000',
'label': 'Success Count',
'stat': 'Sum'
}
]
assert response['properties']['region'] == os.environ['AWS_REGION']
assert response['properties']['title'] == pipeline
assert response['properties']['period'] == period
| 29.486486 | 84 | 0.6022 | 103 | 1,091 | 6.145631 | 0.495146 | 0.088468 | 0.113744 | 0.066351 | 0.279621 | 0.066351 | 0 | 0 | 0 | 0 | 0 | 0.015484 | 0.289643 | 1,091 | 36 | 85 | 30.305556 | 0.80129 | 0 | 0 | 0 | 1 | 0 | 0.147571 | 0 | 0 | 0 | 0 | 0 | 0.178571 | 1 | 0.071429 | false | 0 | 0.107143 | 0 | 0.178571 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
7de837001eba6d36074503fa3a70a1bcb083d08b | 795 | py | Python | opencadd/tests/structure/test_superposition_mda.py | pipaj97/opencadd | 4fcf090bd612a22df9d617473ae458316a4cb4b6 | [
"MIT"
] | 39 | 2020-08-14T07:33:21.000Z | 2022-03-30T02:05:19.000Z | opencadd/tests/structure/test_superposition_mda.py | Allend95/opencadd | 1fde238e3cf8e5e47e8266a504d9df0196505e97 | [
"MIT"
] | 94 | 2020-06-29T12:47:46.000Z | 2022-02-13T19:16:25.000Z | opencadd/tests/structure/test_superposition_mda.py | Allend95/opencadd | 1fde238e3cf8e5e47e8266a504d9df0196505e97 | [
"MIT"
] | 11 | 2020-11-11T17:12:38.000Z | 2022-03-21T09:23:39.000Z | """
Tests for opencadd.structure.superposition.engines.mda
"""
import pytest
from opencadd.structure.core import Structure
from opencadd.structure.superposition.engines.mda import MDAnalysisAligner
def test_mda_instantiation():
aligner = MDAnalysisAligner()
def test_mda_calculation():
aligner = MDAnalysisAligner()
structures = [Structure.from_pdbid(pdb_id) for pdb_id in ["4u3y", "4u40"]]
result = aligner.calculate(structures)
# Check API compliance
assert "superposed" in result
assert "scores" in result
assert "rmsd" in result["scores"]
assert "metadata" in result
# Check RMSD values
# TODO: pytest.approx is not working reliably - check with Dennis too, he has the same problem
assert pytest.approx(result["scores"]["rmsd"], 1.989)
| 28.392857 | 98 | 0.733333 | 98 | 795 | 5.877551 | 0.520408 | 0.055556 | 0.104167 | 0.128472 | 0.159722 | 0.159722 | 0 | 0 | 0 | 0 | 0 | 0.013636 | 0.169811 | 795 | 27 | 99 | 29.444444 | 0.859091 | 0.23522 | 0 | 0.142857 | 0 | 0 | 0.086957 | 0 | 0 | 0 | 0 | 0.037037 | 0.357143 | 1 | 0.142857 | false | 0 | 0.214286 | 0 | 0.357143 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
7dee5b01ddca7ca6f3f444bdaf770ca84c443c68 | 572 | py | Python | tests/integration/test_serialise.py | csiro-easi/eo-datasets | 7805c569763f828cb0ace84c93932bddb882a6a3 | [
"Apache-2.0"
] | null | null | null | tests/integration/test_serialise.py | csiro-easi/eo-datasets | 7805c569763f828cb0ace84c93932bddb882a6a3 | [
"Apache-2.0"
] | null | null | null | tests/integration/test_serialise.py | csiro-easi/eo-datasets | 7805c569763f828cb0ace84c93932bddb882a6a3 | [
"Apache-2.0"
] | null | null | null | from pathlib import Path
from typing import Dict
from eodatasets3 import serialise
from .common import assert_same, dump_roundtrip
def test_valid_document_works(tmp_path: Path, example_metadata: Dict):
generated_doc = dump_roundtrip(example_metadata)
# Do a serialisation roundtrip and check that it's still identical.
reserialised_doc = dump_roundtrip(
serialise.to_doc(serialise.from_doc(generated_doc))
)
assert_same(generated_doc, reserialised_doc)
assert serialise.from_doc(generated_doc) == serialise.from_doc(reserialised_doc)
| 30.105263 | 84 | 0.791958 | 76 | 572 | 5.671053 | 0.460526 | 0.12065 | 0.111369 | 0.088167 | 0.12993 | 0 | 0 | 0 | 0 | 0 | 0 | 0.002053 | 0.148601 | 572 | 18 | 85 | 31.777778 | 0.882957 | 0.113636 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.272727 | 1 | 0.090909 | false | 0 | 0.363636 | 0 | 0.454545 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 |
7deee6c010d48a8d2b8631423560a24cab9c77a0 | 4,369 | py | Python | src/plot/plot-bb/plot_methods.py | bcrafton/speed_read | 3e9c0c873e49e4948a216aae14ec0d4654d1a62c | [
"MIT"
] | null | null | null | src/plot/plot-bb/plot_methods.py | bcrafton/speed_read | 3e9c0c873e49e4948a216aae14ec0d4654d1a62c | [
"MIT"
] | null | null | null | src/plot/plot-bb/plot_methods.py | bcrafton/speed_read | 3e9c0c873e49e4948a216aae14ec0d4654d1a62c | [
"MIT"
] | 2 | 2020-11-08T12:51:23.000Z | 2021-12-02T23:16:48.000Z |
import numpy as np
import matplotlib.pyplot as plt
####################
def merge_dicts(list_of_dicts):
results = {}
for d in list_of_dicts:
for key in d.keys():
if key in results.keys():
results[key].append(d[key])
else:
results[key] = [d[key]]
return results
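# For example, merge_dicts([{'cycle': 1, 'nmac': 10}, {'cycle': 2}]) returns
# {'cycle': [1, 2], 'nmac': [10]} -- per-run dicts become lists per key.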
####################
comp_pJ = 22. * 1e-12 / 32. / 16.
num_layers = 6
num_comparator = 8
results = np.load('results.npy', allow_pickle=True).item()
y_mean = np.zeros(shape=(2, 2, 2, 2, num_layers))
y_std = np.zeros(shape=(2, 2, 2, 2, num_layers))
y_mac_per_cycle = np.zeros(shape=(2, 2, 2, 2, num_layers))
y_mac_per_pJ = np.zeros(shape=(2, 2, 2, 2, num_layers))
cycle = np.zeros(shape=(2, 2, 2, 2, num_layers))
nmac = np.zeros(shape=(2, 2, 2, 2, num_layers))
array = np.zeros(shape=(2, 2, 2, 2, num_layers))
y_ron = np.zeros(shape=(2, 2, 2, 2, num_layers))
y_roff = np.zeros(shape=(2, 2, 2, 2, num_layers))
y_adc = np.zeros(shape=(2, 2, 2, 2, num_layers, num_comparator))
y_energy = np.zeros(shape=(2, 2, 2, 2, num_layers))
array_util = np.zeros(shape=(2, 2, 2, 2, num_layers))
for key in sorted(results.keys()):
(skip, cards, alloc, profile) = key
alloc = 1 if alloc == 'block' else 0
layer_results = results[key]
max_cycle = 0
for layer in range(num_layers):
rdict = merge_dicts(layer_results[layer])
############################
y_mean[skip][cards][alloc][profile][layer] = np.mean(rdict['mean'])
y_std[skip][cards][alloc][profile][layer] = np.mean(rdict['std'])
############################
y_ron[skip][cards][alloc][profile][layer] = np.sum(rdict['ron'])
y_roff[skip][cards][alloc][profile][layer] = np.sum(rdict['roff'])
y_adc[skip][cards][alloc][profile][layer] = np.sum(rdict['adc'], axis=0)
y_energy[skip][cards][alloc][profile][layer] += y_ron[skip][cards][alloc][profile][layer] * 2e-16
y_energy[skip][cards][alloc][profile][layer] += y_roff[skip][cards][alloc][profile][layer] * 2e-16
y_energy[skip][cards][alloc][profile][layer] += np.sum(y_adc[skip][cards][alloc][profile][layer] * np.array([1,2,3,4,5,6,7,8]) * comp_pJ)
y_mac_per_cycle[skip][cards][alloc][profile][layer] = np.sum(rdict['nmac']) / np.sum(rdict['cycle'])
y_mac_per_pJ[skip][cards][alloc][profile][layer] = np.sum(rdict['nmac']) / 1e12 / np.sum(y_energy[skip][cards][alloc][profile][layer])
############################
cycle[skip][cards][alloc][profile][layer] = np.mean(rdict['cycle'])
nmac[skip][cards][alloc][profile][layer] = np.mean(rdict['nmac'])
array[skip][cards][alloc][profile][layer] = np.mean(rdict['array'])
############################
max_cycle = max(max_cycle, np.mean(rdict['cycle']))
############################
for layer in range(num_layers):
rdict = merge_dicts(layer_results[layer])
############################
y_cycle = np.mean(rdict['cycle'])
y_stall = np.mean(rdict['stall'])
y_array = np.mean(rdict['array'])
array_util[skip][cards][alloc][profile][layer] = (y_array * y_cycle - y_stall) / (y_array * max_cycle)
############################
####################
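# Energy model used in the loop above (restated; the 2e-16 J per cell read and
# the per-level comparator cost comp_pJ are this script's assumptions):
#   E = (N_ron + N_roff) * 2e-16 + sum_{b=1..8} N_adc[b] * b * comp_pJ
# giving mac/pJ = N_mac / 1e12 / E.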
layers = np.array(range(1, 6+1))
skip_none = int(np.max(cycle[1, 0, 0, 0]))
skip_layer = int(np.max(cycle[1, 0, 0, 1]))
skip_block = int(np.max(cycle[1, 0, 1, 1]))
cards_none = int(np.max(cycle[1, 1, 0, 0]))
cards_layer = int(np.max(cycle[1, 1, 0, 1]))
cards_block = int(np.max(cycle[1, 1, 1, 1]))
height = [skip_none, skip_layer, skip_block, cards_none, cards_layer, cards_block]
x = ['skip/none', 'skip/layer', 'skip/block', 'cards/none', 'cards/layer', 'cards/block']
####################
plt.rcParams.update({'font.size': 12})
####################
plt.cla()
plt.clf()
plt.close()
plt.ylabel('# Cycles')
# plt.xlabel('Method')
plt.xticks(range(len(x)), x, rotation=45)
width = 0.2
plt.bar(x=x, height=height, width=width)
ax = plt.gca()
for i, h in enumerate(height):
# print (i, h)
ax.text(i - width, h + np.min(height)*0.02, str(h), fontdict={'size': 12})
fig = plt.gcf()
fig.set_size_inches(9, 5)
plt.tight_layout()
fig.savefig('cycles.png', dpi=300)
####################
| 29.721088 | 145 | 0.559396 | 656 | 4,369 | 3.591463 | 0.182927 | 0.03056 | 0.03056 | 0.169355 | 0.58871 | 0.570883 | 0.534805 | 0.480475 | 0.325976 | 0.260187 | 0 | 0.034746 | 0.189746 | 4,369 | 146 | 146 | 29.924658 | 0.630791 | 0.007553 | 0 | 0.051282 | 0 | 0 | 0.042893 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.012821 | false | 0 | 0.025641 | 0 | 0.051282 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
8148c634d7eb81e51ee23984bd4ad754b8ff54d8 | 816 | py | Python | models/__init__.py | pgodet/star_flow | cedb96ff339d11abf71d12d09e794593a742ccce | [
"Apache-2.0"
] | 10 | 2020-11-17T12:55:00.000Z | 2022-01-13T07:23:55.000Z | models/__init__.py | pgodet/star_flow | cedb96ff339d11abf71d12d09e794593a742ccce | [
"Apache-2.0"
] | 1 | 2021-01-02T22:46:07.000Z | 2021-01-02T22:46:07.000Z | models/__init__.py | pgodet/star_flow | cedb96ff339d11abf71d12d09e794593a742ccce | [
"Apache-2.0"
] | 1 | 2021-01-26T10:53:02.000Z | 2021-01-26T10:53:02.000Z | from . import pwcnet
from . import pwcnet_irr
from . import pwcnet_occ_joint
from . import pwcnet_irr_occ_joint
from . import tr_flow
from . import tr_features
from . import IRR_PWC
from . import IRR_PWC_occ_joint
from . import STAR
PWCNet = pwcnet.PWCNet
PWCNet_irr = pwcnet_irr.PWCNet
PWCNet_occ_joint = pwcnet_occ_joint.PWCNet
PWCNet_irr_occ_joint = pwcnet_irr_occ_joint.PWCNet
TRFlow = tr_flow.TRFlow
TRFlow_occjoint = tr_flow.TRFlow_occjoint
TRFlow_irr = tr_flow.TRFlow_irr
TRFlow_irr_occjoint = tr_flow.TRFlow_irr_occjoint
TRFeat = tr_features.TRFeat
TRFeat_occjoint = tr_features.TRFeat_occjoint
TRFeat_irr_occjoint = tr_features.TRFeat_irr_occjoint
# -- With refinement ---
IRR_PWC = IRR_PWC.PWCNet
IRR_occ_joint = IRR_PWC_occ_joint.PWCNet
StarFlow = STAR.StarFlow
| 24 | 53 | 0.792892 | 123 | 816 | 4.837398 | 0.138211 | 0.151261 | 0.117647 | 0.114286 | 0.067227 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.154412 | 816 | 33 | 54 | 24.727273 | 0.862319 | 0.026961 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.391304 | 0 | 0.391304 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 |
81523ae13c659215630baf70c984ec0ce5e2200e | 1,213 | py | Python | hanzi_font_deconstructor/scripts/create_training_data.py | chanind/hanzi-font-deconstructor | ce41b2a5c0e66b8a83d6c734678446d1d32a18b7 | [
"MIT"
] | null | null | null | hanzi_font_deconstructor/scripts/create_training_data.py | chanind/hanzi-font-deconstructor | ce41b2a5c0e66b8a83d6c734678446d1d32a18b7 | [
"MIT"
] | null | null | null | hanzi_font_deconstructor/scripts/create_training_data.py | chanind/hanzi-font-deconstructor | ce41b2a5c0e66b8a83d6c734678446d1d32a18b7 | [
"MIT"
] | null | null | null | from dataclasses import asdict
from hanzi_font_deconstructor.common.generate_training_data import (
STROKE_VIEW_BOX,
get_training_input_svg_and_masks,
)
from os import path, makedirs
from pathlib import Path
import shutil
import argparse
PROJECT_ROOT = Path(__file__).parents[2]
DEST_FOLDER = PROJECT_ROOT / "data"
parser = argparse.ArgumentParser(
description="Generate training data for a model to deconstruct hanzi into strokes"
)
parser.add_argument("--max-strokes-per-img", default=5, type=int)
parser.add_argument("--total-images", default=50, type=int)
args = parser.parse_args()
if __name__ == "__main__":
# create and empty the dest folder
if path.exists(DEST_FOLDER):
shutil.rmtree(DEST_FOLDER)
makedirs(DEST_FOLDER)
makedirs(DEST_FOLDER / "sample_svgs")
# create the data
data = {
"viewbox": STROKE_VIEW_BOX,
"imgs": [],
}
for i in range(args.total_images):
(img_svg, stroke_masks) = get_training_input_svg_and_masks(256)
label = f"{i}-{len(stroke_masks)}"
with open(DEST_FOLDER / "sample_svgs" / f"{label}.svg", "w") as img_file:
img_file.write(img_svg)
print(".")
print("Done!")
| 29.585366 | 86 | 0.698269 | 165 | 1,213 | 4.830303 | 0.49697 | 0.087829 | 0.050188 | 0.047679 | 0.125471 | 0.067754 | 0 | 0 | 0 | 0 | 0 | 0.007121 | 0.189613 | 1,213 | 40 | 87 | 30.325 | 0.803662 | 0.039571 | 0 | 0 | 1 | 0 | 0.162651 | 0.037866 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.181818 | 0 | 0.181818 | 0.060606 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
815535942d00809101f7b9f361c4f256b557f56f | 1,321 | py | Python | examples/generated_sample_regression.py | micheleantonazzi/gibson-dataset | cb5fc81061bbda1a653d6fc7b625b14c8a517f3c | [
"MIT"
] | 3 | 2021-10-31T17:43:50.000Z | 2022-03-21T08:55:01.000Z | examples/generated_sample_regression.py | micheleantonazzi/gibson-dataset | cb5fc81061bbda1a653d6fc7b625b14c8a517f3c | [
"MIT"
] | null | null | null | examples/generated_sample_regression.py | micheleantonazzi/gibson-dataset | cb5fc81061bbda1a653d6fc7b625b14c8a517f3c | [
"MIT"
] | null | null | null | from generic_dataset.data_pipeline import DataPipeline
from generic_dataset.generic_sample import synchronize_on_fields
from generic_dataset.sample_generator import SampleGenerator
import numpy as np
import generic_dataset.utilities.save_load_methods as slm
pipeline_rgb_to_gbr = DataPipeline().add_operation(lambda data, engine: (data[:, :, [2, 1, 0]], engine))
@synchronize_on_fields(field_names={'field_3'}, check_pipeline=False)
def field_3_is_positive(sample) -> bool:
return sample.get_field_3() > 0
# To model a regression problem, label_set parameter must be empty
GeneratedSampleRegression = SampleGenerator(name='GeneratedSampleRegression', label_set=set()).add_dataset_field(field_name='rgb_image', field_type=np.ndarray, save_function=slm.save_compressed_numpy_array, load_function=slm.load_compressed_numpy_array) \
.add_dataset_field(field_name='bgr_image', field_type=np.ndarray, save_function=slm.save_cv2_image_bgr, load_function=slm.load_cv2_image_bgr) \
.add_field(field_name='field_3', field_type=int) \
.add_custom_pipeline(method_name='create_pipeline_convert_rgb_to_bgr', elaborated_field='rgb_image', final_field='bgr_image', pipeline=pipeline_rgb_to_gbr) \
.add_custom_method(method_name='field_3_is_positive', function=field_3_is_positive) \
.generate_sample_class() | 62.904762 | 255 | 0.824375 | 192 | 1,321 | 5.239583 | 0.369792 | 0.035785 | 0.053678 | 0.047714 | 0.131213 | 0.083499 | 0.083499 | 0.083499 | 0.083499 | 0 | 0 | 0.009868 | 0.079485 | 1,321 | 21 | 256 | 62.904762 | 0.817434 | 0.048448 | 0 | 0 | 0 | 0 | 0.101911 | 0.046975 | 0 | 0 | 0 | 0 | 0 | 1 | 0.066667 | false | 0 | 0.333333 | 0.066667 | 0.466667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 |
815d2bb0d4f56879066adfa37185b3b120de6583 | 8,457 | py | Python | qqbot/qqbotcls.py | skarl-api/qqbot | 825ce91c080f4a315860e26df70d687a4ded7159 | [
"MIT"
] | null | null | null | qqbot/qqbotcls.py | skarl-api/qqbot | 825ce91c080f4a315860e26df70d687a4ded7159 | [
"MIT"
] | null | null | null | qqbot/qqbotcls.py | skarl-api/qqbot | 825ce91c080f4a315860e26df70d687a4ded7159 | [
"MIT"
] | 1 | 2020-03-30T08:06:24.000Z | 2020-03-30T08:06:24.000Z | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
QQBot -- A conversation robot base on Tencent's SmartQQ
Website -- https://github.com/pandolia/qqbot/
Author -- pandolia@yeah.net
"""
import sys, os
p = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
if p not in sys.path:
sys.path.insert(0, p)
import sys, subprocess, time
from apscheduler.schedulers.background import BackgroundScheduler
from collections import defaultdict
from qqbot.qconf import QConf
from qqbot.utf8logger import INFO, CRITICAL, ERROR, WARN
from qqbot.qsession import QLogin, RequestError
from qqbot.exitcode import RESTART, POLL_ERROR, FRESH_RESTART
from qqbot.common import StartDaemonThread, Import
from qqbot.qterm import QTermServer
from qqbot.mainloop import MainLoop, Put
from qqbot.groupmanager import GroupManager
def runBot(botCls, qq, user):
if sys.argv[-1] == '--subprocessCall':
isSubprocessCall = True
sys.argv.pop()
else:
isSubprocessCall = False
if isSubprocessCall:
bot = botCls()
bot.Login(qq, user)
bot.Run()
else:
conf = QConf(qq, user)
if sys.argv[0].endswith('py') or sys.argv[0].endswith('pyc'):
args = [sys.executable] + sys.argv
else:
args = sys.argv
args = args + ['--mailAuthCode', conf.mailAuthCode]
args = args + ['--qq', conf.qq]
args = args + ['--subprocessCall']
while True:
p = subprocess.Popen(args)
pid = p.pid
code = p.wait()
            if code == 0:
                INFO('QQBot stopped normally')
                sys.exit(code)
            elif code == RESTART:
                args[-2] = conf.LoadQQ(pid)
                INFO('Restarting QQBot in 5 seconds (automatic login)')
                time.sleep(5)
            elif code == FRESH_RESTART:
                args[-2] = ''
                INFO('Restarting QQBot in 5 seconds (manual login)')
                time.sleep(5)
            else:
                CRITICAL('QQBot stopped abnormally (code=%s)', code)
                if conf.restartOnOffline:
                    args[-2] = conf.LoadQQ(pid)
                    INFO('Restarting QQBot in 30 seconds (automatic login)')
                    time.sleep(30)
                else:
                    sys.exit(code)
def RunBot(botCls=None, qq=None, user=None):
try:
runBot((botCls or QQBot), qq, user)
except KeyboardInterrupt:
sys.exit(1)
class QQBot(GroupManager):
def Login(self, qq=None, user=None):
session, contactdb, self.conf = QLogin(qq, user)
# main thread
self.SendTo = session.SendTo
self.groupKick = session.GroupKick
self.groupSetAdmin = session.GroupSetAdmin
self.groupShut = session.GroupShut
self.groupSetCard = session.GroupSetCard
# main thread
self.List = contactdb.List
self.Update = contactdb.Update
self.StrOfList = contactdb.StrOfList
self.ObjOfList = contactdb.ObjOfList
self.findSender = contactdb.FindSender
self.firstFetch = contactdb.FirstFetch
self.Delete = contactdb.db.Delete
self.Modify = contactdb.db.Modify
# child thread 1
self.poll = session.Copy().Poll
# child thread 2
        self.termForever = QTermServer(self.conf.termServerPort).Run
def Run(self):
QQBot.initScheduler(self)
        import qqbot.qslots as _x; _x  # imported for its slot-registration side effects
for plugin in self.conf.plugins:
self.Plug(plugin)
if self.conf.startAfterFetch:
self.firstFetch()
self.onStartupComplete()
StartDaemonThread(self.pollForever)
        StartDaemonThread(self.termForever, self.onTermCommand)
StartDaemonThread(self.intervalForever)
MainLoop()
def Stop(self):
sys.exit(0)
def Restart(self):
self.conf.StoreQQ()
sys.exit(RESTART)
def FreshRestart(self):
sys.exit(FRESH_RESTART)
# child thread 1
def pollForever(self):
while True:
try:
result = self.poll()
except RequestError:
self.conf.StoreQQ()
Put(sys.exit, POLL_ERROR)
break
except:
                ERROR('qsession.Poll raised an error', exc_info=True)
else:
Put(self.onPollComplete, *result)
def onPollComplete(self, ctype, fromUin, membUin, content):
if ctype == 'timeout':
return
contact, member, nameInGroup = \
self.findSender(ctype, fromUin, membUin, self.conf.qq)
        if self.detectAtMe(nameInGroup, content):
            INFO('Someone @ me: %s[%s]' % (contact, member))
            content = '[@ME] ' + content.replace('@'+nameInGroup, '')
        else:
            content = content.replace('@ME', '@Me')
        if ctype == 'buddy':
            INFO('Message from %s: "%s"' % (contact, content))
        else:
            INFO('Message from %s[%s]: "%s"' % (contact, member, content))
self.onQQMessage(contact, member, content)
def detectAtMe(self, nameInGroup, content):
return nameInGroup and ('@'+nameInGroup) in content
    # child thread 3
def intervalForever(self):
while True:
time.sleep(300)
Put(self.onInterval)
slotsTable = {
'onQQMessage': [],
'onInterval': [],
'onStartupComplete': []
}
plugins = set()
@classmethod
def AddSlot(cls, func):
cls.slotsTable[func.__name__].append(func)
return func
@classmethod
def unplug(cls, moduleName, removeJob=True):
for slots in cls.slotsTable.values():
i = 0
while i < len(slots):
if slots[i].__module__ == moduleName:
slots[i] = slots[-1]
slots.pop()
else:
i += 1
if removeJob:
for job in cls.schedTable.pop(moduleName, []):
job.remove()
cls.plugins.discard(moduleName)
@classmethod
def Unplug(cls, moduleName):
        if moduleName not in cls.plugins:
            result = 'Warning: cannot unplug %s, plugin not loaded' % moduleName
            WARN(result)
        else:
            cls.unplug(moduleName)
            result = 'Success: unplugged plugin %s' % moduleName
            INFO(result)
return result
@classmethod
def Plug(cls, moduleName):
cls.unplug(moduleName)
try:
module = Import(moduleName)
except (Exception, SystemExit) as e:
            result = 'Error: failed to load plugin %s, %s: %s' % (moduleName, type(e), e)
ERROR(result)
else:
cls.unplug(moduleName, removeJob=False)
names = []
for slotName in cls.slotsTable.keys():
if hasattr(module, slotName):
cls.slotsTable[slotName].append(getattr(module, slotName))
names.append(slotName)
            if (not names) and (moduleName not in cls.schedTable):
                result = ('Warning: plugin %s defines no callbacks or '
                          'scheduled tasks' % moduleName)
                WARN(result)
else:
cls.plugins.add(moduleName)
jobs = cls.schedTable.get(moduleName,[])
jobNames = [f.func.__name__ for f in jobs]
                result = 'Success: loaded plugin %s (callbacks %s, scheduled tasks %s)' % \
                         (moduleName, names, jobNames)
INFO(result)
return result
@classmethod
def Plugins(cls):
return list(cls.plugins)
scheduler = BackgroundScheduler(daemon=True)
schedTable = defaultdict(list)
@classmethod
def initScheduler(cls, bot):
cls._bot = bot
cls.scheduler.start()
@classmethod
def AddSched(cls, **triggerArgs):
def wrapper(func):
job = lambda: Put(func, cls._bot)
job.__name__ = func.__name__
j = cls.scheduler.add_job(job, 'cron', **triggerArgs)
cls.schedTable[func.__module__].append(j)
return func
return wrapper
def wrap(slots):
return lambda *a,**kw: [f(*a, **kw) for f in slots[:]]
for name, slots in QQBot.slotsTable.items():
setattr(QQBot, name, wrap(slots))
QQBotSlot = QQBot.AddSlot
QQBotSched = QQBot.AddSched
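# A minimal plugin sketch (hypothetical module, e.g. myplugin.py, listed in
# the config's "plugins"): define any slot named in QQBot.slotsTable and/or
# register cron jobs with QQBotSched; slots receive the bot as first argument.
#
#     def onQQMessage(bot, contact, member, content):
#         if content == '-hello':
#             bot.SendTo(contact, 'hello, world')
#
#     @QQBotSched(hour='8', minute='0')
#     def morning(bot):
#         bot.SendTo(bot.List('group')[0], 'good morning')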
if __name__ == '__main__':
bot = QQBot()
bot.Login(user='hcj')
gl = bot.List('group')
ml = bot.List(gl[0])
m = ml[0]
| 29.262976 | 78 | 0.551614 | 885 | 8,457 | 5.219209 | 0.275706 | 0.015588 | 0.01299 | 0.006495 | 0.074259 | 0.025114 | 0 | 0 | 0 | 0 | 0 | 0.005715 | 0.337945 | 8,457 | 288 | 79 | 29.364583 | 0.819075 | 0.014426 | 0 | 0.193694 | 0 | 0 | 0.047642 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.063063 | null | null | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
816842032e46719c27ed0ea91d613473a3f094ca | 601 | py | Python | architecture_tool_django/graphdefs/urls.py | goldginkgo/architecture_tool_django | e4229c5938a4dd01d0877afa7b93daf68e09283b | [
"MIT"
] | 1 | 2021-08-13T01:37:29.000Z | 2021-08-13T01:37:29.000Z | architecture_tool_django/graphdefs/urls.py | goldginkgo/architecture_tool_django | e4229c5938a4dd01d0877afa7b93daf68e09283b | [
"MIT"
] | null | null | null | architecture_tool_django/graphdefs/urls.py | goldginkgo/architecture_tool_django | e4229c5938a4dd01d0877afa7b93daf68e09283b | [
"MIT"
] | 1 | 2021-07-19T07:57:54.000Z | 2021-07-19T07:57:54.000Z | from django.urls import path
from . import views
app_name = "graphs"
urlpatterns = [
path("graphs/", views.GraphListView.as_view(), name="graph.list"),
path("graphs/create/", views.GraphCreateView.as_view(), name="graph.create"),
path(
"graphs/<str:pk>/",
views.GraphDetailView.as_view(),
name="graph.detail",
),
path(
"graphs/<str:pk>/update/",
views.GraphUpdateView.as_view(),
name="graph.update",
),
path(
"graphs/<str:pk>/delete/",
views.GraphDeleteView.as_view(),
name="graph.delete",
),
]
| 24.04 | 81 | 0.587354 | 66 | 601 | 5.257576 | 0.378788 | 0.144092 | 0.144092 | 0.216138 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.236273 | 601 | 24 | 82 | 25.041667 | 0.755991 | 0 | 0 | 0.272727 | 0 | 0 | 0.244592 | 0.076539 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.090909 | 0 | 0.090909 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
81690ba836e0e2d1c0fdfb89754bbbb996e53c02 | 2,823 | py | Python | lib/utils/blob.py | TheRevanchist/DeepWatershedDetection | 6d8f3b3ca6db67bcebef8e18fb11248e15bd9dc4 | [
"MIT"
] | null | null | null | lib/utils/blob.py | TheRevanchist/DeepWatershedDetection | 6d8f3b3ca6db67bcebef8e18fb11248e15bd9dc4 | [
"MIT"
] | null | null | null | lib/utils/blob.py | TheRevanchist/DeepWatershedDetection | 6d8f3b3ca6db67bcebef8e18fb11248e15bd9dc4 | [
"MIT"
] | null | null | null | # --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick - extended by Lukas Tuggener
# --------------------------------------------------------
"""Blob helper functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import cv2
import random
def im_list_to_blob(ims):
"""Convert a list of images into a network input.
Assumes images are already prepared (means subtracted, BGR order, ...).
"""
max_shape = np.array([im.shape for im in ims]).max(axis=0)
num_images = len(ims)
blob = np.zeros((num_images, max_shape[0], max_shape[1], 3),
dtype=np.float32)
for i in range(num_images):
im = ims[i]
blob[i, 0:im.shape[0], 0:im.shape[1], :] = im
return blob
def prep_im_for_blob(im, pixel_means, global_scale, args):
"""Mean subtract and scale an image for use in a blob."""
im = im.astype(np.float32, copy=False)
# substract mean
if args.substract_mean == "True":
im -= pixel_means
# do global scaling
im = cv2.resize(im, None, None, fx=global_scale, fy=global_scale,
interpolation=cv2.INTER_LINEAR)
im_size_max = np.max(im.shape[0:2])
    # Prevent the biggest axis from exceeding args.max_edge
if im_size_max > args.max_edge:
if not args.crop == "True":
# scale down if bigger than max size
re_scale = (float(args.max_edge) / float(im_size_max))
im = cv2.resize(im, None, None, fx=re_scale, fy=re_scale,
interpolation=cv2.INTER_LINEAR)
global_scale = global_scale*re_scale
crop_box = [0,0,im.shape[0],im.shape[1]]
else:
# Crop image
topleft = random.uniform(0,1)<args.crop_top_left_bias
# crop to max size if necessary
if im.shape[0] <= args.max_edge or topleft:
crop_0 = 0
else:
crop_0 = random.randint(0,im.shape[0]-args.max_edge)
if im.shape[1] <= args.max_edge or topleft:
crop_1 = 0
else:
crop_1 = random.randint(0,im.shape[1]-args.max_edge)
crop_box = [crop_0, crop_1, min(crop_0+args.max_edge,im.shape[0]), min(crop_1+args.max_edge,im.shape[1])]
im = im[crop_box[0]:crop_box[2],crop_box[1]:crop_box[3]]
else:
crop_box = [0, 0, im.shape[0], im.shape[1]]
if not args.pad_to == 0:
# pad to fit RefineNet #TODO fix refinenet padding problem
y_mulity = int(np.ceil(im.shape[0] / float(args.pad_to)))
x_mulity = int(np.ceil(im.shape[1] / float(args.pad_to)))
canv = np.ones([y_mulity * args.pad_to, x_mulity * args.pad_to,3], dtype=np.uint8) * 255
canv[0:im.shape[0], 0:im.shape[1]] = im
im = canv
return im, global_scale, crop_box
| 32.825581 | 111 | 0.631598 | 456 | 2,823 | 3.725877 | 0.307018 | 0.074161 | 0.047087 | 0.026486 | 0.260742 | 0.16598 | 0.080047 | 0.052972 | 0.052972 | 0.029429 | 0 | 0.029897 | 0.206164 | 2,823 | 85 | 112 | 33.211765 | 0.728246 | 0.240879 | 0 | 0.16 | 0 | 0 | 0.003795 | 0 | 0 | 0 | 0 | 0.011765 | 0 | 1 | 0.04 | false | 0 | 0.12 | 0 | 0.2 | 0.02 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
81691bebff51090814a13a3ea3f9262d90d38a7b | 1,022 | py | Python | edlm/convert/_get_media_folders.py | etcher-be/EDLM | 7b25c85252fd15c2c222b00271f7a32e335db704 | [
"MIT"
] | null | null | null | edlm/convert/_get_media_folders.py | etcher-be/EDLM | 7b25c85252fd15c2c222b00271f7a32e335db704 | [
"MIT"
] | 4 | 2020-03-24T16:53:26.000Z | 2020-06-26T08:31:13.000Z | edlm/convert/_get_media_folders.py | etcher-be/EDLM | 7b25c85252fd15c2c222b00271f7a32e335db704 | [
"MIT"
] | null | null | null | # coding=utf-8
"""
Gathers the media folders
"""
import elib
from ._context import Context
def get_media_folders(ctx: Context):
"""
Gathers the media folders
"""
ctx.info('gathering media folders')
media_folders = []
this_folder = ctx.source_folder
while True:
ctx.debug(f'traversing: "{this_folder}"')
media_folder_candidate = elib.path.ensure_path(this_folder, 'media', must_exist=False).absolute()
if media_folder_candidate.exists() and media_folder_candidate.is_dir():
ctx.debug(f'media folder found: "{media_folder_candidate}"')
media_folders.append(media_folder_candidate)
        if len(this_folder.parents) == 1:
            ctx.debug(f'reached mount point at: "{this_folder}"')
            break
this_folder = this_folder.parent
# if not media_folders:
# raise ConvertError('no media folder found', ctx)
ctx.info(f'media folders:\n{elib.pretty_format(media_folders)}')
ctx.media_folders = media_folders
| 28.388889 | 105 | 0.672211 | 132 | 1,022 | 4.969697 | 0.409091 | 0.20122 | 0.152439 | 0.067073 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.002503 | 0.2182 | 1,022 | 35 | 106 | 29.2 | 0.818523 | 0.136986 | 0 | 0 | 0 | 0 | 0.220537 | 0.082847 | 0 | 0 | 0 | 0 | 0 | 1 | 0.055556 | false | 0 | 0.111111 | 0 | 0.166667 | 0 | 0 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
8171ba68e87f53d5c2ecb6dd90deb2acd88e328d | 34,379 | py | Python | datastore/core/basic.py | datastore/datastore | 7ccf0cd4748001d3dbf5e6dda369b0f63e0269d3 | [
"MIT"
] | 65 | 2015-03-22T23:43:48.000Z | 2022-03-25T16:10:33.000Z | datastore/core/basic.py | datastore/datastore | 7ccf0cd4748001d3dbf5e6dda369b0f63e0269d3 | [
"MIT"
] | 3 | 2015-03-11T21:57:23.000Z | 2019-07-26T16:20:29.000Z | datastore/core/basic.py | datastore/datastore | 7ccf0cd4748001d3dbf5e6dda369b0f63e0269d3 | [
"MIT"
] | 14 | 2015-01-23T17:03:33.000Z | 2020-02-03T06:35:04.000Z |
from key import Key
from query import Cursor
class Datastore(object):
'''A Datastore represents storage for any key-value pair.
Datastores are general enough to be backed by all kinds of different storage:
in-memory caches, databases, a remote datastore, flat files on disk, etc.
The general idea is to wrap a more complicated storage facility in a simple,
uniform interface, keeping the freedom of using the right tools for the job.
In particular, a Datastore can aggregate other datastores in interesting ways,
like sharded (to distribute load) or tiered access (caches before databases).
While Datastores should be written general enough to accept all sorts of
values, some implementations will undoubtedly have to be specific (e.g. SQL
databases where fields should be decomposed into columns), particularly to
support queries efficiently.
'''
# Main API. Datastore implementations MUST implement these methods.
def get(self, key):
'''Return the object named by key or None if it does not exist.
None takes the role of default value, so no KeyError exception is raised.
Args:
key: Key naming the object to retrieve
Returns:
object or None
'''
raise NotImplementedError
def put(self, key, value):
'''Stores the object `value` named by `key`.
How to serialize and store objects is up to the underlying datastore.
It is recommended to use simple objects (strings, numbers, lists, dicts).
Args:
key: Key naming `value`
value: the object to store.
'''
raise NotImplementedError
def delete(self, key):
'''Removes the object named by `key`.
Args:
key: Key naming the object to remove.
'''
raise NotImplementedError
def query(self, query):
'''Returns an iterable of objects matching criteria expressed in `query`
Implementations of query will be the largest differentiating factor
amongst datastores. All datastores **must** implement query, even using
query's worst case scenario, see :ref:class:`Query` for details.
Args:
query: Query object describing the objects to return.
Returns:
iterable cursor with all objects matching criteria
'''
raise NotImplementedError
# Secondary API. Datastores MAY provide optimized implementations.
def contains(self, key):
'''Returns whether the object named by `key` exists.
The default implementation pays the cost of a get. Some datastore
implementations may optimize this.
Args:
key: Key naming the object to check.
Returns:
boolean whether the object exists
'''
return self.get(key) is not None
class NullDatastore(Datastore):
'''Stores nothing, but conforms to the API. Useful to test with.'''
def get(self, key):
'''Return the object named by key or None if it does not exist (None).'''
return None
def put(self, key, value):
'''Store the object `value` named by `key` (does nothing).'''
pass
def delete(self, key):
'''Remove the object named by `key` (does nothing).'''
pass
def query(self, query):
'''Returns an iterable of objects matching criteria in `query` (empty).'''
return query([])
class DictDatastore(Datastore):
'''Simple straw-man in-memory datastore backed by nested dicts.'''
def __init__(self):
self._items = dict()
def _collection(self, key):
'''Returns the namespace collection for `key`.'''
collection = str(key.path)
if collection not in self._items:
self._items[collection] = dict()
return self._items[collection]
def get(self, key):
'''Return the object named by `key` or None.
Retrieves the object from the collection corresponding to ``key.path``.
Args:
key: Key naming the object to retrieve.
Returns:
object or None
'''
try:
return self._collection(key)[key]
except KeyError:
return None
def put(self, key, value):
'''Stores the object `value` named by `key`.
Stores the object in the collection corresponding to ``key.path``.
Args:
key: Key naming `value`
value: the object to store.
'''
if value is None:
self.delete(key)
else:
self._collection(key)[key] = value
def delete(self, key):
'''Removes the object named by `key`.
Removes the object from the collection corresponding to ``key.path``.
Args:
key: Key naming the object to remove.
'''
try:
del self._collection(key)[key]
if len(self._collection(key)) == 0:
del self._items[str(key.path)]
except KeyError:
pass
def contains(self, key):
'''Returns whether the object named by `key` exists.
Checks for the object in the collection corresponding to ``key.path``.
Args:
key: Key naming the object to check.
Returns:
boolean whether the object exists
'''
return key in self._collection(key)
def query(self, query):
'''Returns an iterable of objects matching criteria expressed in `query`
Naively applies the query operations on the objects within the namespaced
collection corresponding to ``query.key.path``.
Args:
query: Query object describing the objects to return.
Returns:
iterable cursor with all objects matching criteria
'''
# entire dataset already in memory, so ok to apply query naively
if str(query.key) in self._items:
return query(self._items[str(query.key)].values())
else:
return query([])
def __len__(self):
return sum(map(len, self._items.values()))
class InterfaceMappingDatastore(Datastore):
'''Represents simple wrapper datastore around an object that, though not a
Datastore, implements data storage through a similar interface. For example,
memcached and redis both implement a `get`, `set`, `delete` interface.
'''
def __init__(self, service, get='get', put='put', delete='delete', key=str):
'''Initialize the datastore with given `service`.
Args:
service: A service that provides data storage through a similar interface
to Datastore. Using the service should only require a simple mapping
of methods, such as {put : set}.
get: The attribute name of the `service` method implementing get
put: The attribute name of the `service` method implementing put
delete: The attribute name of the `service` method implementing delete
key: A function converting a Datastore key (of type Key) into a `service`
key. The conversion will often be as simple as `str`.
'''
self._service = service
self._service_key = key
self._service_ops = {}
self._service_ops['get'] = getattr(service, get)
self._service_ops['put'] = getattr(service, put)
self._service_ops['delete'] = getattr(service, delete)
# AttributeError will be raised if service does not implement the interface
def get(self, key):
'''Return the object in `service` named by `key` or None.
Args:
key: Key naming the object to retrieve.
Returns:
object or None
'''
key = self._service_key(key)
return self._service_ops['get'](key)
def put(self, key, value):
'''Stores the object `value` named by `key` in `service`.
Args:
key: Key naming `value`.
value: the object to store.
'''
key = self._service_key(key)
self._service_ops['put'](key, value)
def delete(self, key):
'''Removes the object named by `key` in `service`.
Args:
key: Key naming the object to remove.
'''
key = self._service_key(key)
self._service_ops['delete'](key)
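# Hedged usage sketch: wrapping a dict-backed stand-in whose interface uses
# set/get/delete, as the docstring above describes for redis/memcached.
# _DictService is an illustrative name, not part of this library.
class _DictService(object):
    def __init__(self):
        self._d = {}
    def get(self, k):
        return self._d.get(k)
    def set(self, k, v):
        self._d[k] = v
    def delete(self, k):
        self._d.pop(k, None)
# e.g. mapped = InterfaceMappingDatastore(_DictService(), put='set')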
class ShimDatastore(Datastore):
'''Represents a non-concrete datastore that adds functionality between the
client and a lower level datastore. Shim datastores do not actually store
data themselves; instead, they delegate storage to an underlying child
datastore. The default implementation just passes all calls to the child.
'''
def __init__(self, datastore):
'''Initializes this ShimDatastore with child `datastore`.'''
if not isinstance(datastore, Datastore):
errstr = 'datastore must be of type %s. Got %s.'
raise TypeError(errstr % (Datastore, datastore))
self.child_datastore = datastore
# default implementation just passes all calls to child
def get(self, key):
'''Return the object named by key or None if it does not exist.
Default shim implementation simply returns ``child_datastore.get(key)``
Override to provide different functionality, for example::
def get(self, key):
value = self.child_datastore.get(key)
return json.loads(value)
Args:
key: Key naming the object to retrieve
Returns:
object or None
'''
return self.child_datastore.get(key)
def put(self, key, value):
'''Stores the object `value` named by `key`.
Default shim implementation simply calls ``child_datastore.put(key, value)``
Override to provide different functionality, for example::
def put(self, key, value):
value = json.dumps(value)
self.child_datastore.put(key, value)
Args:
key: Key naming `value`.
value: the object to store.
'''
self.child_datastore.put(key, value)
def delete(self, key):
'''Removes the object named by `key`.
Default shim implementation simply calls ``child_datastore.delete(key)``
Override to provide different functionality.
Args:
key: Key naming the object to remove.
'''
self.child_datastore.delete(key)
def query(self, query):
'''Returns an iterable of objects matching criteria expressed in `query`.
Default shim implementation simply returns ``child_datastore.query(query)``
Override to provide different functionality, for example::
def query(self, query):
cursor = self.child_datastore.query(query)
cursor._iterable = deserialized(cursor._iterable)
return cursor
Args:
query: Query object describing the objects to return.
Returns:
iterable cursor with all objects matching criteria
'''
return self.child_datastore.query(query)
class CacheShimDatastore(ShimDatastore):
'''Wraps a datastore with a caching shim that optimizes some calls.'''
def __init__(self, *args, **kwargs):
self.cache_datastore = kwargs.pop('cache')
if not isinstance(self.cache_datastore, Datastore):
errstr = 'datastore must be of type %s. Got %s.'
raise TypeError(errstr % (Datastore, self.cache_datastore))
super(CacheShimDatastore, self).__init__(*args, **kwargs)
def get(self, key):
'''Return the object named by key or None if it does not exist.
CacheShimDatastore first checks its ``cache_datastore``.
'''
value = self.cache_datastore.get(key)
return value if value is not None else self.child_datastore.get(key)
def put(self, key, value):
'''Stores the object `value` named by `key`.
Writes to both ``cache_datastore`` and ``child_datastore``.
'''
self.cache_datastore.put(key, value)
self.child_datastore.put(key, value)
def delete(self, key):
'''Removes the object named by `key`.
Writes to both ``cache_datastore`` and ``child_datastore``.
'''
self.cache_datastore.delete(key)
self.child_datastore.delete(key)
def contains(self, key):
'''Returns whether the object named by `key` exists.
First checks ``cache_datastore``.
'''
return self.cache_datastore.contains(key) \
or self.child_datastore.contains(key)
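# Hedged usage sketch: a fast in-memory cache in front of a (here also
# in-memory) child store; writes go to both, reads hit the cache first.
#   ds = CacheShimDatastore(DictDatastore(), cache=DictDatastore())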
class LoggingDatastore(ShimDatastore):
'''Wraps a datastore with a logging shim.'''
def __init__(self, child_datastore, logger=None):
if not logger:
import logging
logger = logging
self.logger = logger
super(LoggingDatastore, self).__init__(child_datastore)
def get(self, key):
'''Return the object named by key or None if it does not exist.
LoggingDatastore logs the access.
'''
self.logger.info('%s: get %s' % (self, key))
value = super(LoggingDatastore, self).get(key)
self.logger.debug('%s: %s' % (self, value))
return value
def put(self, key, value):
'''Stores the object `value` named by `key`.
LoggingDatastore logs the access.
'''
self.logger.info('%s: put %s' % (self, key))
self.logger.debug('%s: %s' % (self, value))
super(LoggingDatastore, self).put(key, value)
def delete(self, key):
'''Removes the object named by `key`.
LoggingDatastore logs the access.
'''
self.logger.info('%s: delete %s' % (self, key))
super(LoggingDatastore, self).delete(key)
def contains(self, key):
'''Returns whether the object named by `key` exists.
LoggingDatastore logs the access.
'''
self.logger.info('%s: contains %s' % (self, key))
return super(LoggingDatastore, self).contains(key)
def query(self, query):
'''Returns an iterable of objects matching criteria expressed in `query`.
LoggingDatastore logs the access.
'''
self.logger.info('%s: query %s' % (self, query))
return super(LoggingDatastore, self).query(query)
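# Hedged usage sketch: wiring a named logger into the shim (when no logger
# is given, the constructor above falls back to the root logging module).
#   import logging
#   logging.basicConfig(level=logging.DEBUG)
#   ds = LoggingDatastore(DictDatastore(), logger=logging.getLogger('datastore'))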
class KeyTransformDatastore(ShimDatastore):
'''Represents a simple ShimDatastore that applies a transform on all incoming
keys. For example:
>>> import datastore.core
>>> def transform(key):
... return key.reverse
...
>>> ds = datastore.DictDatastore()
>>> kt = datastore.KeyTransformDatastore(ds, keytransform=transform)
None
>>> ds.put(datastore.Key('/a/b/c'), 'abc')
>>> ds.get(datastore.Key('/a/b/c'))
'abc'
>>> kt.get(datastore.Key('/a/b/c'))
None
>>> kt.get(datastore.Key('/c/b/a'))
'abc'
>>> ds.get(datastore.Key('/c/b/a'))
None
'''
def __init__(self, *args, **kwargs):
'''Initializes KeyTransformDatastore with `keytransform` function.'''
self.keytransform = kwargs.pop('keytransform', None)
super(KeyTransformDatastore, self).__init__(*args, **kwargs)
def get(self, key):
'''Return the object named by keytransform(key).'''
return self.child_datastore.get(self._transform(key))
def put(self, key, value):
'''Stores the object named by keytransform(key).'''
return self.child_datastore.put(self._transform(key), value)
def delete(self, key):
'''Removes the object named by keytransform(key).'''
return self.child_datastore.delete(self._transform(key))
def contains(self, key):
'''Returns whether the object named by key is in this datastore.'''
return self.child_datastore.contains(self._transform(key))
def query(self, query):
'''Returns a sequence of objects matching criteria expressed in `query`'''
query = query.copy()
query.key = self._transform(query.key)
return self.child_datastore.query(query)
def _transform(self, key):
'''Returns a `key` transformed by `self.keytransform`.'''
return self.keytransform(key) if self.keytransform else key
class LowercaseKeyDatastore(KeyTransformDatastore):
'''Represents a simple ShimDatastore that lowercases all incoming keys.
For example:
>>> import datastore.core
>>> ds = datastore.DictDatastore()
>>> ds.put(datastore.Key('hello'), 'world')
>>> ds.put(datastore.Key('HELLO'), 'WORLD')
>>> ds.get(datastore.Key('hello'))
'world'
>>> ds.get(datastore.Key('HELLO'))
'WORLD'
>>> ds.get(datastore.Key('HeLlO'))
None
>>> lds = datastore.LowercaseKeyDatastore(ds)
>>> lds.get(datastore.Key('HeLlO'))
'world'
>>> lds.get(datastore.Key('HeLlO'))
'world'
>>> lds.get(datastore.Key('HeLlO'))
'world'
'''
def __init__(self, *args, **kwargs):
'''Initializes LowercaseKeyDatastore; keytransform is set to lowercaseKey.'''
super(LowercaseKeyDatastore, self).__init__(*args, **kwargs)
self.keytransform = self.lowercaseKey
@classmethod
def lowercaseKey(cls, key):
'''Returns a lowercased `key`.'''
return Key(str(key).lower())
class NamespaceDatastore(KeyTransformDatastore):
'''Represents a simple ShimDatastore that namespaces all incoming keys.
For example:
>>> import datastore.core
>>>
>>> ds = datastore.DictDatastore()
>>> ds.put(datastore.Key('/a/b'), 'ab')
>>> ds.put(datastore.Key('/c/d'), 'cd')
>>> ds.put(datastore.Key('/a/b/c/d'), 'abcd')
>>>
>>> nd = datastore.NamespaceDatastore('/a/b', ds)
>>> nd.get(datastore.Key('/a/b'))
None
>>> nd.get(datastore.Key('/c/d'))
'abcd'
>>> nd.get(datastore.Key('/a/b/c/d'))
None
>>> nd.put(datastore.Key('/c/d'), 'cd')
>>> ds.get(datastore.Key('/a/b/c/d'))
'cd'
'''
def __init__(self, namespace, *args, **kwargs):
'''Initializes NamespaceDatastore with `key` namespace.'''
super(NamespaceDatastore, self).__init__(*args, **kwargs)
self.keytransform = self.namespaceKey
self.namespace = Key(namespace)
def namespaceKey(self, key):
'''Returns a namespaced `key`: namespace.child(key).'''
return self.namespace.child(key)
class NestedPathDatastore(KeyTransformDatastore):
'''Represents a simple ShimDatastore that shards/namespaces incoming keys.
Incoming keys are sharded into nested namespaces. The idea is to use the key
name to separate into nested namespaces. This is akin to the directory
structure that ``git`` uses for objects. For example:
>>> import datastore.core
>>>
>>> ds = datastore.DictDatastore()
>>> np = datastore.NestedPathDatastore(ds, depth=3, length=2)
>>>
>>> np.put(datastore.Key('/abcdefghijk'), 1)
>>> np.get(datastore.Key('/abcdefghijk'))
1
>>> ds.get(datastore.Key('/abcdefghijk'))
None
>>> ds.get(datastore.Key('/ab/cd/ef/abcdefghijk'))
1
>>> np.put(datastore.Key('abc'), 2)
>>> np.get(datastore.Key('abc'))
2
>>> ds.get(datastore.Key('/ab/ca/bc/abc'))
2
'''
_default_depth = 3
_default_length = 2
_default_keyfn = lambda key: key.name
_default_keyfn = staticmethod(_default_keyfn)
def __init__(self, *args, **kwargs):
'''Initializes NestedPathDatastore with nesting options.
kwargs:
depth: the nesting level depth (e.g. 3 => /1/2/3/123) default: 3
length: the nesting level length (e.g. 2 => /12/123456) default: 2
'''
# assign the nesting variables
self.nest_depth = kwargs.pop('depth', self._default_depth)
self.nest_length = kwargs.pop('length', self._default_length)
self.nest_keyfn = kwargs.pop('keyfn', self._default_keyfn)
super(NestedPathDatastore, self).__init__(*args, **kwargs)
self.keytransform = self.nestKey
def query(self, query):
# Requires supporting * operator on queries.
raise NotImplementedError
def nestKey(self, key):
'''Returns a nested `key`.'''
nest = self.nest_keyfn(key)
# if depth * length > len(key.name), we need to pad.
mult = 1 + int(self.nest_depth * self.nest_length / len(nest))
nest = nest * mult
pref = Key(self.nestedPath(nest, self.nest_depth, self.nest_length))
return pref.child(key)
@staticmethod
def nestedPath(path, depth, length):
'''Returns a nested version of `path`, using its starting characters.
For example:
>>> NestedPathDatastore.nestedPath('abcdefghijk', 3, 2)
'ab/cd/ef'
>>> NestedPathDatastore.nestedPath('abcdefghijk', 4, 2)
'ab/cd/ef/gh'
>>> NestedPathDatastore.nestedPath('abcdefghijk', 3, 4)
'abcd/efgh/ijk'
>>> NestedPathDatastore.nestedPath('abcdefghijk', 1, 4)
'abcd'
>>> NestedPathDatastore.nestedPath('abcdefghijk', 3, 10)
'abcdefghij/k'
'''
components = [path[n:n+length] for n in xrange(0, len(path), length)]
components = components[:depth]
return '/'.join(components)
class SymlinkDatastore(ShimDatastore):
'''Datastore that creates filesystem-like symbolic link keys.
A symbolic link key is a way of naming the same value with multiple keys.
For example:
>>> import datastore.core
>>>
>>> dds = datastore.DictDatastore()
>>> sds = datastore.SymlinkDatastore(dds)
>>>
>>> a = datastore.Key('/A')
>>> b = datastore.Key('/B')
>>>
>>> sds.put(a, 1)
>>> sds.get(a)
1
>>> sds.link(a, b)
>>> sds.get(b)
1
>>> sds.put(b, 2)
>>> sds.get(b)
2
>>> sds.get(a)
2
>>> sds.delete(a)
>>> sds.get(a)
None
>>> sds.get(b)
None
>>> sds.put(a, 3)
>>> sds.get(a)
3
>>> sds.get(b)
3
>>> sds.delete(b)
>>> sds.get(b)
None
>>> sds.get(a)
3
'''
sentinel = 'datastore_link'
def _link_value_for_key(self, source_key):
'''Returns the link value for given `key`.'''
return str(source_key.child(self.sentinel))
def _link_for_value(self, value):
'''Returns the linked key if `value` is a link, or None.'''
try:
key = Key(value)
if key.name == self.sentinel:
return key.parent
except Exception:  # value could not be parsed as a Key, so it is not a link
pass
return None
def _follow_link(self, value):
'''Returns given `value` or, if it is a symlink, the `value` it names.'''
seen_keys = set()
while True:
link_key = self._link_for_value(value)
if not link_key:
return value
assert link_key not in seen_keys, 'circular symlink reference'
seen_keys.add(link_key)
value = super(SymlinkDatastore, self).get(link_key)
def _follow_link_gen(self, iterable):
'''A generator that follows links in values encountered.'''
for item in iterable:
yield self._follow_link(item)
def link(self, source_key, target_key):
'''Creates a symbolic link key pointing from `target_key` to `source_key`.'''
link_value = self._link_value_for_key(source_key)
# put straight into the child, to avoid following previous links.
self.child_datastore.put(target_key, link_value)
# exercise the link. ensure there are no cycles.
self.get(target_key)
def get(self, key):
'''Return the object named by `key`. Follows links.'''
value = super(SymlinkDatastore, self).get(key)
return self._follow_link(value)
def put(self, key, value):
'''Stores the object named by `key`. Follows links.'''
# if value is a link, don't follow links
if self._link_for_value(value):
super(SymlinkDatastore, self).put(key, value)
return
# if `key` points to a symlink, need to follow it.
current_value = super(SymlinkDatastore, self).get(key)
link_key = self._link_for_value(current_value)
if link_key:
self.put(link_key, value) # self.put: could be another link.
else:
super(SymlinkDatastore, self).put(key, value)
def query(self, query):
'''Returns objects matching criteria expressed in `query`. Follows links.'''
results = super(SymlinkDatastore, self).query(query)
return self._follow_link_gen(results)
class DirectoryDatastore(ShimDatastore):
'''Datastore that allows manual tracking of directory entries.
For example:
>>> ds = DirectoryDatastore(ds)
>>>
>>> # initialize directory at /foo
>>> ds.directory(Key('/foo'))
>>>
>>> # adding directory entries
>>> ds.directoryAdd(Key('/foo'), Key('/foo/bar'))
>>> ds.directoryAdd(Key('/foo'), Key('/foo/baz'))
>>>
>>> # value is a generator returning all the keys in this dir
>>> for key in ds.directoryRead(Key('/foo')):
... print key
Key('/foo/bar')
Key('/foo/baz')
>>>
>>> # querying for a collection works
>>> for item in ds.query(Query(Key('/foo'))):
... print item
'bar'
'baz'
'''
def directory(self, dir_key):
'''Initializes directory at dir_key.'''
dir_items = self.get(dir_key)
if not isinstance(dir_items, list):
self.put(dir_key, [])
def directoryRead(self, dir_key):
'''Returns a generator that iterates over all keys in the directory
referenced by `dir_key`.
The generator yields nothing if the directory `dir_key` does not exist.
'''
return self.directory_entries_generator(dir_key)
def directoryAdd(self, dir_key, key):
'''Adds directory entry `key` to directory at `dir_key`.
If the directory `dir_key` does not exist, it is created.
'''
key = str(key)
dir_items = self.get(dir_key) or []
if key not in dir_items:
dir_items.append(key)
self.put(dir_key, dir_items)
def directoryRemove(self, dir_key, key):
'''Removes directory entry `key` from directory at `dir_key`.
If either the directory `dir_key` or the directory entry `key` don't exist,
this method is a no-op.
'''
key = str(key)
dir_items = self.get(dir_key) or []
if key in dir_items:
dir_items = [k for k in dir_items if k != key]
self.put(dir_key, dir_items)
def directory_entries_generator(self, dir_key):
dir_items = self.get(dir_key) or []
for item in dir_items:
yield Key(item)
class DirectoryTreeDatastore(ShimDatastore):
'''Datastore that tracks directory entries, like in a filesystem.
All key changes cause changes in a collection-like directory.
For example:
>>> import datastore.core
>>>
>>> dds = datastore.DictDatastore()
>>> rds = datastore.DirectoryTreeDatastore(dds)
>>>
>>> a = datastore.Key('/A')
>>> b = datastore.Key('/A/B')
>>> c = datastore.Key('/A/C')
>>>
>>> rds.get(a)
[]
>>> rds.put(b, 1)
>>> rds.get(b)
1
>>> rds.get(a)
['/A/B']
>>> rds.put(c, 1)
>>> rds.get(c)
1
>>> rds.get(a)
['/A/B', '/A/C']
>>> rds.delete(b)
>>> rds.get(a)
['/A/C']
>>> rds.delete(c)
>>> rds.get(a)
[]
'''
def put(self, key, value):
'''Stores the object `value` named by `key`.
DirectoryTreeDatastore stores a directory entry.
'''
super(DirectoryTreeDatastore, self).put(key, value)
str_key = str(key)
# ignore root
if str_key == '/':
return
# retrieve directory, to add entry
dir_key = key.parent.instance('directory')
directory = self.directory(dir_key)
# ensure key is in directory
if str_key not in directory:
directory.append(str_key)
super(DirectoryTreeDatastore, self).put(dir_key, directory)
def delete(self, key):
'''Removes the object named by `key`.
DirectoryTreeDatastore removes the directory entry.
'''
super(DirectoryTreeDatastore, self).delete(key)
str_key = str(key)
# ignore root
if str_key == '/':
return
# retrieve directory, to remove entry
dir_key = key.parent.instance('directory')
directory = self.directory(dir_key)
# ensure key is not in directory
if directory and str_key in directory:
directory.remove(str_key)
if len(directory) > 0:
super(DirectoryTreeDatastore, self).put(dir_key, directory)
else:
super(DirectoryTreeDatastore, self).delete(dir_key)
def query(self, query):
'''Returns objects matching criteria expressed in `query`.
DirectoryTreeDatastore uses directory entries.
'''
return query(self.directory_values_generator(query.key))
def directory(self, key):
'''Retrieves directory entries for given key.'''
if key.name != 'directory':
key = key.instance('directory')
return self.get(key) or []
def directory_values_generator(self, key):
'''Retrieve directory values for given key.'''
directory = self.directory(key)
for key in directory:
yield self.get(Key(key))
class DatastoreCollection(ShimDatastore):
'''Represents a collection of datastores.'''
def __init__(self, stores=None):
'''Initialize the datastore with any provided datastores.'''
if stores is None:
stores = []  # avoid sharing a mutable default argument across instances
elif not isinstance(stores, list):
stores = list(stores)
for store in stores:
if not isinstance(store, Datastore):
raise TypeError("all stores must be of type %s" % Datastore)
self._stores = stores
def datastore(self, index):
'''Returns the datastore at `index`.'''
return self._stores[index]
def appendDatastore(self, store):
'''Appends datastore `store` to this collection.'''
if not isinstance(store, Datastore):
raise TypeError("stores must be of type %s" % Datastore)
self._stores.append(store)
def removeDatastore(self, store):
'''Removes datastore `store` from this collection.'''
self._stores.remove(store)
def insertDatastore(self, index, store):
'''Inserts datastore `store` into this collection at `index`.'''
if not isinstance(store, Datastore):
raise TypeError("stores must be of type %s" % Datastore)
self._stores.insert(index, store)
class TieredDatastore(DatastoreCollection):
'''Represents a hierarchical collection of datastores.
Each datastore is queried in order. This is helpful to organize access
order in terms of speed (i.e. read caches first).
Datastores should be arranged in order of completeness, with the most complete
datastore last, as it will handle query calls.
Semantics:
* get : returns first found value
* put : writes through to all
* delete : deletes through to all
* contains : returns True as soon as any datastore contains the key
* query : queries bottom (most complete) datastore
'''
def get(self, key):
'''Return the object named by key. Checks each datastore in order.'''
value = None
for store in self._stores:
value = store.get(key)
if value is not None:
break
# write the found value back to the faster stores earlier in the list
if value is not None:
for store2 in self._stores:
if store == store2:
break
store2.put(key, value)
return value
def put(self, key, value):
'''Stores the object in all underlying datastores.'''
for store in self._stores:
store.put(key, value)
def delete(self, key):
'''Removes the object from all underlying datastores.'''
for store in self._stores:
store.delete(key)
def query(self, query):
'''Returns a sequence of objects matching criteria expressed in `query`.
The last datastore will handle all query calls, as it has a complete
(if not the only complete) record of all objects.
'''
# queries hit the last (most complete) datastore
return self._stores[-1].query(query)
def contains(self, key):
'''Returns whether the object is in this datastore.'''
for store in self._stores:
if store.contains(key):
return True
return False
class ShardedDatastore(DatastoreCollection):
'''Represents a collection of datastore shards.
A datastore is selected based on a sharding function.
Sharding functions should take a Key and return an integer.
WARNING: adding or removing datastores while mid-use may severely affect
consistency. Also ensure the order is correct upon initialization.
While this is not as important for caches, it is crucial for
persistent datastores.
'''
def __init__(self, stores=None, shardingfn=hash):
'''Initialize the datastore with any provided datastores.'''
if not callable(shardingfn):
raise TypeError('shardingfn (type %s) is not callable' % type(shardingfn))
super(ShardedDatastore, self).__init__(stores)
self._shardingfn = shardingfn
def shard(self, key):
'''Returns the shard index to handle `key`, according to sharding fn.'''
return self._shardingfn(key) % len(self._stores)
def shardDatastore(self, key):
'''Returns the shard to handle `key`.'''
return self.datastore(self.shard(key))
def get(self, key):
'''Return the object named by key from the corresponding datastore.'''
return self.shardDatastore(key).get(key)
def put(self, key, value):
'''Stores the object to the corresponding datastore.'''
self.shardDatastore(key).put(key, value)
def delete(self, key):
'''Removes the object from the corresponding datastore.'''
self.shardDatastore(key).delete(key)
def contains(self, key):
'''Returns whether the object is in this datastore.'''
return self.shardDatastore(key).contains(key)
def query(self, query):
'''Returns a sequence of objects matching criteria expressed in `query`'''
cursor = Cursor(query, self.shard_query_generator(query))
cursor.apply_order() # ordering sharded queries is expensive (no generator)
return cursor
def shard_query_generator(self, query):
'''A generator that queries each shard in sequence.'''
shard_query = query.copy()
for shard in self._stores:
# yield all items matching within this shard
cursor = shard.query(shard_query)
for item in cursor:
yield item
# update the query with the results of this shard's query
shard_query.offset = max(shard_query.offset - cursor.skipped, 0)
if shard_query.limit:
shard_query.limit = max(shard_query.limit - cursor.returned, 0)
if shard_query.limit <= 0:
break # we're already done!
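# Hedged example (assumption: zlib for a stable digest, since hash() can
# vary across processes/builds, which matters for persistent shards):
#   import zlib
#   def stable_shard(key):
#       return zlib.crc32(str(key)) & 0xffffffff
#   sharded = ShardedDatastore([DictDatastore() for _ in range(4)],
#                              shardingfn=stable_shard)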
'''
Hello Tiered Access
>>> import pymongo
>>> import datastore.core
>>>
>>> from datastore.impl.mongo import MongoDatastore
>>> from datastore.impl.lrucache import LRUCache
>>> from datastore.impl.filesystem import FileSystemDatastore
>>>
>>> conn = pymongo.Connection()
>>> mongo = MongoDatastore(conn.test_db)
>>>
>>> cache = LRUCache(1000)
>>> fs = FileSystemDatastore('/tmp/.test_db')
>>>
>>> ds = datastore.TieredDatastore([cache, mongo, fs])
>>>
>>> hello = datastore.Key('hello')
>>> ds.put(hello, 'world')
>>> ds.contains(hello)
True
>>> ds.get(hello)
'world'
>>> ds.delete(hello)
>>> ds.get(hello)
None
Hello Sharding
>>> import datastore.core
>>>
>>> shards = [datastore.DictDatastore() for i in range(0, 10)]
>>>
>>> ds = datastore.ShardedDatastore(shards)
>>>
>>> hello = datastore.Key('hello')
>>> ds.put(hello, 'world')
>>> ds.contains(hello)
True
>>> ds.get(hello)
'world'
>>> ds.delete(hello)
>>> ds.get(hello)
None
'''
| 28.53029 | 80 | 0.651706 | 4,416 | 34,379 | 4.998868 | 0.125226 | 0.02487 | 0.014496 | 0.01812 | 0.446116 | 0.402763 | 0.353794 | 0.313658 | 0.260974 | 0.250011 | 0 | 0.002676 | 0.228337 | 34,379 | 1,204 | 81 | 28.553987 | 0.829432 | 0.030862 | 0 | 0.356948 | 0 | 0 | 0.029189 | 0 | 0 | 0 | 0 | 0 | 0.002725 | 0 | null | null | 0.010899 | 0.008174 | null | null | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
8174be4107d534513138717c81ca4815dbd17aaf | 2,760 | py | Python | pommerman/agents/http_agent.py | KaixiangLin/playground | a0eb299f4772bada1c528a881f3bf26404b131aa | [
"Apache-2.0"
] | 2 | 2018-11-10T08:31:13.000Z | 2018-11-13T08:16:45.000Z | pommerman/agents/http_agent.py | KaixiangLin/playground | a0eb299f4772bada1c528a881f3bf26404b131aa | [
"Apache-2.0"
] | null | null | null | pommerman/agents/http_agent.py | KaixiangLin/playground | a0eb299f4772bada1c528a881f3bf26404b131aa | [
"Apache-2.0"
] | null | null | null | '''The HTTP agent - provides observation using http push to remote
agent and expects action in the reply'''
import json
import time
import os
import threading
import requests
from . import BaseAgent
from .. import utility
from .. import characters
class HttpAgent(BaseAgent):
"""The HTTP Agent that connects to a port with a remote agent where the
character runs. It uses the same interface as the docker agent and
is useful for debugging."""
def __init__(self,
port=8080,
host='localhost',
timeout=120,
character=characters.Bomber):
self._port = port
self._host = host
self._timeout = timeout
super(HttpAgent, self).__init__(character)
self._wait_for_remote()
def _wait_for_remote(self):
"""Wait for network service to appear. A timeout of 0 waits forever."""
timeout = self._timeout
backoff = .25
max_backoff = min(timeout, 16)
if timeout:
# compute the absolute deadline once; it is shared by both retry branches
end = time.time() + timeout
while True:
try:
now = time.time()
if timeout and end < now:
print("Timed out - %s:%s" % (self._host, self._port))
raise RuntimeError("remote agent did not respond in time")  # bare raise had no active exception here
request_url = 'http://%s:%s/ping' % (self._host, self._port)
requests.get(request_url).raise_for_status()  # surface HTTP errors so the retry branch below backs off
self._acknowledged = True
return True
except requests.exceptions.ConnectionError as e:
print("ConnectionError: ", e)
backoff = min(max_backoff, backoff * 2)
time.sleep(backoff)
except requests.exceptions.HTTPError as e:
print("HTTPError: ", e)
backoff = min(max_backoff, backoff * 2)
time.sleep(backoff)
def act(self, obs, action_space):
obs_serialized = json.dumps(obs, cls=utility.PommermanJSONEncoder)
request_url = "http://{}:{}/action".format(self._host, self._port)
try:
req = requests.post(
request_url,
timeout=0.15,
json={
"obs":
obs_serialized,
"action_space":
json.dumps(action_space, cls=utility.PommermanJSONEncoder)
})
action = req.json()['action']
except requests.exceptions.Timeout as e:
print('Timeout!')
# TODO: Fix this. It's ugly.
action = [0] * len(action_space.shape)
if isinstance(action, list) and len(action) == 1:
action = action[0]
return action
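# Hedged sketch of the remote endpoint this agent expects: GET /ping as the
# readiness probe and POST /action returning {"action": <int>}. Flask is an
# assumption here for illustration; any HTTP server with these routes works.
#   from flask import Flask, jsonify, request
#   app = Flask(__name__)
#   @app.route('/ping')
#   def ping():
#       return 'pong'
#   @app.route('/action', methods=['POST'])
#   def action():
#       payload = request.get_json()  # carries 'obs' and 'action_space' JSON
#       return jsonify({'action': 0})  # e.g. always "stop"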
| 34.074074 | 81 | 0.544565 | 298 | 2,760 | 4.916107 | 0.385906 | 0.027304 | 0.024573 | 0.032765 | 0.061433 | 0.061433 | 0.061433 | 0.061433 | 0.061433 | 0.061433 | 0 | 0.011429 | 0.365942 | 2,760 | 80 | 82 | 34.5 | 0.825714 | 0.153623 | 0 | 0.095238 | 0 | 0 | 0.051694 | 0 | 0 | 0 | 0 | 0.0125 | 0 | 1 | 0.047619 | false | 0 | 0.126984 | 0 | 0.222222 | 0.063492 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
8174d6a81d47ed944222a745013e7d241d84e72a | 737 | py | Python | cacao_app/event/serializers.py | CacaoMovil/guia-de-cacao-django | 14d18edb76502736f6f31955509c3b413f1f91fc | [
"BSD-3-Clause"
] | 1 | 2016-03-07T17:03:45.000Z | 2016-03-07T17:03:45.000Z | cacao_app/event/serializers.py | CacaoMovil/guia-de-cacao-django | 14d18edb76502736f6f31955509c3b413f1f91fc | [
"BSD-3-Clause"
] | 4 | 2016-04-29T20:48:31.000Z | 2021-06-10T20:39:26.000Z | cacao_app/event/serializers.py | CacaoMovil/guia-de-cacao-django | 14d18edb76502736f6f31955509c3b413f1f91fc | [
"BSD-3-Clause"
] | 3 | 2016-03-04T19:46:45.000Z | 2016-05-11T19:46:00.000Z | # -*- coding: utf-8 -*-
from rest_framework import serializers
from django_countries.serializer_fields import CountryField
from .models import Event, CountryEvent
class CountryEventSerializer(serializers.ModelSerializer):
code = serializers.ReadOnlyField(source='country.code')
name = serializers.SerializerMethodField()
class Meta:
model = CountryEvent
fields = ('code', 'name')
def get_name(self, obj):
return obj.country.name
class EventsSerializer(serializers.ModelSerializer):
events_country = CountryEventSerializer(many=True, read_only=True)
class Meta:
model = Event
fields = (
'name', 'description', 'start', 'end', 'events_country'
)
| 25.413793 | 70 | 0.693351 | 72 | 737 | 7 | 0.569444 | 0.103175 | 0.055556 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.001712 | 0.207598 | 737 | 28 | 71 | 26.321429 | 0.861301 | 0.028494 | 0 | 0.111111 | 0 | 0 | 0.079832 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.055556 | false | 0 | 0.166667 | 0.055556 | 0.666667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
81770013c6cc12c6db69c1cb5d883f8060329eda | 536 | py | Python | main/permissions.py | hellojoshuatonga/notepik | 8f251fe9a689a9be8248d4da6260fe7c8742e3c0 | [
"MIT"
] | null | null | null | main/permissions.py | hellojoshuatonga/notepik | 8f251fe9a689a9be8248d4da6260fe7c8742e3c0 | [
"MIT"
] | null | null | null | main/permissions.py | hellojoshuatonga/notepik | 8f251fe9a689a9be8248d4da6260fe7c8742e3c0 | [
"MIT"
] | null | null | null | # Rest framework
from rest_framework import permissions
class IsAuthorOrReadOnly(permissions.BasePermission):
"""
Object-level permission. Checks whether the requesting user is the author. The author gets read and write permission; everyone else gets read-only access.
"""
def has_object_permission(self, request, view, obj):
# Allow any safe (read-only) method such as GET, HEAD or OPTIONS
if request.method in permissions.SAFE_METHODS:
return True
return obj.author == request.user
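# Hedged usage sketch (NoteViewSet is an illustrative name):
#   class NoteViewSet(viewsets.ModelViewSet):
#       permission_classes = [IsAuthorOrReadOnly]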
| 35.733333 | 175 | 0.718284 | 74 | 536 | 5.148649 | 0.675676 | 0.068241 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.225746 | 536 | 14 | 176 | 38.285714 | 0.918072 | 0.429104 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.166667 | false | 0 | 0.166667 | 0 | 0.833333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
8184c1d8dc29034b686437e80c0929c8f140a87c | 262 | py | Python | dpauth/admin.py | askmeaboutlo0m/website | 3df97d061a425e7fbb3f173c78ff01d831575aa0 | [
"MIT"
] | 9 | 2017-06-04T15:46:05.000Z | 2021-09-04T23:28:03.000Z | dpauth/admin.py | askmeaboutlo0m/website | 3df97d061a425e7fbb3f173c78ff01d831575aa0 | [
"MIT"
] | 24 | 2018-02-10T04:29:00.000Z | 2021-10-01T16:01:04.000Z | dpauth/admin.py | askmeaboutlo0m/website | 3df97d061a425e7fbb3f173c78ff01d831575aa0 | [
"MIT"
] | 4 | 2020-03-23T03:42:32.000Z | 2022-03-16T17:01:09.000Z | from django.contrib import admin
from . import models
@admin.register(models.Username)
class UsernameAdmin(admin.ModelAdmin):
list_display = ('user', 'name', 'is_mod')
readonly_fields = ('normalized_name',)
search_fields = ('user__email', 'name')
| 23.818182 | 45 | 0.717557 | 31 | 262 | 5.83871 | 0.709677 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.145038 | 262 | 10 | 46 | 26.2 | 0.808036 | 0 | 0 | 0 | 0 | 0 | 0.168582 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.285714 | 0 | 0.857143 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
8188e19b101be322e95cf844a7e3d5f16f246e15 | 346 | py | Python | iptv_proxy/providers/beast/json_api.py | sfanous/IPTVProxy | 23047be01a229ef8f69ea6ca55185eae93adc56e | [
"MIT"
] | 9 | 2018-11-02T02:51:50.000Z | 2022-01-12T06:22:33.000Z | iptv_proxy/providers/beast/json_api.py | sfanous/IPTVProxy | 23047be01a229ef8f69ea6ca55185eae93adc56e | [
"MIT"
] | 3 | 2019-05-11T21:28:32.000Z | 2020-04-27T00:58:46.000Z | iptv_proxy/providers/beast/json_api.py | sfanous/IPTVProxy | 23047be01a229ef8f69ea6ca55185eae93adc56e | [
"MIT"
] | 7 | 2019-01-03T20:31:30.000Z | 2022-01-29T04:09:24.000Z | import logging
from iptv_proxy.providers.beast.constants import BeastConstants
from iptv_proxy.providers.iptv_provider.json_api import ProviderConfigurationJSONAPI
logger = logging.getLogger(__name__)
class BeastConfigurationJSONAPI(ProviderConfigurationJSONAPI):
__slots__ = []
_provider_name = BeastConstants.PROVIDER_NAME.lower()
| 26.615385 | 84 | 0.84104 | 34 | 346 | 8.117647 | 0.588235 | 0.057971 | 0.094203 | 0.15942 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.098266 | 346 | 12 | 85 | 28.833333 | 0.884615 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.428571 | 0 | 0.857143 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 |
8189efb35e8c25b88203a01795c7461668948d95 | 969 | py | Python | src/download.py | stanislawbartkowski/webhdfsdirectory | 8f7110eb573487c845ab0126eb71f038edb5ed41 | [
"Apache-2.0"
] | null | null | null | src/download.py | stanislawbartkowski/webhdfsdirectory | 8f7110eb573487c845ab0126eb71f038edb5ed41 | [
"Apache-2.0"
] | null | null | null | src/download.py | stanislawbartkowski/webhdfsdirectory | 8f7110eb573487c845ab0126eb71f038edb5ed41 | [
"Apache-2.0"
] | null | null | null | """ Main program to launch proc/hdfs.py
"""
import argparse
import logging
from pars import addargs
import sys
logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.INFO)
from proc.hdfs import DIRHDFS
def gettestargs(parser) :
i = "/home/sbartkowski/work/webhdfsdirectory/testdata/inputhdfs.txt"
return parser.parse_args([i,"inimical1","14000","sb","/user/sb","dir1","/tmp/download","--dryrun"])
def getargs(parser) :
return parser.parse_args(sys.argv[1:])
def readargs():
parser = argparse.ArgumentParser(
description='Download HDFS using WEB REST/API')
addargs(parser)
# return gettestargs(parser)
return getargs(parser)
def main():
args = readargs()
T = DIRHDFS(args.host[0], args.port[0], args.user[0],args.regexp,args.dryrun)
T.downloadhdfsdir(args.userdir[0], args.usersubdir[0], args.localdir[0])
if __name__ == "__main__":
# execute only if run as a script
main()
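# Hedged CLI sketch, mirroring the sample arguments in gettestargs above
# (the exact flag names live in pars.addargs, which is not shown here):
#   python download.py inputhdfs.txt inimical1 14000 sb /user/sb dir1 /tmp/download --dryrun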
| 25.5 | 103 | 0.700722 | 130 | 969 | 5.146154 | 0.546154 | 0.037369 | 0.050822 | 0.06278 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.017011 | 0.150671 | 969 | 37 | 104 | 26.189189 | 0.795869 | 0.101135 | 0 | 0.086957 | 0 | 0 | 0.204176 | 0.100928 | 0 | 0 | 0 | 0 | 0 | 1 | 0.173913 | false | 0 | 0.26087 | 0.043478 | 0.565217 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
818d2b5226021a3473fd95143600b3a63ac484e1 | 869 | py | Python | checkov/cloudformation/checks/resource/aws/DocDBAuditLogs.py | niradler/checkov | 2628c6f28a5604efe3877d6eacc3044d2b66b7b1 | [
"Apache-2.0"
] | null | null | null | checkov/cloudformation/checks/resource/aws/DocDBAuditLogs.py | niradler/checkov | 2628c6f28a5604efe3877d6eacc3044d2b66b7b1 | [
"Apache-2.0"
] | 2 | 2022-03-07T07:15:32.000Z | 2022-03-21T07:21:17.000Z | checkov/cloudformation/checks/resource/aws/DocDBAuditLogs.py | niradler/checkov | 2628c6f28a5604efe3877d6eacc3044d2b66b7b1 | [
"Apache-2.0"
] | null | null | null | from checkov.cloudformation.checks.resource.base_resource_check import BaseResourceCheck
from checkov.common.parsers.node import DictNode
from checkov.common.models.enums import CheckResult, CheckCategories
class DocDBAuditLogs(BaseResourceCheck):
def __init__(self) -> None:
name = "Ensure DocDB has audit logs enabled"
id = "CKV_AWS_104"
supported_resources = ["AWS::DocDB::DBClusterParameterGroup"]
categories = [CheckCategories.LOGGING]
super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
def scan_resource_conf(self, conf: DictNode) -> CheckResult:
params = conf.get("Properties", {}).get("Parameters", {})
if params.get("audit_logs") == "enabled":
return CheckResult.PASSED
return CheckResult.FAILED
check = DocDBAuditLogs()
| 36.208333 | 106 | 0.721519 | 91 | 869 | 6.692308 | 0.549451 | 0.054187 | 0.055829 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.00419 | 0.176064 | 869 | 23 | 107 | 37.782609 | 0.846369 | 0 | 0 | 0 | 0 | 0 | 0.135788 | 0.040276 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0.0625 | 0.1875 | 0 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
8191a9d3234f49c843978a8688358673f859017f | 8,912 | py | Python | tools/tests/skimage_self_test.py | yinquan529/platform-external-skia | 1adfb847fe565e53d2e26e35b04c8dc112b7513a | [
"BSD-3-Clause"
] | 1 | 2016-05-04T10:08:50.000Z | 2016-05-04T10:08:50.000Z | tools/tests/skimage_self_test.py | yinquan529/platform-external-skia | 1adfb847fe565e53d2e26e35b04c8dc112b7513a | [
"BSD-3-Clause"
] | null | null | null | tools/tests/skimage_self_test.py | yinquan529/platform-external-skia | 1adfb847fe565e53d2e26e35b04c8dc112b7513a | [
"BSD-3-Clause"
] | 1 | 2020-01-16T03:34:53.000Z | 2020-01-16T03:34:53.000Z | #!/usr/bin/env python
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Self-test for skimage.
import filecmp
import os
import subprocess
import sys
import tempfile
class BinaryNotFoundException(Exception):
def __str__(self):
return ("Could not find binary!\n"
"Did you forget to build the tools project?\n"
"Self tests failed")
# Find a path to the binary to use. Iterates through a list of possible
# locations the binary may be.
def PickBinaryPath(base_dir):
POSSIBLE_BINARY_PATHS = [
'out/Debug/skimage',
'out/Release/skimage',
'xcodebuild/Debug/skimage',
'xcodebuild/Release/skimage',
]
for binary in POSSIBLE_BINARY_PATHS:
binary_full_path = os.path.join(base_dir, binary)
if (os.path.exists(binary_full_path)):
return binary_full_path
raise BinaryNotFoundException
# Quit early if two files have different content.
def DieIfFilesMismatch(expected, actual):
if not filecmp.cmp(expected, actual):
print 'Error: file mismatch! expected=%s , actual=%s' % (
expected, actual)
exit(1)
def test_invalid_file(file_dir, skimage_binary):
""" Test the return value of skimage when an invalid file is decoded.
If there is no expectation file, or the file expects a particular
result, skimage should return nonzero indicating failure.
If the file has no expectation, or ignore-failure is set to true,
skimage should return zero indicating success. """
invalid_file = os.path.join(file_dir, "skimage", "input", "bad-images",
"invalid.png")
# No expectations file:
args = [skimage_binary, "--readPath", invalid_file]
result = subprocess.call(args)
if 0 == result:
print "'%s' should have reported failure!" % " ".join(args)
exit(1)
# Directory holding all expectations files
expectations_dir = os.path.join(file_dir, "skimage", "input", "bad-images")
# Expectations file expecting a valid decode:
incorrect_expectations = os.path.join(expectations_dir,
"incorrect-results.json")
args = [skimage_binary, "--readPath", invalid_file,
"--readExpectationsPath", incorrect_expectations]
result = subprocess.call(args)
if 0 == result:
print "'%s' should have reported failure!" % " ".join(args)
exit(1)
# Empty expectations:
empty_expectations = os.path.join(expectations_dir, "empty-results.json")
output = subprocess.check_output([skimage_binary, "--readPath", invalid_file,
"--readExpectationsPath",
empty_expectations],
stderr=subprocess.STDOUT)
if not "Missing" in output:
# Another test (in main()) tests to ensure that "Missing" does not appear
# in the output. That test could be passed if the output changed so
# "Missing" never appears. This ensures that an error is not missed if
# that happens.
print "skimage output changed! This may cause other self tests to fail!"
exit(1)
# Ignore failure:
ignore_expectations = os.path.join(expectations_dir, "ignore-results.json")
output = subprocess.check_output([skimage_binary, "--readPath", invalid_file,
"--readExpectationsPath",
ignore_expectations],
stderr=subprocess.STDOUT)
if not "failures" in output:
# Another test (in main()) tests to ensure that "failures" does not
# appear in the output. That test could be passed if the output changed
# so "failures" never appears. This ensures that an error is not missed
# if that happens.
print "skimage output changed! This may cause other self tests to fail!"
exit(1)
def test_incorrect_expectations(file_dir, skimage_binary):
""" Test that comparing to incorrect expectations fails, unless
ignore-failures is set to true. """
valid_file = os.path.join(file_dir, "skimage", "input",
"images-with-known-hashes",
"1209453360120438698.png")
expectations_dir = os.path.join(file_dir, "skimage", "input",
"images-with-known-hashes")
incorrect_results = os.path.join(expectations_dir,
"incorrect-results.json")
args = [skimage_binary, "--readPath", valid_file, "--readExpectationsPath",
incorrect_results]
result = subprocess.call(args)
if 0 == result:
print "'%s' should have reported failure!" % " ".join(args)
exit(1)
ignore_results = os.path.join(expectations_dir, "ignore-failures.json")
subprocess.check_call([skimage_binary, "--readPath", valid_file,
"--readExpectationsPath", ignore_results])
def main():
# Use the directory of this file as the out directory
file_dir = os.path.abspath(os.path.dirname(__file__))
trunk_dir = os.path.normpath(os.path.join(file_dir, os.pardir, os.pardir))
# Find the binary
skimage_binary = PickBinaryPath(trunk_dir)
print "Running " + skimage_binary
# Generate an expectations file from known images.
images_dir = os.path.join(file_dir, "skimage", "input",
"images-with-known-hashes")
expectations_path = os.path.join(file_dir, "skimage", "output-actual",
"create-expectations", "expectations.json")
subprocess.check_call([skimage_binary, "--readPath", images_dir,
"--createExpectationsPath", expectations_path])
# Make sure the expectations file was generated correctly.
golden_expectations = os.path.join(file_dir, "skimage", "output-expected",
"create-expectations",
"expectations.json")
DieIfFilesMismatch(expected=golden_expectations, actual=expectations_path)
# Tell skimage to read back the expectations file it just wrote, and
# confirm that the images in images_dir match it.
output = subprocess.check_output([skimage_binary, "--readPath", images_dir,
"--readExpectationsPath",
expectations_path],
stderr=subprocess.STDOUT)
# Although skimage succeeded, it would have reported success if the file
# was missing from the expectations file. Consider this a failure, since
# the expectations file was created from this same image. (It will print
# "Missing" in this case before listing the missing expectations).
if "Missing" in output:
print "Expectations file was missing expectations!"
print output
exit(1)
# Again, skimage would succeed if there were known failures (and print
# "failures"), but there should be no failures, since the file just
# created did not include failures to ignore.
if "failures" in output:
print "Image failed!"
print output
exit(1)
test_incorrect_expectations(file_dir=file_dir,
skimage_binary=skimage_binary)
# Generate an expectations file from an empty directory.
empty_dir = tempfile.mkdtemp()
expectations_path = os.path.join(file_dir, "skimage", "output-actual",
"empty-dir", "expectations.json")
subprocess.check_call([skimage_binary, "--readPath", empty_dir,
"--createExpectationsPath", expectations_path])
golden_expectations = os.path.join(file_dir, "skimage", "output-expected",
"empty-dir", "expectations.json")
DieIfFilesMismatch(expected=golden_expectations, actual=expectations_path)
os.rmdir(empty_dir)
# Generate an expectations file from a nonexistent directory.
expectations_path = os.path.join(file_dir, "skimage", "output-actual",
"nonexistent-dir", "expectations.json")
subprocess.check_call([skimage_binary, "--readPath", "/nonexistent/dir",
"--createExpectationsPath", expectations_path])
golden_expectations = os.path.join(file_dir, "skimage", "output-expected",
"nonexistent-dir", "expectations.json")
DieIfFilesMismatch(expected=golden_expectations, actual=expectations_path)
test_invalid_file(file_dir=file_dir, skimage_binary=skimage_binary)
# Done with all tests.
print "Self tests succeeded!"
if __name__ == "__main__":
main()
| 44.78392 | 81 | 0.632518 | 1,003 | 8,912 | 5.490528 | 0.216351 | 0.023969 | 0.032686 | 0.030507 | 0.523334 | 0.488288 | 0.426548 | 0.396586 | 0.370438 | 0.315417 | 0 | 0.005267 | 0.275696 | 8,912 | 198 | 82 | 45.010101 | 0.84787 | 0.204107 | 0 | 0.388889 | 0 | 0 | 0.225972 | 0.059482 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.039683 | null | null | 0.095238 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
8195c711df03d29790fdcc4e7f130ef66986f549 | 788 | py | Python | examples/simple_lakehouse/simple_lakehouse/assets.py | bitdotioinc/dagster | 4fe395a37b206b1a48b956fa5dd72bf698104cca | [
"Apache-2.0"
] | 2 | 2021-06-21T17:50:26.000Z | 2021-06-21T19:14:23.000Z | examples/simple_lakehouse/simple_lakehouse/assets.py | bitdotioinc/dagster | 4fe395a37b206b1a48b956fa5dd72bf698104cca | [
"Apache-2.0"
] | 7 | 2022-03-16T06:55:04.000Z | 2022-03-18T07:03:25.000Z | examples/simple_lakehouse/simple_lakehouse/assets.py | bitdotioinc/dagster | 4fe395a37b206b1a48b956fa5dd72bf698104cca | [
"Apache-2.0"
] | 1 | 2021-08-18T17:21:57.000Z | 2021-08-18T17:21:57.000Z | """Asset definitions for the simple_lakehouse example."""
import pandas as pd
from lakehouse import Column, computed_table, source_table
from pyarrow import date32, float64, string
sfo_q2_weather_sample_table = source_table(
path="data", columns=[Column("tmpf", float64()), Column("valid_date", string())],
)
@computed_table(
input_assets=[sfo_q2_weather_sample_table],
columns=[Column("valid_date", date32()), Column("max_tmpf", float64())],
)
def daily_temperature_highs_table(sfo_q2_weather_sample: pd.DataFrame) -> pd.DataFrame:
"""Computes the temperature high for each day"""
sfo_q2_weather_sample["valid_date"] = pd.to_datetime(sfo_q2_weather_sample["valid"])
return sfo_q2_weather_sample.groupby("valid_date").max().rename(columns={"tmpf": "max_tmpf"})
| 41.473684 | 97 | 0.757614 | 108 | 788 | 5.194444 | 0.435185 | 0.053476 | 0.128342 | 0.192513 | 0.163993 | 0 | 0 | 0 | 0 | 0 | 0 | 0.022695 | 0.10533 | 788 | 18 | 98 | 43.777778 | 0.77305 | 0.119289 | 0 | 0 | 0 | 0 | 0.106881 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.076923 | false | 0 | 0.230769 | 0 | 0.384615 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
8197395414f35f5a57891af7ddfab20969d9cd9f | 301 | py | Python | 17-files/read-file-with-try-block.py | johnehunt/Python3Intro | 2a41ce488aac11bb3928ea81e57be1c2c8acdac2 | [
"Apache-2.0"
] | 1 | 2020-11-03T19:46:25.000Z | 2020-11-03T19:46:25.000Z | 14-files/read-file-with-try-block.py | johnehunt/PythonIntroDS | 7e9d5c5494191cd68bc71e140df5fb30290a8da6 | [
"Apache-2.0"
] | null | null | null | 14-files/read-file-with-try-block.py | johnehunt/PythonIntroDS | 7e9d5c5494191cd68bc71e140df5fb30290a8da6 | [
"Apache-2.0"
] | 1 | 2019-09-21T08:24:46.000Z | 2019-09-21T08:24:46.000Z | # Illustrates combining exception / error handling
# with file access
print('Start')
try:
with open('myfile2.txt', 'r') as f:
lines = f.readlines()
for line in lines:
print(line, end='')
except FileNotFoundError as err:
print('oops')
print(err)
print('Done')
| 20.066667 | 50 | 0.61794 | 38 | 301 | 4.894737 | 0.736842 | 0.086022 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.004444 | 0.252492 | 301 | 14 | 51 | 21.5 | 0.822222 | 0.215947 | 0 | 0 | 0 | 0 | 0.107296 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.5 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 |
819bd18a4722e9a3211561882e51cf2324399bde | 1,693 | py | Python | src/Testing/ZopeTestCase/__init__.py | tseaver/Zope-RFA | 08634f39b0f8b56403a2a9daaa6ee4479ef0c625 | [
"ZPL-2.1"
] | 2 | 2015-12-21T10:34:56.000Z | 2017-09-24T11:07:58.000Z | src/Testing/ZopeTestCase/__init__.py | MatthewWilkes/Zope | 740f934fc9409ae0062e8f0cd6dcfd8b2df00376 | [
"ZPL-2.1"
] | null | null | null | src/Testing/ZopeTestCase/__init__.py | MatthewWilkes/Zope | 740f934fc9409ae0062e8f0cd6dcfd8b2df00376 | [
"ZPL-2.1"
] | null | null | null | ##############################################################################
#
# Copyright (c) 2005 Zope Foundation and Contributors.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Names exported by the ZopeTestCase package
"""
import ZopeLite as Zope2
import utils
import layer
from ZopeLite import hasProduct
from ZopeLite import installProduct
from ZopeLite import hasPackage
from ZopeLite import installPackage
from ZopeLite import _print
from ZopeTestCase import folder_name
from ZopeTestCase import user_name
from ZopeTestCase import user_password
from ZopeTestCase import user_role
from ZopeTestCase import standard_permissions
from ZopeTestCase import ZopeTestCase
from ZopeTestCase import FunctionalTestCase
from PortalTestCase import portal_name
from PortalTestCase import PortalTestCase
from sandbox import Sandboxed
from functional import Functional
from base import TestCase
from base import app
from base import close
from warnhook import WarningsHook
from unittest import main
from zopedoctest import ZopeDocTestSuite
from zopedoctest import ZopeDocFileSuite
from zopedoctest import FunctionalDocTestSuite
from zopedoctest import FunctionalDocFileSuite
import zopedoctest as doctest
import transaction
import placeless
Zope = Zope2
| 29.189655 | 78 | 0.759598 | 197 | 1,693 | 6.492386 | 0.472081 | 0.087568 | 0.120407 | 0.060985 | 0.046912 | 0 | 0 | 0 | 0 | 0 | 0 | 0.005416 | 0.127584 | 1,693 | 57 | 79 | 29.701754 | 0.860528 | 0.282339 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0.03125 | 0.96875 | 0 | 0.96875 | 0.03125 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 |
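# The ZopeTestCase __init__ above illustrates the package-facade pattern: a package
# re-exports names from its submodules so client code has a single import point.
# A tiny self-contained sketch of the same idea (all names here are hypothetical):
import types

base = types.ModuleType("base")            # stands in for a submodule
base.TestCase = type("TestCase", (), {})   # name defined in the submodule

facade = types.ModuleType("mypackage")     # stands in for the package __init__
facade.TestCase = base.TestCase            # re-export: "from base import TestCase"
print(facade.TestCase is base.TestCase)    # True: one object, two import paths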
81a35f7c896207540f74045284e195d4e4fb7b21 | 667 | py | Python | Median.py | fatih-iver/Intro-to-Computer-Science-with-Python | 7b8127681415dfd100a0e70fe8a672cec696bbb7 | [
"MIT"
] | null | null | null | Median.py | fatih-iver/Intro-to-Computer-Science-with-Python | 7b8127681415dfd100a0e70fe8a672cec696bbb7 | [
"MIT"
] | null | null | null | Median.py | fatih-iver/Intro-to-Computer-Science-with-Python | 7b8127681415dfd100a0e70fe8a672cec696bbb7 | [
"MIT"
] | null | null | null | # Define a procedure, median, that takes three
# numbers as its inputs, and returns the median
# of the three numbers.
# Make sure your procedure has a return statement.
def bigger(a, b):
    if a > b:
        return a
    else:
        return b
def biggest(a, b, c):
    return bigger(a, bigger(b, c))
def median(a, b, c):
    if (b >= a and a >= c) or (c >= a and a >= b):
        return a
    if (a >= b and b >= c) or (c >= b and b >= a):
        return b
    if (a >= c and c >= b) or (b >= c and c >= a):
        return c
print(median(1,2,3))
#>>> 2
print(median(9,3,6))
#>>> 6
print(median(7,8,7))
#>>> 7 | 20.212121 | 51 | 0.493253 | 115 | 667 | 2.86087 | 0.321739 | 0.036474 | 0.024316 | 0.054711 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.027907 | 0.355322 | 667 | 33 | 52 | 20.212121 | 0.737209 | 0.263868 | 0 | 0.235294 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.176471 | false | 0 | 0 | 0.058824 | 0.529412 | 0.176471 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
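# An equivalent one-liner for comparison (a sketch, not part of the original
# exercise): sort the three values and take the middle element.
def median_sorted(a, b, c):
    return sorted([a, b, c])[1]

print(median_sorted(1, 2, 3))  # >>> 2
print(median_sorted(9, 3, 6))  # >>> 6
print(median_sorted(7, 8, 7))  # >>> 7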
81acfe851d89593a12e5f0cfee315b25fd2a0d5f | 1,636 | py | Python | gap/src/util/data_iterator.py | cosmozhang/autoencoding_parsing | 2e8f4811ca6032f4f89195cd019a4fce4b399dcc | [
"BSD-3-Clause"
] | null | null | null | gap/src/util/data_iterator.py | cosmozhang/autoencoding_parsing | 2e8f4811ca6032f4f89195cd019a4fce4b399dcc | [
"BSD-3-Clause"
] | null | null | null | gap/src/util/data_iterator.py | cosmozhang/autoencoding_parsing | 2e8f4811ca6032f4f89195cd019a4fce4b399dcc | [
"BSD-3-Clause"
] | null | null | null | from collections import OrderedDict, defaultdict
import numpy as np
def gen_sid_len(sentences):
    """Generate an OrderedDict mapping sentence id to sentence length."""
    sid2len = OrderedDict()
    for i, sent in enumerate(sentences):
        sid2len[i] = len(sent)
    return sid2len
def batch_slice(data, batch_size):
# data is a list of sentences of the same length
batch_num = int(np.ceil(len(data) / float(batch_size)))
for i in xrange(batch_num):
        cur_batch_size = batch_size if i < batch_num - 1 else len(data) - batch_size * i
        # cur_batch_size equals batch_size except for the final batch, which may be smaller.
        sents = data[i * batch_size: i * batch_size + cur_batch_size]
yield sents
def data_iter(sents_id2length_dic, batch_size, shuffle=True):
"""
randomly permute data, then sort by source length, and partition into batches
ensure that the length of source sentences in each batch is decreasing
"""
buckets = defaultdict(list)
for (sent_id, sent_len) in sents_id2length_dic.iteritems():
buckets[sent_len].append(sent_id)
batched_data = []
for (sent_len, sent_ids_smlen) in buckets.iteritems():
# sent_ids_smlen is a list of sentences of the same length
if shuffle:
np.random.shuffle(sent_ids_smlen)
        # 'extend' expects an iterable and consumes the batch_slice generator fully.
batched_data.extend(list(batch_slice(sent_ids_smlen, batch_size)))
if shuffle:
np.random.shuffle(batched_data)
for batch in batched_data:
"""
sent_ids in the same batch are of the same length
"""
yield batch
| 31.461538 | 88 | 0.665037 | 235 | 1,636 | 4.438298 | 0.348936 | 0.094919 | 0.046021 | 0.043145 | 0.1093 | 0.063279 | 0.063279 | 0.063279 | 0.063279 | 0 | 0 | 0.00491 | 0.253056 | 1,636 | 51 | 89 | 32.078431 | 0.848609 | 0.192543 | 0 | 0.076923 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.115385 | false | 0 | 0.076923 | 0 | 0.230769 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
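# Usage sketch for the helpers above (toy data is hypothetical). The module is
# written for Python 2 (xrange/iteritems), so this runs under Python 2 as-is.
sentences = [["a"], ["b", "c"], ["d", "e"], ["f"], ["g", "h"]]
sid2len = gen_sid_len(sentences)  # OrderedDict: {0: 1, 1: 2, 2: 2, 3: 1, 4: 2}
for batch in data_iter(sid2len, batch_size=2, shuffle=False):
    print(batch)  # each batch holds ids of sentences of the same length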
81afed5d2a7be68d968744aa55c07d3f1c78d48b | 241,016 | py | Python | output/myresults.py | jacobseiler/rsage | b3b0a3fa3c676eab188991e37d06894396bfc74f | [
"MIT"
] | 1 | 2019-05-23T04:11:32.000Z | 2019-05-23T04:11:32.000Z | output/myresults.py | jacobseiler/rsage | b3b0a3fa3c676eab188991e37d06894396bfc74f | [
"MIT"
] | 7 | 2018-08-17T05:04:57.000Z | 2019-01-16T05:40:16.000Z | output/myresults.py | jacobseiler/rsage | b3b0a3fa3c676eab188991e37d06894396bfc74f | [
"MIT"
] | null | null | null | #!/usr/bin/env python
from __future__ import print_function
import matplotlib
matplotlib.use('Agg')
import os
import heapq
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.colors as colors
import matplotlib.cm as cm
from numpy import *
from random import sample, seed, randint
from os.path import getsize as getFileSize
import math
import random
import csv
from cycler import cycler
from io import StringIO
#np.set_printoptions(threshold=np.nan)
from collections import Counter
from matplotlib.colors import LogNorm
from mpl_toolkits.axes_grid1 import AxesGrid
from astropy import units as u
from astropy import cosmology
import matplotlib.ticker as mtick
import PlotScripts
import ReadScripts
import AllVars
import GalaxyPhotoion as photo
import ObservationalData as Obs
import gnedin_analytic as ga
from mpi4py import MPI
import sys
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
size = comm.Get_size()
AllVars.Set_Params_Kali()
AllVars.Set_Constants()
PlotScripts.Set_Params_Plot()
output_format = ".png"
# For the Tiamat extended results there is a weird hump when calculating the escape fraction.
# This hump occurs at a halo mass of approximately 10.3.
# The calculation of fesc skips this hump range (defined from kink_low to kink_high)
kink_low = 10.3
kink_high = 10.30000001
m_low = 7.0 # We only sum the photons coming from halos within the mass range m_low < Halo Mass < m_high
m_high = 15.0
m_gal_low = 3.0
m_gal_high = 12.0
m_low_SAGE = pow(10, m_low)/1.0e10 * AllVars.Hubble_h
m_high_SAGE = pow(10, m_high)/1.0e10 * AllVars.Hubble_h
bin_width = 0.2
NB = int((m_high - m_low) / bin_width)
NB_gal = int((m_gal_high - m_gal_low) / bin_width)
fej_low = 0.0
fej_high = 1.0
fej_bin_width = 0.05
NB_fej = int((fej_high - fej_low) / fej_bin_width)
def raise_list_power(my_list, n):
return [pow(x, n) for x in my_list]
def raise_power_list(my_list, n):
return [pow(n, x) for x in my_list]
def calculate_beta(MUV, z):
'''
Calculation of the dust attenuation parameter Beta. Fit values are from Bouwens (2015) ApJ 793, 115.
    For z = 5 and 6, Bouwens uses a piece-wise linear relationship; a single linear relationship is used at higher redshift.
Parameters
----------
MUV : `float'
A value of the absolute magnitude in the UV (generally M1600) in the AB magnitude system.
z : `float'
Redshift the attenuation is calculated at.
Returns
------
beta : `float'
        Value of the UV continuum parameter beta.
'''
if (z >= 4.5 and z < 5.5): # z = 5 fits.
if (MUV > -18.8):
dB = -0.08
else:
dB = -0.17
B = -2.05
offset = 18.8
elif (z >= 5.5 and z < 6.5): # z = 6 fits.
if (MUV > -18.8):
dB = -0.08
else:
dB = -0.24
B = -2.22
offset = 18.8
elif (z >= 6.5 and z < 7.5): # z = 7 fits.
dB = -0.20
B = -2.05
offset = 19.5
elif (z >= 7.5 and z < 8.5): # z = 8 fits.
dB = -0.15
B = -2.13
offset = 19.5
elif (z >= 8.5 and z < 9.5): # z = 9 fits.
dB = -0.16
B = -2.19
offset = 19.5
elif (z >= 9.5 and z < 10.5): # z = 10 fits.
dB = -0.16
B = -2.16
offset = 19.5
beta = dB * (MUV + offset) + B
return beta
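def _example_calculate_beta():
    # Illustrative only (not part of the original script): for z = 7 the fit above
    # has dB = -0.20, B = -2.05, offset = 19.5, so an MUV = -20 galaxy gives
    #     beta = -0.20 * (-20.0 + 19.5) - 2.05 = -1.95
    return calculate_beta(-20.0, 7.0)  # -> -1.95 (up to floating point)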
def multiply(array):
'''
Performs element wise multiplication.
Parameters
----------
array : `~numpy.darray'
The array to be multiplied.
Returns
-------
total : `float'
Total of the elements multiplied together.
'''
total = 1
for i in range(0, len(array)):
total *= array[i]
return total
##
def Sum_Log(array):
'''
    Performs an element-wise sum of an array whose elements are in log-space.
Parameters
----------
array : array
Array with elements in log-space.
Returns
------
sum_total : float
Value of the elements taken to the power of 10 and summed.
Units
-----
All units are kept the same as the inputs.
'''
sum_total = 0.0
for i in range(0, len(array)):
sum_total += 10**array[i]
return sum_total
##
def Std_Log(array, mean):
'''
Calculates the standard deviation of an array with elements in log-space.
Parameters
----------
array : array
Array with elements in log-space.
mean : float
Mean of the array (not in log).
Returns
------
std : float
Standard deviation of the input array taken to the power of 10.
Units
-----
All units are kept the same as the inputs.
'''
sum_total = 0.0
for i in range(0, len(array)):
sum_total += (10**array[i] - mean)**2
sum_total *= 1.0/len(array)
std = np.sqrt(sum_total)
return std
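def _example_log_stats():
    # Illustrative only (not part of the original script): Sum_Log/Std_Log take
    # log10 inputs but return real-space values. For log-space data [1, 2],
    # i.e. the values 10 and 100:
    log_vals = [1.0, 2.0]
    total = Sum_Log(log_vals)         # 10 + 100 = 110
    mean = total / len(log_vals)      # 55
    spread = Std_Log(log_vals, mean)  # sqrt(((10-55)**2 + (100-55)**2) / 2) = 45
    return total, mean, spread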
###
def collect_across_tasks(mean_per_task, std_per_task, N_per_task, SnapList,
BinSnapList=[], binned=False, m_bin_low=0.0,
m_bin_high=0.0, my_bin_width=bin_width):
"""
Reduces arrays that are unique to each task onto the master task.
The dimensions of the input arrays will change slightly if we are collecting a statistics
that is binned across e.g., halo mass or galaxy stellar mass.
Parameters
----------
mean_per_task, std_per_task, N_per_task: Nested 2D (or 3D if binned == True) arrays of floats.
Outer length is equal to the number of models.
Inner length is equal to the number of snapshots the data has been calculated for.
        The inner-most length is equal to the number of bins.
Contains the mean/standard deviation/number of objects unique for each task.
SnapList: Nested 2D arrays of integers. Outer length is equal to the number of models.
Contains the snapshot numbers the data has been calculated for each model.
BinSnapList: Nested 2D arrays of integers. Outer length is equal to the number of models.
Often statistics are calculated for ALL snapshots but we only wish to plot for a subset of snapshots.
This variable allows the binned data to be collected for only a subset of the snapshots.
binned: Boolean.
Dictates whether the collected data is a 2D or 3D array with the inner-most array being binned across e.g., halo mass.
Returns
----------
master_mean, master_std, master_N: Nested 2D (or 3D if binned == True) arrays of floats.
Shape is identical to the input mean_per_task etc.
If rank == 0 these contain the collected statistics.
Otherwise these will be none.
master_bin_middle: Array of floats.
Contains the location of the middle of the bins for the data.
"""
master_mean = []
master_std = []
master_N = []
master_bin_middle = []
for model_number in range(0, len(SnapList)):
master_mean.append([])
master_std.append([])
master_N.append([])
master_bin_middle.append([])
# If we're collecting a binned statistic (e.g., binned across halo mass), then we need to perform the collecting per snapshot.
if binned:
count = 0
for snapshot_idx in range(len(SnapList[model_number])):
if SnapList[model_number][snapshot_idx] == BinSnapList[model_number][count]:
master_mean[model_number], master_std[model_number], master_N[model_number] = calculate_pooled_stats(master_mean[model_number], master_std[model_number], master_N[model_number], mean_per_task[model_number][snapshot_idx], std_per_task[model_number][snapshot_idx], N_per_task[model_number][snapshot_idx])
master_bin_middle[model_number].append(np.arange(m_bin_low,
m_bin_high+my_bin_width,
my_bin_width)[:-1]
+ my_bin_width* 0.5)
count += 1
if count == len(BinSnapList[model_number]):
break
else:
master_mean[model_number], master_std[model_number], master_N[model_number] = calculate_pooled_stats(master_mean[model_number], master_std[model_number], master_N[model_number],
mean_per_task[model_number], std_per_task[model_number],
N_per_task[model_number])
if rank == 0:
master_mean[model_number] = master_mean[model_number][0]
master_std[model_number] = master_std[model_number][0]
master_N[model_number] = master_N[model_number][0]
return master_mean, master_std, master_N, master_bin_middle
###
def calculate_pooled_stats(mean_pool, std_pool, N_pool, mean_local, std_local, N_local):
'''
Calculates the pooled mean and standard deviation from multiple processors and appends it to an input array.
Formulae taken from https://en.wikipedia.org/wiki/Pooled_variance
As we only care about these stats on the rank 0 process, we make use of junk inputs/outputs for other ranks.
    NOTE: The input data may be arrays (e.g., when pooling the mean/std for a stellar mass function).
Parameters
----------
mean_pool, std_pool, N_pool : array of floats.
Arrays that contain the current pooled means/standard deviation/number of data points (for rank 0) or just a junk input (for other ranks).
mean_local, mean_std : float or array of floats.
The non-pooled mean and standard deviation unique for each process.
N_local : floating point number or array of floating point numbers.
Number of data points used to calculate the mean/standard deviation that is going to be added to the pool.
NOTE: Use floating point here so we can use MPI.DOUBLE for all MPI functions.
Returns
-------
mean_pool, std_pool : array of floats.
Original array with the new pooled mean/standard deviation appended (for rank 0) or the new pooled mean/standard deviation only (for other ranks).
Units
-----
All units are the same as the input.
All inputs MUST BE real-space (not log-space).
'''
if isinstance(mean_local, list) == True:
if len(mean_local) != len(std_local):
print("len(mean_local) = {0} \t len(std_local) = {1}".format(len(mean_local), len(std_local)))
raise ValueError("Lengths of mean_local and std_local should be equal")
if ((type(mean_local).__module__ == np.__name__) == True or (isinstance(mean_local, list) == True)): # Checks to see if we are dealing with arrays.
N_times_mean_local = np.multiply(N_local, mean_local)
N_times_var_local = np.multiply(N_local, np.multiply(std_local, std_local))
N_local = np.array(N_local).astype(float)
N_times_mean_local = np.array(N_times_mean_local).astype(np.float32)
if rank == 0: # Only rank 0 holds the final arrays so only it requires proper definitions.
N_times_mean_pool = np.zeros_like(N_times_mean_local)
N_pool_function = np.zeros_like(N_local)
N_times_var_pool = np.zeros_like(N_times_var_local)
N_times_mean_pool = N_times_mean_pool.astype(np.float64) # Recast everything to double precision then use MPI.DOUBLE.
N_pool_function = N_pool_function.astype(np.float64)
N_times_var_pool = N_times_var_pool.astype(np.float64)
else:
N_times_mean_pool = None
N_pool_function = None
N_times_var_pool = None
comm.Barrier()
N_times_mean_local = N_times_mean_local.astype(np.float64)
N_local = N_local.astype(np.float64)
N_times_var_local = N_times_var_local.astype(np.float64)
comm.Reduce([N_times_mean_local, MPI.DOUBLE], [N_times_mean_pool, MPI.DOUBLE], op = MPI.SUM, root = 0) # Sum the arrays across processors.
comm.Reduce([N_local, MPI.DOUBLE],[N_pool_function, MPI.DOUBLE], op = MPI.SUM, root = 0)
comm.Reduce([N_times_var_local, MPI.DOUBLE], [N_times_var_pool, MPI.DOUBLE], op = MPI.SUM, root = 0)
else:
N_times_mean_local = N_local * mean_local
N_times_var_local = N_local * std_local * std_local
N_times_mean_pool = comm.reduce(N_times_mean_local, op = MPI.SUM, root = 0)
N_pool_function = comm.reduce(N_local, op = MPI.SUM, root = 0)
N_times_var_pool = comm.reduce(N_times_var_local, op = MPI.SUM, root = 0)
if rank == 0:
mean_pool_function = np.zeros((len(N_pool_function)))
std_pool_function = np.zeros((len(N_pool_function)))
for i in range(0, len(N_pool_function)):
if N_pool_function[i] == 0:
mean_pool_function[i] = 0.0
else:
mean_pool_function[i] = np.divide(N_times_mean_pool[i], N_pool_function[i])
if N_pool_function[i] < 3:
std_pool_function[i] = 0.0
else:
std_pool_function[i] = np.sqrt(np.divide(N_times_var_pool[i], N_pool_function[i]))
mean_pool.append(mean_pool_function)
std_pool.append(std_pool_function)
N_pool.append(N_pool_function)
return mean_pool, std_pool, N_pool
else:
return mean_pool, std_pool, N_pool_function # Junk return because non-rank 0 doesn't care.
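def _example_pooled_stats():
    # Illustrative only (not part of the original script): the pooled formulae used
    # above, checked against numpy on a single process (no MPI required).
    # Pooled mean = sum(N_i * mu_i) / sum(N_i);
    # pooled variance = sum(N_i * sigma_i**2) / sum(N_i).
    chunks = [np.array([1.0, 2.0, 3.0]), np.array([4.0, 5.0, 6.0, 7.0])]
    N = np.array([len(c) for c in chunks], dtype=np.float64)
    mu = np.array([c.mean() for c in chunks])
    var = np.array([c.std() ** 2 for c in chunks])
    pooled_mean = np.sum(N * mu) / np.sum(N)  # equals np.concatenate(chunks).mean()
    pooled_std = np.sqrt(np.sum(N * var) / np.sum(N))
    # Note: pooled_std differs from the std of the concatenated data because the
    # between-chunk mean offsets are not included, matching the code above.
    return pooled_mean, pooled_std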
##
def StellarMassFunction(SnapList, SMF, simulation_norm, FirstFile, LastFile, NumFile, ResolutionLimit_mean, model_tags, observations, paper_plot, output_tag):
'''
    Calculates the stellar mass function for given galaxies with the option to overplot observations by Song et al. (2016) at z = 6, 7, 8 and/or Baldry et al. (2008) at z = 0.1.
Parallel compatible.
NOTE: The plotting assumes the redshifts we are plotting at are (roughly) the same for each model.
Parameters
---------
SnapList : Nested 'array-like`, SnapList[model_number0] = [snapshot0_model0, ..., snapshotN_model0], with length equal to the number of models.
Snapshots that we plot the stellar mass function at for each model.
SMF : Nested 2-dimensional array, SMF[model_number0][snapshot0] = [bin0galaxies, ..., binNgalaxies], with length equal to the number of bins (NB_gal).
The count of galaxies within each stellar mass bin. Bounds are given by 'm_gal_low' and 'm_gal_high' in bins given by 'bin_width'.
simulation_norm : array with length equal to the number of models.
Denotes which simulation each model uses.
0 : MySim
1 : Mini-Millennium
2 : Tiamat (down to z = 5)
3 : Extended Tiamat (down to z = 1.6ish).
4 : Britton's Simulation
5 : Kali
FirstFile, LastFile, NumFile : array of integers with length equal to the number of models.
The file numbers for each model that were read in (defined by the range between [FirstFile, LastFile] inclusive) and the TOTAL number of files for this model (we may only be plotting a subset of the volume).
ResolutionLimit_mean : array of floats with the same shape as SMF.
This is the mean stellar mass for a halo with len (number of N-body simulation particles) between 'stellar_mass_halolen_lower' and 'stellar_mass_halolen_upper'.
model_tags : array of strings with length equal to the number of models.
Strings that contain the tag for each model. Will be placed on the plot.
observations : int
Denotes whether we want to overplot observational results.
0 : Don't plot anything.
1 : Plot Song et al. (2016) at z = 6, 7, 8.
2 : Plot Baldry et al. (2008) at z = 0.1.
3 : Plot both of these.
paper_plot : int
Denotes whether we want to split the plotting over three panels (z = 6, 7, 8) for the paper or keep it all to one figure.
output_tag : string
        Name of the file that will be generated. File will be saved in the current directory with the output format defined by the 'output_format' variable at the beginning of the file.
Returns
-------
No returns.
Generates and saves the plot (named via output_tag).
Units
-----
Stellar Mass is in units of log10(Msun).
'''
## Empty array initialization ##
title = []
normalization_array = []
redshift_labels = []
counts_array = []
bin_middle_array = []
for model_number in range(0, len(SnapList)):
counts_array.append([])
bin_middle_array.append([])
redshift_labels.append([])
####
for model_number in range(0, len(SnapList)): # Does this for each of the models.
## Normalization for each model. ##
if (simulation_norm[model_number] == 0):
AllVars.Set_Params_Mysim()
elif (simulation_norm[model_number] == 1):
AllVars.Set_Params_MiniMill()
elif (simulation_norm[model_number] == 2):
AllVars.Set_Params_Tiamat()
elif (simulation_norm[model_number] == 3):
AllVars.Set_Params_Tiamat_extended()
elif (simulation_norm[model_number] == 4):
AllVars.Set_Params_Britton()
elif(simulation_norm[model_number] == 5):
AllVars.Set_Params_Kali()
box_factor = (LastFile[model_number] - FirstFile[model_number] + 1.0)/(NumFile[model_number]) # This factor allows us to take a sub-volume of the box and scale the results to represent the entire box.
print("We are creating the stellar mass function using {0:.4f} of the box's volume.".format(box_factor))
norm = pow(AllVars.BoxSize,3) / pow(AllVars.Hubble_h, 3) * bin_width * box_factor
normalization_array.append(norm)
####
for snapshot_idx in range(0, len(SnapList[model_number])): # Loops for each snapshot in each model.
tmp = 'z = %.2f' %(AllVars.SnapZ[SnapList[model_number][snapshot_idx]]) # Assigns a redshift label.
redshift_labels[model_number].append(tmp)
## We perform the plotting on Rank 0 so only this rank requires the final counts array. ##
if rank == 0:
counts_total = np.zeros_like(SMF[model_number][snapshot_idx])
else:
counts_total = None
comm.Reduce([SMF[model_number][snapshot_idx], MPI.FLOAT], [counts_total, MPI.FLOAT], op = MPI.SUM, root = 0) # Sum all the stellar mass and pass to Rank 0.
if rank == 0:
counts_array[model_number].append(counts_total)
bin_middle_array[model_number].append(np.arange(m_gal_low, m_gal_high+bin_width, bin_width)[:-1] + bin_width * 0.5)
####
## Plotting ##
if rank == 0: # Plot only on rank 0.
if paper_plot == 0:
f = plt.figure()
ax = plt.subplot(111)
for model_number in range(0, len(SnapList)):
for snapshot_idx in range(0, len(SnapList[model_number])):
if model_number == 0: # We assume the redshifts for each model are the same, we only want to put a legend label for each redshift once.
title = redshift_labels[model_number][snapshot_idx]
else:
title = ''
plt.plot(bin_middle_array[model_number][snapshot_idx], counts_array[model_number][snapshot_idx] / normalization_array[model_number], color = PlotScripts.colors[snapshot_idx], linestyle = PlotScripts.linestyles[model_number], rasterized = True, label = title, linewidth = PlotScripts.global_linewidth)
#print(np.min(np.log10(ResolutionLimit_mean)))
#ax.axvline(np.max(np.log10(ResolutionLimit_mean)), color = 'k', linewidth = PlotScripts.global_linewidth, linestyle = '--')
#ax.text(np.max(np.log10(ResolutionLimit_mean)) + 0.1, 1e-3, "Resolution Limit", color = 'k')
for model_number in range(0, len(SnapList)): # Place legend labels for each of the models. NOTE: Placed after previous loop for proper formatting of labels.
plt.plot(1e100, 1e100, color = 'k', linestyle = PlotScripts.linestyles[model_number], label = model_tags[model_number], rasterized=True, linewidth = PlotScripts.global_linewidth)
## Adjusting axis labels/limits. ##
plt.yscale('log', nonposy='clip')
plt.axis([6, 11.5, 1e-6, 1e-0])
ax.set_xlabel(r'$\log_{10}\ m_{\mathrm{*}} \:[M_{\odot}]$', fontsize = PlotScripts.global_fontsize)
ax.set_ylabel(r'$\Phi\ [\mathrm{Mpc}^{-3}\: \mathrm{dex}^{-1}]$', fontsize = PlotScripts.global_fontsize)
ax.xaxis.set_minor_locator(plt.MultipleLocator(0.25))
ax.set_xticks(np.arange(6.0, 12.0))
if (observations == 1 or observations == 3): # If we wanted to plot Song.
Obs.Get_Data_SMF()
delta = 0.05
caps = 5
## Song (2016) Plotting ##
plt.errorbar(Obs.Song_SMF_z6[:,0], 10**Obs.Song_SMF_z6[:,1], yerr= (10**Obs.Song_SMF_z6[:,1] - 10**Obs.Song_SMF_z6[:,3], 10**Obs.Song_SMF_z6[:,2] - 10**Obs.Song_SMF_z6[:,1]), xerr = 0.25, capsize = caps, elinewidth = PlotScripts.global_errorwidth, alpha = 1.0, lw=2.0, marker='o', ls='none', label = 'Song 2015, z = 6', color = PlotScripts.colors[0], rasterized=True)
plt.errorbar(Obs.Song_SMF_z7[:,0], 10**Obs.Song_SMF_z7[:,1], yerr= (10**Obs.Song_SMF_z7[:,1] - 10**Obs.Song_SMF_z7[:,3], 10**Obs.Song_SMF_z7[:,2] - 10**Obs.Song_SMF_z7[:,1]), xerr = 0.25, capsize = caps, alpha=0.75, elinewidth = PlotScripts.global_errorwidth, lw=1.0, marker='o', ls='none', label = 'Song 2015, z = 7', color = PlotScripts.colors[1], rasterized=True)
plt.errorbar(Obs.Song_SMF_z8[:,0], 10**Obs.Song_SMF_z8[:,1], yerr= (10**Obs.Song_SMF_z8[:,1] - 10**Obs.Song_SMF_z8[:,3], 10**Obs.Song_SMF_z8[:,2] - 10**Obs.Song_SMF_z8[:,1]), xerr = 0.25, capsize = caps, alpha=0.75, elinewidth = PlotScripts.global_errorwidth, lw=1.0, marker='o', ls='none', label = 'Song 2015, z = 8', color = PlotScripts.colors[2], rasterized=True)
####
if ((observations == 2 or observations == 3) and rank == 0): # If we wanted to plot Baldry.
Baldry_xval = np.log10(10 ** Obs.Baldry_SMF_z0[:, 0] /AllVars.Hubble_h/AllVars.Hubble_h)
Baldry_xval = Baldry_xval - 0.26 # convert back to Chabrier IMF
Baldry_yvalU = (Obs.Baldry_SMF_z0[:, 1]+Obs.Baldry_SMF_z0[:, 2]) * AllVars.Hubble_h*AllVars.Hubble_h*AllVars.Hubble_h
Baldry_yvalL = (Obs.Baldry_SMF_z0[:, 1]-Obs.Baldry_SMF_z0[:, 2]) * AllVars.Hubble_h*AllVars.Hubble_h*AllVars.Hubble_h
plt.fill_between(Baldry_xval, Baldry_yvalU, Baldry_yvalL,
facecolor='purple', alpha=0.25, label='Baldry et al. 2008 (z=0.1)')
####
leg = plt.legend(loc='lower left', numpoints=1, labelspacing=0.1)
leg.draw_frame(False) # Don't want a box frame
for t in leg.get_texts(): # Reduce the size of the text
t.set_fontsize(PlotScripts.global_legendsize)
outputFile = './%s%s' %(output_tag, output_format)
plt.savefig(outputFile, bbox_inches='tight') # Save the figure
print('Saved file to {0}'.format(outputFile))
plt.close()
if (paper_plot == 1):
fig, ax = plt.subplots(nrows=1, ncols=3, sharex=False, sharey=True, figsize=(16, 6))
delta_fontsize = 0
caps = 5
ewidth = 1.5
for model_number in range(0, len(SnapList)):
for count in range(len(SnapList[model_number])):
w = np.where((counts_array[model_number][count] > 0))[0]
ax[count].plot(bin_middle_array[model_number][count][w], counts_array[model_number][count][w]
/ normalization_array[model_number], color = PlotScripts.colors[model_number],
linestyle = PlotScripts.linestyles[model_number], rasterized = True,
label = r"$\mathbf{SAGE}$", linewidth = PlotScripts.global_linewidth)
tick_locs = np.arange(6.0, 12.0)
ax[count].set_xticklabels([r"$\mathbf{%d}$" % x for x in tick_locs], fontsize = PlotScripts.global_fontsize)
ax[count].set_xlim([6.8, 10.3])
ax[count].tick_params(which = 'both', direction='in',
width = PlotScripts.global_tickwidth)
ax[count].tick_params(which = 'major', length = PlotScripts.global_ticklength)
ax[count].tick_params(which = 'minor', length = PlotScripts.global_ticklength-2)
ax[count].set_xlabel(r'$\mathbf{log_{10} \: M_{*} \:[M_{\odot}]}$',
fontsize = PlotScripts.global_labelsize - delta_fontsize)
ax[count].xaxis.set_minor_locator(plt.MultipleLocator(0.25))
#ax[count].set_xticks(np.arange(6.0, 12.0))
for axis in ['top','bottom','left','right']: # Adjust axis thickness.
ax[count].spines[axis].set_linewidth(PlotScripts.global_axiswidth)
# Since y-axis is shared, only need to do this once.
ax[0].set_yscale('log', nonposy='clip')
            ax[0].set_yticklabels([r"$\mathbf{10^{-6}}$",r"$\mathbf{10^{-5}}$",r"$\mathbf{10^{-4}}$", r"$\mathbf{10^{-3}}$",
                                   r"$\mathbf{10^{-2}}$",r"$\mathbf{10^{-1}}$"])
ax[0].set_ylim([1e-5, 1e-1])
#ax[0].set_ylabel(r'\mathbf{$\log_{10} \Phi\ [\mathrm{Mpc}^{-3}\: \mathrm{dex}^{-1}]}$',
ax[0].set_ylabel(r'$\mathbf{log_{10} \: \Phi\ [Mpc^{-3}\: dex^{-1}]}$',
fontsize = PlotScripts.global_labelsize - delta_fontsize)
Obs.Get_Data_SMF()
PlotScripts.Plot_SMF_z6(ax[0], errorwidth=ewidth, capsize=caps)
PlotScripts.Plot_SMF_z7(ax[1], errorwidth=ewidth, capsize=caps)
PlotScripts.Plot_SMF_z8(ax[2], errorwidth=ewidth, capsize=caps)
####
ax[0].text(0.7, 0.9, r"$\mathbf{z = 6}$", transform = ax[0].transAxes, fontsize = PlotScripts.global_fontsize - delta_fontsize)
ax[1].text(0.7, 0.9, r"$\mathbf{z = 7}$", transform = ax[1].transAxes, fontsize = PlotScripts.global_fontsize - delta_fontsize)
ax[2].text(0.7, 0.9, r"$\mathbf{z = 8}$", transform = ax[2].transAxes, fontsize = PlotScripts.global_fontsize - delta_fontsize)
#leg = ax[0,0].legend(loc=2, bbox_to_anchor = (0.2, -0.5), numpoints=1, labelspacing=0.1)
leg = ax[0].legend(loc='lower left', numpoints=1, labelspacing=0.1)
leg.draw_frame(False) # Don't want a box frame
for t in leg.get_texts(): # Reduce the size of the text
t.set_fontsize(PlotScripts.global_legendsize - 2)
plt.tight_layout()
outputFile = "{0}_paper{1}".format(output_tag, output_format)
plt.savefig(outputFile, bbox_inches='tight') # Save the figure
print('Saved file to {0}'.format(outputFile))
plt.close()
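def _example_smf_normalization():
    # Illustrative only (not part of the original script): how raw bin counts become
    # a comoving number density Phi [Mpc^-3 dex^-1], as in the norm above.
    # Hypothetical values; BoxSize is in Mpc/h.
    BoxSize, Hubble_h = 100.0, 0.7
    box_factor = 1.0                        # full simulated volume read in
    counts = np.array([120.0, 80.0, 20.0])  # galaxies per stellar-mass bin
    norm = pow(BoxSize, 3) / pow(Hubble_h, 3) * bin_width * box_factor
    return counts / norm                    # Phi in each bin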
##
def plot_fesc_galaxy(SnapList, PlotSnapList, simulation_norm,
mean_galaxy_fesc, std_galaxy_fesc, N_galaxy_fesc,
mean_halo_fesc, std_halo_fesc, N_halo_fesc,
ResolutionLimit_mean, model_tags, paper_plots,
mass_global, fesc_global, Ngamma_global, output_tag):
"""
Plots the escape fraction as a function of stellar/halo mass.
Parallel compatible.
Accepts 3D arrays of the escape fraction binned into Stellar Mass bins to plot the escape fraction for multiple models.
Mass units are log(Msun)
Parameters
---------
SnapList : Nested array, SnapList[model_number0] = [snapshot0_model0, ..., snapshotN_model0], with length equal to the number of models.
Snapshots for each model.
simulation_norm : array with length equal to the number of models.
Denotes which simulation each model uses.
0 : MySim
1 : Mini-Millennium
2 : Tiamat (down to z = 5)
3 : Extended Tiamat (down to z = 1.6ish).
4 : Britton's Simulation
5 : Kali
mean_galaxy_fesc, std_galaxy_fesc, N_galaxy_fesc : Nested 3-dimensional array, mean_galaxy_fesc[model_number0][snapshot0] = [bin0_meanfesc, ..., binN_meanfesc], with length equal to the number of models.
Mean/Standard deviation for fesc in each stellar mass bin, for each [model_number] and [snapshot_number]. N_galaxy_fesc is the number of galaxies placed into each mass bin.
mean_halo_fesc, std_halo_fesc, N_halo_fesc Nested 3-dimensional array, mean_halo_fesc[model_number0][snapshot0] = [bin0_meanfesc, ..., binN_meanfesc], with length equal to the number of models.
Identical to previous except using the halo virial mass for the binning rather than stellar mass.
ResolutionLimit_mean : array of floats with the same shape as mean_galaxy_fesc.
This is the mean stellar mass for a halo with len (number of N-body simulation particles) between 'stellar_mass_halolen_lower' and 'stellar_mass_halolen_upper'.
model_tags : array of strings with length equal to the number of models.
Strings that contain the tag for each model. Will be placed on the plot.
paper_plots: Integer.
Flag to denote whether we should plot a full, 4 panel plot for the
RSAGE paper.
output_tag : string
Name of the file that will be generated.
Returns
-------
No returns.
Generates and saves the plot (named via output_tag).
Units
-----
Mass units are log(Msun).
"""
def adjust_stellarmass_plot(ax):
#ax.axhline(0.20, 0, 100, color ='k', linewidth = PlotScripts.global_linewidth, linestyle = '-.')
#ax.text(7.8, 0.22, r"$f_\mathrm{esc, base}$", color = 'k',
# size = PlotScripts.global_fontsize)
ax.set_xlabel(r'$\mathbf{log_{10} \: M_{*} \:[M_{\odot}]}$',
size = PlotScripts.global_fontsize)
ax.set_ylabel(r'$\mathbf{\langle f_{esc}\rangle_{M_*}}$',
size = PlotScripts.global_labelsize)
ax.set_xlim([6.8, 10])
ax.set_ylim([0.05, 0.45])
#ax.axhline(0.35, 0, 100, color ='k', linewidth = PlotScripts.global_linewidth, linestyle = '-.')
#ax.text(9.1, 0.37, r"$f_\mathrm{esc} = 0.35$", color = 'k',
# size = PlotScripts.global_fontsize)
ax.xaxis.set_minor_locator(mtick.MultipleLocator(0.25))
ax.yaxis.set_minor_locator(mtick.MultipleLocator(0.05))
ax.tick_params(which = 'both', direction='in', width =
PlotScripts.global_tickwidth)
ax.tick_params(which = 'major', length = PlotScripts.global_ticklength)
ax.tick_params(which = 'minor', length = PlotScripts.global_ticklength-2)
for axis in ['top','bottom','left','right']: # Adjust axis thickness.
ax.spines[axis].set_linewidth(PlotScripts.global_axiswidth)
tick_locs = np.arange(6.0, 11.0)
ax.set_xticklabels([r"$\mathbf{%d}$" % x for x in tick_locs],
fontsize = PlotScripts.global_fontsize)
tick_locs = np.arange(0.0, 0.80, 0.10)
ax.set_yticklabels([r"$\mathbf{%.2f}$" % x for x in tick_locs],
fontsize = PlotScripts.global_fontsize)
'''
labels = ax.yaxis.get_ticklabels()
locs = ax.yaxis.get_ticklocs()
for label, loc in zip(labels, locs):
print("{0} {1}".format(label, loc))
'''
leg = ax.legend(loc="upper right", numpoints=1, labelspacing=0.1)
leg.draw_frame(False) # Don't want a box frame
for t in leg.get_texts(): # Reduce the size of the text
t.set_fontsize('medium')
def adjust_paper_plots(ax, model_tags):
ax[1,0].set_xlabel(r'$\mathbf{log_{10} \: M_{*} \:[M_{\odot}]}$',
size = PlotScripts.global_fontsize)
ax[1,1].set_xlabel(r'$\mathbf{log_{10} \: M_{*} \:[M_{\odot}]}$',
size = PlotScripts.global_fontsize)
ax[0,0].set_ylabel(r'$\mathbf{\langle f_{esc}\rangle_{M_*}}$',
size = PlotScripts.global_labelsize)
ax[1,0].set_ylabel(r'$\mathbf{\langle f_{esc}\rangle_{M_*}}$',
size = PlotScripts.global_labelsize)
ax_x = [0, 0, 1, 1]
ax_y = [0, 1, 0, 1]
for count, (x, y) in enumerate(zip(ax_x, ax_y)):
ax[x,y].set_xlim([4.8, 10.4])
ax[x,y].set_ylim([0.00, 0.68])
ax[x,y].yaxis.set_major_locator(mtick.MultipleLocator(0.1))
ax[x,y].xaxis.set_major_locator(mtick.MultipleLocator(1.0))
ax[x,y].yaxis.set_minor_locator(mtick.MultipleLocator(0.05))
ax[x,y].xaxis.set_minor_locator(mtick.MultipleLocator(0.25))
ax[x,y].tick_params(which = 'both', direction='in', width =
PlotScripts.global_tickwidth)
ax[x,y].tick_params(which = 'major', length = PlotScripts.global_ticklength)
ax[x,y].tick_params(which = 'minor',
length = PlotScripts.global_ticklength - 2)
for axis in ['top','bottom','left','right']: # Adjust axis thickness.
ax[x,y].spines[axis].set_linewidth(PlotScripts.global_axiswidth)
print(model_tags[count])
label = model_tags[count]
ax[x,y].text(0.05, 0.65, label, transform = ax[x,y].transAxes, fontsize = PlotScripts.global_fontsize - delta_fontsize)
tick_locs = np.arange(4.0, 11.0)
ax[1,0].set_xticklabels([r"$\mathbf{%d}$" % x for x in tick_locs],
fontsize = PlotScripts.global_fontsize)
ax[1,1].set_xticklabels([r"$\mathbf{%d}$" % x for x in tick_locs],
fontsize = PlotScripts.global_fontsize)
tick_locs = np.arange(-0.1, 0.80, 0.10)
ax[0,0].set_yticklabels([r"$\mathbf{%.2f}$" % x for x in tick_locs],
fontsize = PlotScripts.global_fontsize)
ax[1,0].set_yticklabels([r"$\mathbf{%.2f}$" % x for x in tick_locs],
fontsize = PlotScripts.global_fontsize)
print("x")
labels = ax[1,0].xaxis.get_ticklabels()
locs = ax[1,0].xaxis.get_ticklocs()
for label, loc in zip(labels, locs):
print("{0} {1}".format(label, loc))
print("y")
labels = ax[1,0].yaxis.get_ticklabels()
locs = ax[1,0].yaxis.get_ticklocs()
for label, loc in zip(labels, locs):
print("{0} {1}".format(label, loc))
print("Plotting fesc as a function of stellar mass.")
## Array initialization ##
master_mean_fesc_stellar, master_std_fesc_stellar, master_N_fesc_stellar, master_bin_middle_stellar = \
collect_across_tasks(mean_galaxy_fesc, std_galaxy_fesc, N_galaxy_fesc,
SnapList, PlotSnapList, True, m_gal_low, m_gal_high)
if rank == 0:
if paper_plots == 0:
fig = plt.figure()
ax1 = fig.add_subplot(111)
else:
fig, ax = plt.subplots(nrows=2, ncols=2, sharex='col', sharey='row', figsize=(16, 6))
fig2, ax2 = plt.subplots(nrows=2, ncols=2, sharex='col', sharey='row', figsize=(16, 6))
delta_fontsize = 0
caps = 5
ewidth = 1.5
count_x = 0
for count, model_number in enumerate(range(0, len(SnapList))):
if count == 2:
count_x += 1
print("There were a total of {0} galaxies over the entire redshift range.".format(sum(N_halo_fesc[model_number])))
## Normalization for each model. ##
if (simulation_norm[model_number] == 0):
AllVars.Set_Params_Mysim()
elif (simulation_norm[model_number] == 1):
AllVars.Set_Params_MiniMill()
elif (simulation_norm[model_number] == 2):
AllVars.Set_Params_Tiamat()
elif (simulation_norm[model_number] == 3):
AllVars.Set_Params_Tiamat_extended()
elif (simulation_norm[model_number] == 4):
AllVars.Set_Params_Britton()
elif(simulation_norm[model_number] == 5):
AllVars.Set_Params_Kali()
plot_count = 0
for snapshot_idx in range(0, len(SnapList[model_number])):
if (SnapList[model_number][snapshot_idx] == PlotSnapList[model_number][plot_count]):
if (model_number == 0):
label = r"$\mathbf{z = " + \
str(int(round(AllVars.SnapZ[SnapList[model_number][snapshot_idx]]))) +\
"}$"
else:
label = ""
## Plots as a function of stellar mass ##
                    w = np.where((master_N_fesc_stellar[model_number][snapshot_idx] < 4))[0]  # Bins with fewer than 4 galaxies are too noisy to plot.
master_mean_fesc_stellar[model_number][snapshot_idx][w] = np.nan
if paper_plots == 0:
print(master_mean_fesc_stellar[model_number][snapshot_idx])
ax1.plot(master_bin_middle_stellar[model_number][snapshot_idx],
master_mean_fesc_stellar[model_number][snapshot_idx],
color = PlotScripts.colors[plot_count],
ls = PlotScripts.linestyles[model_number],
rasterized = True, label = label,
lw = PlotScripts.global_linewidth)
else:
ax[count_x, count%2].plot(master_bin_middle_stellar[model_number][snapshot_idx],
master_mean_fesc_stellar[model_number][snapshot_idx],
color = PlotScripts.colors[plot_count],
ls = PlotScripts.linestyles[0],
rasterized = True, label = label,
lw = PlotScripts.global_linewidth)
#w = np.random.randint(0,
# len(mass_global[model_number][snapshot_idx][0]),
# size=500)
#sc = ax2[count_x, count%2].scatter(mass_global[model_number][snapshot_idx][0][w],
# fesc_global[model_number][snapshot_idx][0][w],
# c=np.log10(Ngamma_global[model_number][snapshot_idx][0][w]*1.0e50),
# alpha = 0.5,cmap='plasma')
#plt.colorbar(sc)
#ax2[count_x, count%2].hexbin(mass_global[model_number][snapshot_idx],
# fesc_global[model_number][snapshot_idx],
# C=Ngamma_global[model_number][snapshot_idx])
plot_count += 1
if (plot_count == len(PlotSnapList[model_number])):
break
## Stellar Mass plots ##
if paper_plots == 0:
adjust_stellarmass_plot(ax1)
else:
adjust_paper_plots(ax, model_tags)
leg = ax[0,0].legend(loc="upper right", numpoints=1, labelspacing=0.1)
leg.draw_frame(False) # Don't want a box frame
for t in leg.get_texts(): # Reduce the size of the text
t.set_fontsize('medium')
plt.tight_layout()
plt.subplots_adjust(wspace = 0.0, hspace = 0.0)
#leg = ax2[0,0].legend(loc="upper right", numpoints=1, labelspacing=0.1)
#leg.draw_frame(False) # Don't want a box frame
#for t in leg.get_texts(): # Reduce the size of the text
# t.set_fontsize('medium')
plt.tight_layout()
plt.subplots_adjust(wspace = 0.0, hspace = 0.0)
## Output ##
outputFile = './%s%s' %(output_tag, output_format)
fig.savefig(outputFile, bbox_inches='tight') # Save the figure
print('Saved file to {0}'.format(outputFile))
plt.close(fig)
if paper_plots == 1:
outputFile = './%s_scatter%s' %(output_tag, output_format)
fig2.savefig(outputFile, bbox_inches='tight') # Save the figure
print('Saved file to {0}'.format(outputFile))
plt.close(fig2)
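def _example_mask_sparse_bins():
    # Illustrative only (not part of the original script): the masking idiom used
    # above, where sparse bins are blanked with NaN so matplotlib leaves gaps.
    mean = np.array([0.3, 0.25, 0.2, 0.15])
    N = np.array([10, 3, 50, 0])
    mean[np.where(N < 4)[0]] = np.nan  # -> [0.3, nan, 0.2, nan]
    return mean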
##
def plot_reionmod_galaxy(SnapList, PlotSnapList, simulation_norm,
mean_galaxy_reionmod, std_galaxy_reionmod, N_galaxy_reionmod,
mean_galaxy_reionmod_gnedin, std_galaxy_reionmod_gnedin,
model_tags, paper_plots, output_tag):
"""
"""
def adjust_paper_plots(ax, model_tags):
ax[1,0].set_xlabel(r'$\mathbf{log_{10} \: M_{*} \:[M_{\odot}]}$',
size = PlotScripts.global_fontsize)
ax[1,1].set_xlabel(r'$\mathbf{log_{10} \: M_{*} \:[M_{\odot}]}$',
size = PlotScripts.global_fontsize)
ax[0,0].set_ylabel(r'$\mathbf{\langle ReionMod\rangle_{M_*}}$',
size = PlotScripts.global_labelsize)
ax[1,0].set_ylabel(r'$\mathbf{\langle ReionMod\rangle_{M_*}}$',
size = PlotScripts.global_labelsize)
ax_x = [0, 0, 1, 1]
ax_y = [0, 1, 0, 1]
for count, (x, y) in enumerate(zip(ax_x, ax_y)):
ax[x,y].set_xlim([4.8, 10.4])
ax[x,y].set_ylim([0.00, 1.05])
#ax[x,y].yaxis.set_major_locator(mtick.MultipleLocator(0.1))
ax[x,y].xaxis.set_major_locator(mtick.MultipleLocator(1.0))
#ax[x,y].yaxis.set_minor_locator(mtick.MultipleLocator(0.05))
ax[x,y].xaxis.set_minor_locator(mtick.MultipleLocator(0.25))
ax[x,y].tick_params(which = 'both', direction='in', width =
PlotScripts.global_tickwidth)
ax[x,y].tick_params(which = 'major', length = PlotScripts.global_ticklength)
ax[x,y].tick_params(which = 'minor',
length = PlotScripts.global_ticklength - 2)
for axis in ['top','bottom','left','right']: # Adjust axis thickness.
ax[x,y].spines[axis].set_linewidth(PlotScripts.global_axiswidth)
print(model_tags[count])
label = model_tags[count]
ax[x,y].text(0.05, 0.65, label, transform = ax[x,y].transAxes, fontsize = PlotScripts.global_fontsize - delta_fontsize)
tick_locs = np.arange(4.0, 11.0)
ax[1,0].set_xticklabels([r"$\mathbf{%d}$" % x for x in tick_locs],
fontsize = PlotScripts.global_fontsize)
ax[1,1].set_xticklabels([r"$\mathbf{%d}$" % x for x in tick_locs],
fontsize = PlotScripts.global_fontsize)
#tick_locs = np.arange(-0.1, 0.80, 0.10)
#ax[0,0].set_yticklabels([r"$\mathbf{%.2f}$" % x for x in tick_locs],
#fontsize = PlotScripts.global_fontsize)
#ax[1,0].set_yticklabels([r"$\mathbf{%.2f}$" % x for x in tick_locs],
# fontsize = PlotScripts.global_fontsize)
def adjust_redshift_panels(ax, redshift_tags):
ax[1,0].set_xlabel(r'$\mathbf{log_{10} \: M_{*} \:[M_{\odot}]}$',
size = PlotScripts.global_fontsize)
ax[1,1].set_xlabel(r'$\mathbf{log_{10} \: M_{*} \:[M_{\odot}]}$',
size = PlotScripts.global_fontsize)
ax[0,0].set_ylabel(r'$\mathbf{\langle ReionMod\rangle_{M_*}}$',
size = PlotScripts.global_labelsize)
ax[1,0].set_ylabel(r'$\mathbf{\langle ReionMod\rangle_{M_*}}$',
size = PlotScripts.global_labelsize)
ax_x = [0, 0, 1, 1]
ax_y = [0, 1, 0, 1]
for count, (x, y) in enumerate(zip(ax_x, ax_y)):
ax[x,y].set_xlim([4.8, 10.4])
ax[x,y].set_ylim([0.00, 1.05])
#ax[x,y].yaxis.set_major_locator(mtick.MultipleLocator(0.1))
ax[x,y].xaxis.set_major_locator(mtick.MultipleLocator(1.0))
#ax[x,y].yaxis.set_minor_locator(mtick.MultipleLocator(0.05))
ax[x,y].xaxis.set_minor_locator(mtick.MultipleLocator(0.25))
ax[x,y].tick_params(which = 'both', direction='in', width =
PlotScripts.global_tickwidth)
ax[x,y].tick_params(which = 'major', length = PlotScripts.global_ticklength)
ax[x,y].tick_params(which = 'minor',
length = PlotScripts.global_ticklength - 2)
for axis in ['top','bottom','left','right']: # Adjust axis thickness.
ax[x,y].spines[axis].set_linewidth(PlotScripts.global_axiswidth)
label = redshift_tags[count]
ax[x,y].text(0.05, 0.65, label, transform = ax[x,y].transAxes, fontsize = PlotScripts.global_fontsize - delta_fontsize)
tick_locs = np.arange(4.0, 11.0)
ax[1,0].set_xticklabels([r"$\mathbf{%d}$" % x for x in tick_locs],
fontsize = PlotScripts.global_fontsize)
ax[1,1].set_xticklabels([r"$\mathbf{%d}$" % x for x in tick_locs],
fontsize = PlotScripts.global_fontsize)
print("Reionization Modifier as a function of stellar mass.")
## Array initialization ##
master_mean_reionmod_stellar, master_std_reionmod_stellar, master_N_reionmod_stellar, master_bin_middle_stellar = \
collect_across_tasks(mean_galaxy_reionmod, std_galaxy_reionmod, N_galaxy_reionmod,
SnapList, PlotSnapList, True, m_gal_low, m_gal_high)
master_mean_reionmod_gnedin_stellar, master_std_reionmod_gnedin_stellar, master_N_reionmod_gnedin_stellar, master_bin_middle_stellar = \
collect_across_tasks(mean_galaxy_reionmod_gnedin, std_galaxy_reionmod_gnedin, N_galaxy_reionmod,
SnapList, PlotSnapList, True, m_gal_low, m_gal_high)
if rank == 0:
if paper_plots == 0:
fig = plt.figure()
ax1 = fig.add_subplot(111)
else:
fig, ax = plt.subplots(nrows=2, ncols=2, sharex='col', sharey='row', figsize=(16, 6))
fig2, ax2 = plt.subplots(nrows=2, ncols=2, sharex='col', sharey='row', figsize=(16, 6))
delta_fontsize = 0
caps = 5
ewidth = 1.5
count_x = 0
for count, model_number in enumerate(range(0, len(SnapList))):
if count == 2:
count_x += 1
plot_count = 0
for snapshot_idx in range(0, len(SnapList[model_number])):
if (SnapList[model_number][snapshot_idx] == PlotSnapList[model_number][plot_count]):
if (model_number == 0):
label = r"$\mathbf{z = " + \
str(int(round(AllVars.SnapZ[SnapList[model_number][snapshot_idx]]))) +\
"}$"
else:
label = ""
## Plots as a function of stellar mass ##
                    w = np.where((master_N_reionmod_stellar[model_number][snapshot_idx] < 4))[0]  # Bins with fewer than 4 galaxies are too noisy to plot.
master_mean_reionmod_stellar[model_number][snapshot_idx][w] = np.nan
master_mean_reionmod_gnedin_stellar[model_number][snapshot_idx][w] = np.nan
if paper_plots == 0:
ax1.plot(master_bin_middle_stellar[model_number][snapshot_idx],
master_mean_reionmod_stellar[model_number][snapshot_idx],
color = PlotScripts.colors[plot_count],
ls = PlotScripts.linestyles[model_number],
rasterized = True, label = label,
lw = PlotScripts.global_linewidth)
else:
ax[count_x, count%2].plot(master_bin_middle_stellar[model_number][snapshot_idx],
master_mean_reionmod_stellar[model_number][snapshot_idx],
color = PlotScripts.colors[plot_count],
ls = PlotScripts.linestyles[0],
rasterized = True, label = label,
lw = PlotScripts.global_linewidth)
ax[count_x, count%2].plot(master_bin_middle_stellar[model_number][snapshot_idx],
master_mean_reionmod_gnedin_stellar[model_number][snapshot_idx],
color = PlotScripts.colors[plot_count],
ls = PlotScripts.linestyles[1],
rasterized = True, label = label,
lw = PlotScripts.global_linewidth)
plot_count += 1
if (plot_count == len(PlotSnapList[model_number])):
break
z_labels = []
for model_number in range(0, len(SnapList)):
count_x = 0
plot_count = 0
for count, snapshot_idx in enumerate(range(len(SnapList[model_number]))):
if count == 2:
count_x += 1
if (SnapList[model_number][snapshot_idx] == PlotSnapList[model_number][plot_count]):
label = model_tags[model_number]
if (model_number == 0):
z_label = r"$\mathbf{z = " + \
str(int(round(AllVars.SnapZ[SnapList[model_number][snapshot_idx]]))) +\
"}$"
z_labels.append(z_label)
## Plots as a function of stellar mass ##
                    w = np.where((master_N_reionmod_stellar[model_number][snapshot_idx] < 4))[0]  # Bins with fewer than 4 galaxies are too noisy to plot.
master_mean_reionmod_stellar[model_number][snapshot_idx][w] = np.nan
master_mean_reionmod_gnedin_stellar[model_number][snapshot_idx][w] = np.nan
if (model_number == 0):
print(master_mean_reionmod_stellar[model_number][snapshot_idx])
ax2[count_x, count%2].plot(master_bin_middle_stellar[model_number][snapshot_idx],
master_mean_reionmod_stellar[model_number][snapshot_idx],
color = PlotScripts.colors[model_number],
ls = PlotScripts.linestyles[model_number],
rasterized = True, label = label,
lw = PlotScripts.global_linewidth)
if (model_number == 0):
ax2[count_x, count%2].plot(master_bin_middle_stellar[model_number][snapshot_idx],
master_mean_reionmod_gnedin_stellar[model_number][snapshot_idx],
color = 'k',
ls = '--',
rasterized = True, label = "Gnedin",
lw = PlotScripts.global_linewidth)
plot_count += 1
if (plot_count == len(PlotSnapList[model_number])):
break
## Stellar Mass plots ##
if paper_plots == 0:
adjust_stellarmass_plot(ax1)
else:
adjust_paper_plots(ax, model_tags)
print(z_labels)
adjust_redshift_panels(ax2, z_labels)
leg = ax[0,0].legend(loc="upper right", numpoints=1, labelspacing=0.1)
leg.draw_frame(False) # Don't want a box frame
for t in leg.get_texts(): # Reduce the size of the text
t.set_fontsize('medium')
leg = ax2[0,0].legend(loc="upper right", numpoints=1, labelspacing=0.1)
leg.draw_frame(False) # Don't want a box frame
for t in leg.get_texts(): # Reduce the size of the text
t.set_fontsize('medium')
plt.tight_layout()
plt.subplots_adjust(wspace = 0.0, hspace = 0.0)
#leg = ax2[0,0].legend(loc="upper right", numpoints=1, labelspacing=0.1)
#leg.draw_frame(False) # Don't want a box frame
#for t in leg.get_texts(): # Reduce the size of the text
# t.set_fontsize('medium')
plt.tight_layout()
plt.subplots_adjust(wspace = 0.0, hspace = 0.0)
## Output ##
outputFile = "{0}{1}".format(output_tag, output_format)
fig.savefig(outputFile, bbox_inches='tight') # Save the figure
print('Saved file to {0}'.format(outputFile))
plt.close(fig)
outputFile2 = "{0}_redshiftpanels{1}".format(output_tag, output_format)
fig2.savefig(outputFile2, bbox_inches='tight') # Save the figure
print('Saved file to {0}'.format(outputFile2))
plt.close(fig2)
##
def plot_nion_galaxy(SnapList, PlotSnapList, simulation_norm,
mean_Ngamma_galaxy, std_Ngamma_galaxy, N_Ngamma_galaxy,
model_tags, paper_plots, output_tag):
"""
Plots the number of ionizing photons emitted (not necessarily escaped) as a
function of galaxy stellar mass.
Parallel compatible.
    Accepts 3D arrays of ionizing photon counts binned into stellar mass bins to plot Ngamma for multiple models.
Mass units are log(Msun)
Parameters
---------
SnapList : Nested array, SnapList[model_number0] = [snapshot0_model0, ..., snapshotN_model0], with length equal to the number of models.
Snapshots for each model.
simulation_norm : array with length equal to the number of models.
Denotes which simulation each model uses.
0 : MySim
1 : Mini-Millennium
2 : Tiamat (down to z = 5)
3 : Extended Tiamat (down to z = 1.6ish).
4 : Britton's Simulation
5 : Kali
    mean_Ngamma_galaxy, std_Ngamma_galaxy, N_Ngamma_galaxy : Nested
        3-dimensional array, mean_Ngamma_galaxy[model_number0][snapshot0] = [bin0_meanNgamma, ..., binN_meanNgamma], with length equal to the number of models.
        Mean/Standard deviation for Ngamma in each stellar mass bin, for each
        [model_number] and [snapshot_number]. N_Ngamma_galaxy is the number
        of galaxies placed into each mass bin.
model_tags : array of strings with length equal to the number of models.
Strings that contain the tag for each model. Will be placed on the plot.
paper_plots: Integer.
Flag to denote whether we should plot a full, 4 panel plot for the
RSAGE paper.
output_tag : string
Name of the file that will be generated.
Returns
-------
No returns.
Generates and saves the plot (named via output_tag).
Units
-----
Mass units are log(Msun).
Ngamma units are 1.0e50 photons/s.
"""
def adjust_stellarmass_plot(ax):
#ax.axhline(0.20, 0, 100, color ='k', linewidth = PlotScripts.global_linewidth, linestyle = '-.')
#ax.text(7.8, 0.22, r"$f_\mathrm{esc, base}$", color = 'k',
# size = PlotScripts.global_fontsize)
ax.set_xlabel(r'$\mathbf{log_{10} \: M_{*} \:[M_{\odot}]}$',
size = PlotScripts.global_fontsize)
ax.set_ylabel(r'$\mathbf{\log_{10}\langle f_{esc} N_\gamma\rangle_{M_*}}$',
size = PlotScripts.global_labelsize)
ax.set_xlim([6.8, 10])
#ax.set_ylim([0.05, 0.45])
#ax.axhline(0.35, 0, 100, color ='k', linewidth = PlotScripts.global_linewidth, linestyle = '-.')
#ax.text(9.1, 0.37, r"$f_\mathrm{esc} = 0.35$", color = 'k',
# size = PlotScripts.global_fontsize)
ax.xaxis.set_minor_locator(mtick.MultipleLocator(0.25))
#ax.yaxis.set_minor_locator(mtick.MultipleLocator(0.05))
ax.tick_params(which = 'both', direction='in', width =
PlotScripts.global_tickwidth)
ax.tick_params(which = 'major', length = PlotScripts.global_ticklength)
ax.tick_params(which = 'minor', length = PlotScripts.global_ticklength-2)
for axis in ['top','bottom','left','right']: # Adjust axis thickness.
ax.spines[axis].set_linewidth(PlotScripts.global_axiswidth)
tick_locs = np.arange(6.0, 11.0)
ax.set_xticklabels([r"$\mathbf{%d}$" % x for x in tick_locs],
fontsize = PlotScripts.global_fontsize)
#tick_locs = np.arange(0.0, 0.80, 0.10)
#ax.set_yticklabels([r"$\mathbf{%.2f}$" % x for x in tick_locs],
# fontsize = PlotScripts.global_fontsize)
'''
labels = ax.yaxis.get_ticklabels()
locs = ax.yaxis.get_ticklocs()
for label, loc in zip(labels, locs):
print("{0} {1}".format(label, loc))
'''
leg = ax.legend(loc="upper right", numpoints=1, labelspacing=0.1)
leg.draw_frame(False) # Don't want a box frame
for t in leg.get_texts(): # Reduce the size of the text
t.set_fontsize('medium')
def adjust_paper_plots(ax, z_tags):
ax[1,0].set_xlabel(r'$\mathbf{log_{10} \: M_{*} \:[M_{\odot}]}$',
size = PlotScripts.global_fontsize)
ax[1,1].set_xlabel(r'$\mathbf{log_{10} \: M_{*} \:[M_{\odot}]}$',
size = PlotScripts.global_fontsize)
ax[0,0].set_ylabel(r'$\mathbf{\Sigma log_{10}\langle f_{esc} N_\gamma\rangle_{M_*}}$',
size = PlotScripts.global_labelsize - 10)
ax[1,0].set_ylabel(r'$\mathbf{\Sigma log_{10}\langle f_{esc} N_\gamma\rangle_{M_*}}$',
size = PlotScripts.global_labelsize - 10)
ax_x = [0, 0, 1, 1]
ax_y = [0, 1, 0, 1]
for count, (x, y) in enumerate(zip(ax_x, ax_y)):
ax[x,y].set_xlim([4.8, 10.4])
ax[x,y].set_ylim([47, 55])
#ax[x,y].yaxis.set_major_locator(mtick.MultipleLocator(0.1))
ax[x,y].xaxis.set_major_locator(mtick.MultipleLocator(1.0))
#ax[x,y].yaxis.set_minor_locator(mtick.MultipleLocator(0.05))
ax[x,y].xaxis.set_minor_locator(mtick.MultipleLocator(0.25))
ax[x,y].tick_params(which = 'both', direction='in', width =
PlotScripts.global_tickwidth)
ax[x,y].tick_params(which = 'major', length = PlotScripts.global_ticklength)
for axis in ['top','bottom','left','right']: # Adjust axis thickness.
ax[x,y].spines[axis].set_linewidth(PlotScripts.global_axiswidth)
print(z_tags[count])
label = r"$\mathbf{z = " + \
str(int(round(float(z_tags[count])))) +\
"}$"
ax[x,y].text(0.7, 0.8, label, transform = ax[x,y].transAxes, fontsize = PlotScripts.global_fontsize - delta_fontsize)
tick_locs = np.arange(4.0, 11.0)
ax[1,0].set_xticklabels([r"$\mathbf{%d}$" % x for x in tick_locs],
fontsize = PlotScripts.global_fontsize)
ax[1,1].set_xticklabels([r"$\mathbf{%d}$" % x for x in tick_locs],
fontsize = PlotScripts.global_fontsize)
#tick_locs = np.arange(0.0, 0.80, 0.10)
#ax[0,0].set_yticklabels([r"$\mathbf{%.2f}$" % x for x in tick_locs],
# fontsize = PlotScripts.global_fontsize)
#ax[1,0].set_yticklabels([r"$\mathbf{%.2f}$" % x for x in tick_locs],
# fontsize = PlotScripts.global_fontsize)
print("x")
labels = ax[1,0].xaxis.get_ticklabels()
locs = ax[1,0].xaxis.get_ticklocs()
for label, loc in zip(labels, locs):
print("{0} {1}".format(label, loc))
print("y")
labels = ax[1,0].yaxis.get_ticklabels()
locs = ax[1,0].yaxis.get_ticklocs()
for label, loc in zip(labels, locs):
print("{0} {1}".format(label, loc))
print("Plotting Ngamma*fesc as a function of stellar mass.")
## Array initialization ##
master_mean_Ngamma_stellar, master_std_Ngamma_stellar, master_N_Ngamma_stellar, master_bin_middle_stellar = \
collect_across_tasks(mean_Ngamma_galaxy, std_Ngamma_galaxy, N_Ngamma_galaxy,
SnapList, PlotSnapList, True, m_gal_low, m_gal_high)
if rank == 0:
if paper_plots == 0:
fig = plt.figure()
ax1 = fig.add_subplot(111)
else:
fig, ax = plt.subplots(nrows=2, ncols=2, sharex='col', sharey='row', figsize=(16, 6))
delta_fontsize = 0
caps = 5
ewidth = 1.5
z_tags = np.zeros_like(model_tags, dtype=np.float32)
for model_number in range(0, len(SnapList)):
count_x = 0
## Normalization for each model. ##
if (simulation_norm[model_number] == 0):
AllVars.Set_Params_Mysim()
elif (simulation_norm[model_number] == 1):
AllVars.Set_Params_MiniMill()
elif (simulation_norm[model_number] == 2):
AllVars.Set_Params_Tiamat()
elif (simulation_norm[model_number] == 3):
AllVars.Set_Params_Tiamat_extended()
elif (simulation_norm[model_number] == 4):
AllVars.Set_Params_Britton()
elif(simulation_norm[model_number] == 5):
AllVars.Set_Params_Kali()
plot_count = 0
for count, snapshot_idx in enumerate(range(0, len(SnapList[model_number]))):
if (SnapList[model_number][snapshot_idx] == PlotSnapList[model_number][plot_count]):
if count == 2:
count_x += 1
label = model_tags[model_number]
z_tags[count] = float(AllVars.SnapZ[SnapList[model_number][snapshot_idx]])
## Plots as a function of stellar mass ##
                    w = np.where((master_N_Ngamma_stellar[model_number][snapshot_idx] < 4))[0]  # Bins with fewer than 4 galaxies are too noisy to plot.
master_mean_Ngamma_stellar[model_number][snapshot_idx][w] = np.nan
if paper_plots == 0:
ax1.plot(master_bin_middle_stellar[model_number][snapshot_idx],
np.log10(master_mean_Ngamma_stellar[model_number][snapshot_idx]*1.0e50),
color = PlotScripts.colors[plot_count],
ls = PlotScripts.linestyles[model_number],
rasterized = True, label = label,
lw = PlotScripts.global_linewidth)
else:
ax[count_x, count%2].plot(master_bin_middle_stellar[model_number][snapshot_idx],
np.log10(master_mean_Ngamma_stellar[model_number][snapshot_idx]*1.0e50),
color = PlotScripts.colors[model_number],
ls = PlotScripts.linestyles[model_number],
rasterized = True, label = label,
lw = PlotScripts.global_linewidth)
plot_count += 1
if (plot_count == len(PlotSnapList[model_number])):
break
## Stellar Mass plots ##
if paper_plots == 0:
adjust_stellarmass_plot(ax1)
else:
adjust_paper_plots(ax, z_tags)
leg = ax[0,0].legend(loc="upper left", numpoints=1, labelspacing=0.1)
leg.draw_frame(False) # Don't want a box frame
for t in leg.get_texts(): # Reduce the size of the text
t.set_fontsize('medium')
plt.tight_layout()
plt.subplots_adjust(wspace = 0.0, hspace = 0.0)
## Output ##
outputFile = './%s%s' %(output_tag, output_format)
fig.savefig(outputFile, bbox_inches='tight') # Save the figure
print('Saved file to {0}'.format(outputFile))
plt.close(fig)
##
def plot_photo_galaxy(SnapList, PlotSnapList, simulation_norm,
mean_photo_galaxy, std_photo_galaxy, N_photo_galaxy,
model_tags, paper_plots, output_tag):
"""
Plots the photoionization rate as a function of galaxy stellar mass.
Parallel compatible.
Accepts 3D arrays of the photoionization rate binned into stellar mass bins to plot the rate for multiple models.
Mass units are log(Msun)
Parameters
----------
SnapList : Nested array, SnapList[model_number0] = [snapshot0_model0, ..., snapshotN_model0], with length equal to the number of models.
Snapshots for each model.
simulation_norm : array with length equal to the number of models.
Denotes which simulation each model uses.
0 : MySim
1 : Mini-Millennium
2 : Tiamat (down to z = 5)
3 : Extended Tiamat (down to z = 1.6ish).
4 : Britton's Simulation
5 : Kali
mean_photo_galaxy, std_photo_galaxy, N_photo_galaxy : Nested
3-dimensional array, mean_photo_galaxy[model_number0][snapshot0] =
[bin0_meanphoto, ..., binN_meanphoto], with length equal to the number of models.
Mean/Standard deviation for the photoionization rate in each stellar mass
bin, for each [model_number] and [snapshot_number]. N_photo_galaxy is
the number of galaxies placed into each mass bin.
model_tags : array of strings with length equal to the number of models.
Strings that contain the tag for each model. Will be placed on the plot.
paper_plots: Integer.
Flag to denote whether we should plot a full, 4 panel plot for the
RSAGE paper.
output_tag : string
Name of the file that will be generated.
Returns
-------
No returns.
Generates and saves the plot (named via output_tag).
Units
-----
Mass units are log(Msun).
Photoionization rate units are s^-1.
"""
def adjust_stellarmass_plot(ax):
ax.set_xlabel(r'$\mathbf{log_{10} \: M_{*} \:[M_{\odot}]}$',
size = PlotScripts.global_fontsize)
ax.set_ylabel(r'$\mathbf{log_{10} \: \Gamma \: [s^{-1}]}$',
size = PlotScripts.global_labelsize)
ax.set_xlim([4.8, 10])
#ax.set_ylim([0.05, 0.45])
ax.xaxis.set_minor_locator(mtick.MultipleLocator(0.25))
#ax.yaxis.set_minor_locator(mtick.MultipleLocator(0.05))
ax.tick_params(which = 'both', direction='in', width =
PlotScripts.global_tickwidth)
ax.tick_params(which = 'major', length = PlotScripts.global_ticklength)
ax.tick_params(which = 'minor', length = PlotScripts.global_ticklength-2)
for axis in ['top','bottom','left','right']: # Adjust axis thickness.
ax.spines[axis].set_linewidth(PlotScripts.global_axiswidth)
leg = ax.legend(loc="lower right", numpoints=1, labelspacing=0.1)
leg.draw_frame(False) # Don't want a box frame
for t in leg.get_texts(): # Reduce the size of the text
t.set_fontsize('medium')
print("Plotting photoionization rate as a function of stellar mass.")
## Array initialization ##
master_mean_photo_stellar, master_std_photo_stellar, master_N_photo_stellar, master_bin_middle_stellar = \
collect_across_tasks(mean_photo_galaxy, std_photo_galaxy, N_photo_galaxy,
SnapList, PlotSnapList, True, m_gal_low, m_gal_high)
if rank == 0:
if paper_plots == 0:
fig = plt.figure()
ax1 = fig.add_subplot(111)
else:
pass
for model_number in range(0, len(SnapList)):
count_x = 0
## Normalization for each model. ##
if (simulation_norm[model_number] == 0):
AllVars.Set_Params_Mysim()
elif (simulation_norm[model_number] == 1):
AllVars.Set_Params_MiniMill()
elif (simulation_norm[model_number] == 2):
AllVars.Set_Params_Tiamat()
elif (simulation_norm[model_number] == 3):
AllVars.Set_Params_Tiamat_extended()
elif (simulation_norm[model_number] == 4):
AllVars.Set_Params_Britton()
elif(simulation_norm[model_number] == 5):
AllVars.Set_Params_Kali()
plot_count = 0
for count, snapshot_idx in enumerate(range(0, len(SnapList[model_number]))):
if (SnapList[model_number][snapshot_idx] == PlotSnapList[model_number][plot_count]):
if (model_number == 0):
label = r"$\mathbf{z = " + \
str(int(round(AllVars.SnapZ[SnapList[model_number][snapshot_idx]]))) +\
"}$"
else:
label = ""
## Plots as a function of stellar mass ##
w = np.where((master_N_photo_stellar[model_number][snapshot_idx] < 4))[0] # Mask bins with fewer than 4 galaxies; too few to plot a meaningful mean.
master_mean_photo_stellar[model_number][snapshot_idx][w] = np.nan
if paper_plots == 0:
ax1.plot(master_bin_middle_stellar[model_number][snapshot_idx],
np.log10(master_mean_photo_stellar[model_number][snapshot_idx]),
color = PlotScripts.colors[plot_count],
ls = PlotScripts.linestyles[model_number],
rasterized = True, label = label,
lw = PlotScripts.global_linewidth)
else:
pass
plot_count += 1
if (plot_count == len(PlotSnapList[model_number])):
break
for model_number in range(0, len(SnapList)):
ax1.plot(np.nan, np.nan, color = 'k',
label = model_tags[model_number],
lw = PlotScripts.global_linewidth,
ls = PlotScripts.linestyles[model_number])
## Stellar Mass plots ##
if paper_plots == 0:
adjust_stellarmass_plot(ax1)
else:
pass
## Output ##
outputFile = './%s%s' %(output_tag, output_format)
fig.savefig(outputFile, bbox_inches='tight') # Save the figure
print('Saved file to {0}'.format(outputFile))
plt.close(fig)
##
##
def plot_sfr_galaxy(SnapList, PlotSnapList, simulation_norm,
mean_galaxy_sfr, std_galaxy_sfr,
mean_galaxy_ssfr, std_galaxy_ssfr,
N_galaxy, model_tags, output_tag):
"""
Plots the star formation rate (SFR) and specific star formation rate (sSFR) as functions of stellar mass.
Parallel compatible.
Accepts 3D arrays of the SFR and sSFR binned into stellar mass bins.
Mass units are log(Msun).
Parameters
----------
SnapList : Nested array, SnapList[model_number0] = [snapshot0_model0, ..., snapshotN_model0], with length equal to the number of models.
Snapshots for each model.
simulation_norm : array with length equal to the number of models.
Denotes which simulation each model uses.
0 : MySim
1 : Mini-Millennium
2 : Tiamat (down to z = 5)
3 : Extended Tiamat (down to z = 1.6ish).
4 : Britton's Simulation
5 : Kali
mean_galaxy_sfr, std_galaxy_sfr, mean_galaxy_ssfr, std_galaxy_ssfr, N_galaxy : Nested 3-dimensional arrays,
mean_galaxy_sfr[model_number0][snapshot0] = [bin0_meansfr, ..., binN_meansfr],
with length equal to the number of models.
Mean/Standard deviation of the SFR and sSFR in each stellar mass bin, for each [model_number] and [snapshot_number].
N_galaxy is the number of galaxies placed into each mass bin.
model_tags : array of strings with length equal to the number of models.
Strings that contain the tag for each model. Will be placed on the plot.
output_tag : string
Name of the file that will be generated.
Returns
-------
No returns.
Generates and saves the plot (named via output_tag).
Units
-----
Mass units are log10(Msun).
"""
def adjust_sfr_plot(ax):
ax.set_xlabel(r'$\log_{10}\ M_*\ [M_{\odot}]$',
size = PlotScripts.global_fontsize)
ax.set_ylabel(r'$\mathbf{\langle \mathrm{SFR}\rangle_{M_*}\:[M_\odot\mathrm{yr}^{-1}]}$',
size = PlotScripts.global_labelsize)
ax.set_xlim([4.8, 10])
ax.set_ylim([-3, 2])
ax.xaxis.set_minor_locator(mtick.MultipleLocator(0.25))
ax.yaxis.set_minor_locator(mtick.MultipleLocator(0.25))
ax.tick_params(which = 'both', direction='in', width =
PlotScripts.global_tickwidth)
ax.tick_params(which = 'major', length = PlotScripts.global_ticklength)
ax.tick_params(which = 'minor', length = PlotScripts.global_ticklength-2)
for axis in ['top','bottom','left','right']: # Adjust axis thickness.
ax.spines[axis].set_linewidth(PlotScripts.global_axiswidth)
tick_locs = np.arange(6.0, 11.0)
ax.set_xticklabels([r"$\mathbf{%d}$" % x for x in tick_locs],
fontsize = PlotScripts.global_fontsize)
leg = ax.legend(loc="upper right", numpoints=1, labelspacing=0.1)
leg.draw_frame(False) # Don't want a box frame
for t in leg.get_texts(): # Reduce the size of the text
t.set_fontsize('medium')
def adjust_ssfr_plot(ax):
ax.set_xlabel(r'$\log_{10}\ M_*\ [M_{\odot}]$',
size = PlotScripts.global_fontsize)
ax.set_ylabel(r'$\mathbf{\langle\mathrm{sSFR}\rangle_{M_*}\:[\mathrm{yr^{-1}}]}$',
size = PlotScripts.global_labelsize)
ax.set_xlim([4.8, 10])
ax.set_ylim([-9, -4])
ax.xaxis.set_minor_locator(mtick.MultipleLocator(0.25))
ax.yaxis.set_minor_locator(mtick.MultipleLocator(0.1))
ax.tick_params(which = 'both', direction='in', width =
PlotScripts.global_tickwidth)
ax.tick_params(which = 'major', length = PlotScripts.global_ticklength)
ax.tick_params(which = 'minor', length = PlotScripts.global_ticklength-2)
for axis in ['top','bottom','left','right']: # Adjust axis thickness.
ax.spines[axis].set_linewidth(PlotScripts.global_axiswidth)
tick_locs = np.arange(6.0, 11.0)
ax.set_xticklabels([r"$\mathbf{%d}$" % x for x in tick_locs],
fontsize = PlotScripts.global_fontsize)
leg = ax.legend(loc="upper right", numpoints=1, labelspacing=0.1)
leg.draw_frame(False) # Don't want a box frame
for t in leg.get_texts(): # Reduce the size of the text
t.set_fontsize('medium')
print("Plotting sSFR as a function of stellar mass.")
## Array initialization ##
master_mean_sfr_stellar, master_std_sfr_stellar, master_N_sfr_stellar, master_bin_middle_stellar = \
collect_across_tasks(mean_galaxy_sfr, std_galaxy_sfr, N_galaxy,
SnapList, PlotSnapList, True, m_gal_low, m_gal_high)
master_mean_ssfr_stellar, master_std_ssfr_stellar, master_N_ssfr_stellar, master_bin_middle_stellar = \
collect_across_tasks(mean_galaxy_ssfr, std_galaxy_ssfr, N_galaxy,
SnapList, PlotSnapList, True, m_gal_low, m_gal_high)
if rank == 0:
fig = plt.figure()
ax1 = fig.add_subplot(111)
fig2 = plt.figure()
ax2 = fig2.add_subplot(111)
for model_number in range(0, len(SnapList)):
## Normalization for each model. ##
if (simulation_norm[model_number] == 0):
AllVars.Set_Params_Mysim()
elif (simulation_norm[model_number] == 1):
AllVars.Set_Params_MiniMill()
elif (simulation_norm[model_number] == 2):
AllVars.Set_Params_Tiamat()
elif (simulation_norm[model_number] == 3):
AllVars.Set_Params_Tiamat_extended()
elif (simulation_norm[model_number] == 4):
AllVars.Set_Params_Britton()
elif(simulation_norm[model_number] == 5):
AllVars.Set_Params_Kali()
plot_count = 0
for snapshot_idx in range(0, len(SnapList[model_number])):
if (SnapList[model_number][snapshot_idx] == PlotSnapList[model_number][plot_count]):
if (model_number == 0):
label = r"$\mathbf{z = " + \
str(int(round(AllVars.SnapZ[SnapList[model_number][snapshot_idx]]))) +\
"}$"
else:
label = ""
## Plots as a function of stellar mass ##
ax1.plot(master_bin_middle_stellar[model_number][snapshot_idx],
master_mean_sfr_stellar[model_number][snapshot_idx],
color = PlotScripts.colors[plot_count],
ls = PlotScripts.linestyles[model_number],
rasterized = True, label = label,
lw = PlotScripts.global_linewidth)
ax2.plot(master_bin_middle_stellar[model_number][snapshot_idx],
master_mean_ssfr_stellar[model_number][snapshot_idx],
color = PlotScripts.colors[plot_count],
ls = PlotScripts.linestyles[model_number],
rasterized = True, label = label,
lw = PlotScripts.global_linewidth)
plot_count += 1
if (plot_count == len(PlotSnapList[model_number])):
break
## Stellar Mass plots ##
adjust_sfr_plot(ax1)
adjust_ssfr_plot(ax2)
## Output ##
outputFile = "./{0}SFR{1}".format(output_tag, output_format)
fig.savefig(outputFile, bbox_inches='tight') # Save the figure
print('Saved file to {0}'.format(outputFile))
outputFile = "./{0}sSFR{1}".format(output_tag, output_format)
fig2.savefig(outputFile, bbox_inches='tight') # Save the figure
print('Saved file to {0}'.format(outputFile))
plt.close(fig)
plt.close(fig2)
##
##
def plot_fej_Ngamma(SnapList, PlotSnapList, simulation_norm,
mean_Ngamma_fej, std_Ngamma_fej,
N_fej, model_tags, output_tag):
def adjust_plot(ax):
ax.set_xlabel(r'$\mathbf{f_\mathrm{ej}}$',
size = PlotScripts.global_fontsize)
ax.set_ylabel(r'$\mathbf{\log_{10}\langle N_\gamma\rangle_{f_{ej}}}$',
size = PlotScripts.global_labelsize)
ax.set_xlim([0.0, 1.0])
#ax.set_ylim([0.05, 0.45])
#ax.axhline(0.35, 0, 100, color ='k', linewidth = PlotScripts.global_linewidth, linestyle = '-.')
#ax.text(9.1, 0.37, r"$f_\mathrm{esc} = 0.35$", color = 'k',
# size = PlotScripts.global_fontsize)
ax.xaxis.set_minor_locator(mtick.MultipleLocator(0.10))
#ax.yaxis.set_minor_locator(mtick.MultipleLocator(0.05))
ax.tick_params(which = 'both', direction='in', width =
PlotScripts.global_tickwidth)
ax.tick_params(which = 'major', length = PlotScripts.global_ticklength)
ax.tick_params(which = 'minor', length = PlotScripts.global_ticklength-2)
for axis in ['top','bottom','left','right']: # Adjust axis thickness.
ax.spines[axis].set_linewidth(PlotScripts.global_axiswidth)
leg = ax.legend(loc="upper right", numpoints=1, labelspacing=0.1)
leg.draw_frame(False) # Don't want a box frame
for t in leg.get_texts(): # Reduce the size of the text
t.set_fontsize('medium')
## Array initialization ##
master_mean_Ngamma_fej, master_std_Ngamma_fej, master_N_Ngamma_fej, master_bin_middle_fej = \
collect_across_tasks(mean_Ngamma_fej, std_Ngamma_fej, N_fej,
SnapList, PlotSnapList, True, fej_low, fej_high,
fej_bin_width)
if rank == 0:
fig = plt.figure()
ax1 = fig.add_subplot(111)
ax2 = ax1.twinx()
for model_number in range(0, len(SnapList)):
## Normalization for each model. ##
if (simulation_norm[model_number] == 0):
AllVars.Set_Params_Mysim()
elif (simulation_norm[model_number] == 1):
AllVars.Set_Params_MiniMill()
elif (simulation_norm[model_number] == 2):
AllVars.Set_Params_Tiamat()
elif (simulation_norm[model_number] == 3):
AllVars.Set_Params_Tiamat_extended()
elif (simulation_norm[model_number] == 4):
AllVars.Set_Params_Britton()
elif(simulation_norm[model_number] == 5):
AllVars.Set_Params_Kali()
plot_count = 0
for snapshot_idx in range(0, len(SnapList[model_number])):
if (SnapList[model_number][snapshot_idx] == PlotSnapList[model_number][plot_count]):
label = model_tags[model_number]
w = np.where((master_N_Ngamma_fej[model_number][snapshot_idx] < 4))[0] # Mask bins with fewer than 4 galaxies; too few to plot a meaningful mean.
master_mean_Ngamma_fej[model_number][snapshot_idx][w] = np.nan
ax1.plot(master_bin_middle_fej[model_number][snapshot_idx],
np.log10(master_mean_Ngamma_fej[model_number][snapshot_idx]*1.0e50),
color = PlotScripts.colors[plot_count],
ls = PlotScripts.linestyles[model_number],
rasterized = True, label = label,
lw = PlotScripts.global_linewidth)
plot_count += 1
if (plot_count == len(PlotSnapList[model_number])):
break
adjust_plot(ax1)
leg = ax1.legend(loc="upper center", numpoints=1, labelspacing=0.1)
leg.draw_frame(False) # Don't want a box frame
for t in leg.get_texts(): # Reduce the size of the text
t.set_fontsize('medium')
plt.tight_layout()
## Output ##
outputFile = './%s%s' %(output_tag, output_format)
fig.savefig(outputFile, bbox_inches='tight') # Save the figure
print('Saved file to {0}'.format(outputFile))
plt.close(fig)
def plot_ejectedfraction(SnapList, PlotSnapList, simulation_norm, mean_mvir_ejected,
std_mvir_ejected, N_ejected, mean_ejected_z,
std_ejected_z, N_z, model_tags, output_tag):
'''
Plots the ejected fraction as a function of the halo mass.
Parallel compatible.
Accepts a 3D array of the ejected fraction so we can plot for multiple models and redshifts.
Parameters
----------
SnapList : Nested array, SnapList[model_number0] = [snapshot0_model0, ..., snapshotN_model0], with length equal to the number of models.
Snapshots for each model.
mean_mvir_ejected, std_mvir_ejected, N_ejected : Nested 3-dimensional array, mean_mvir_ejected[model_number0][snapshot0] = [bin0_meanejected, ..., binN_meanejected], with length equal to the number of models.
Mean/Standard deviation for the escape fraction binned into Halo Mass bins. N_ejected is the number of data points in each bin. Bounds are given by 'm_low' and 'm_high' in bins given by 'bin_width'.
model_tags : array of strings with length equal to the number of models.
Strings that contain the tag for each model. Will be placed on the plot.
output_tag : string
Name of the file that will be generated.
Returns
-------
No returns.
Generates and saves the plot (named via output_tag).
Units
-----
Halo Mass is in units of log10(Msun).
'''
print("Plotting the Ejected Fraction as a function of halo mass.")
master_mean_ejected_halo, master_std_ejected_halo, master_N_ejected_halo, master_bin_middle_halo = \
collect_across_tasks(mean_mvir_ejected, std_mvir_ejected, N_ejected, SnapList,
PlotSnapList, True, m_low, m_high)
master_mean_ejected_z, master_std_ejected_z, master_N_ejected_z, _ = \
collect_across_tasks(mean_ejected_z, std_ejected_z, N_z, SnapList)
if rank == 0:
fig1 = plt.figure()
ax1 = fig1.add_subplot(111)
fig2 = plt.figure()
ax2 = fig2.add_subplot(111)
for model_number in range(0, len(SnapList)):
if(simulation_norm[model_number] == 1):
cosmo = AllVars.Set_Params_MiniMill()
elif(simulation_norm[model_number] == 3):
cosmo = AllVars.Set_Params_Tiamat_extended()
elif(simulation_norm[model_number] == 4):
cosmo = AllVars.Set_Params_Britton()
elif(simulation_norm[model_number] == 5):
cosmo = AllVars.Set_Params_Kali()
for snapshot_idx in range(0, len(PlotSnapList[model_number])):
label = "z = %.2f" % AllVars.SnapZ[PlotSnapList[model_number][snapshot_idx]]
ax1.plot(master_bin_middle_halo[model_number][snapshot_idx],
master_mean_ejected_halo[model_number][snapshot_idx],
color = PlotScripts.colors[snapshot_idx],
linestyle = PlotScripts.linestyles[model_number],
label = label, lw = PlotScripts.global_linewidth)
ax2.plot((AllVars.t_BigBang - AllVars.Lookback_Time[SnapList[model_number]]) * 1.0e3,
master_mean_ejected_z[model_number],
color = PlotScripts.colors[model_number],
label = model_tags[model_number],
ls = PlotScripts.linestyles[model_number],
lw = PlotScripts.global_linewidth)
for model_number in range(0, len(SnapList)): # Just plot some garbage to get the legend labels correct.
ax1.plot(np.nan, np.nan, color = 'k', linestyle = PlotScripts.linestyles[model_number], rasterized = True, label = model_tags[model_number], linewidth = PlotScripts.global_linewidth)
ax1.set_xlabel(r'$\log_{10}\ M_{\mathrm{vir}}\ [M_{\odot}]$', size = PlotScripts.global_fontsize)
ax1.set_ylabel(r'$\mathrm{Ejected \: Fraction}$', size = PlotScripts.global_fontsize)
ax1.set_xlim([8.0, 12])
ax1.set_ylim([-0.05, 1.0])
ax1.xaxis.set_minor_locator(mtick.MultipleLocator(0.1))
ax1.yaxis.set_minor_locator(mtick.MultipleLocator(0.025))
leg = ax1.legend(loc=1, numpoints=1, labelspacing=0.1)
leg.draw_frame(False) # Don't want a box frame
for t in leg.get_texts(): # Reduce the size of the text
t.set_fontsize('medium')
outputFile = "./{0}{1}".format(output_tag, output_format)
fig1.savefig(outputFile, bbox_inches='tight') # Save the figure
print('Saved file to {0}'.format(outputFile))
plt.close(fig1)
ax2.set_xlabel(r"$\mathbf{Time \: since \: Big \: Bang \: [Myr]}$", fontsize = PlotScripts.global_labelsize)
tick_locs = np.arange(200.0, 1000.0, 100.0)
tick_labels = [r"$\mathbf{%d}$" % x for x in tick_locs]
ax2.xaxis.set_major_locator(mtick.MultipleLocator(100))
ax2.set_xticklabels(tick_labels, fontsize = PlotScripts.global_fontsize)
ax2.set_xlim(PlotScripts.time_xlim)
ax2.set_ylabel(r'$\mathbf{Mean f_{ej}}$', fontsize = PlotScripts.global_labelsize)
ax3 = ax2.twiny()
t_plot = (AllVars.t_BigBang - cosmo.lookback_time(PlotScripts.z_plot).value) * 1.0e3 # Corresponding Time values on the bottom.
z_labels = ["$\mathbf{%d}$" % x for x in PlotScripts.z_plot] # Properly Latex-ize the labels.
ax3.set_xlabel(r"$\mathbf{z}$", fontsize = PlotScripts.global_labelsize)
ax3.set_xlim(PlotScripts.time_xlim)
ax3.set_xticks(t_plot) # Set the ticks according to the time values on the bottom,
ax3.set_xticklabels(z_labels, fontsize = PlotScripts.global_fontsize) # But label them as redshifts.
leg = ax2.legend(loc='lower right', numpoints=1, labelspacing=0.1)
leg.draw_frame(False) # Don't want a box frame
for t in leg.get_texts(): # Reduce the size of the text
t.set_fontsize(PlotScripts.global_legendsize)
outputFile2 = "./{0}_z{1}".format(output_tag, output_format)
fig2.savefig(outputFile2, bbox_inches='tight') # Save the figure
print('Saved file to {0}'.format(outputFile2))
plt.close(fig2)
##
def plot_mvir_fesc(SnapList, mass_central, fesc, model_tags, output_tag):
title = []
redshift_labels = []
mean_fesc_array = []
std_fesc_array = []
mean_halomass_array = []
std_halomass_array = []
bin_middle_array = []
for model_number in range(0, len(SnapList)):
redshift_labels.append([])
mean_fesc_array.append([])
std_fesc_array.append([])
mean_halomass_array.append([])
std_halomass_array.append([])
bin_middle_array.append([])
print("Plotting fesc against Mvir")
binwidth = 0.1
Frequency = 1
for model_number in range(0, len(SnapList)):
for snapshot_idx in range(0, len(SnapList[model_number])):
print("Doing Snapshot {0}".format(SnapList[model_number][snapshot_idx]))
tmp = 'z = %.2f' %(AllVars.SnapZ[SnapList[model_number][snapshot_idx]])
redshift_labels[model_number].append(tmp)
minimum_mass = 6.0 # Fixed binning bounds in log10(Msun); these supersede any data-driven limits.
maximum_mass = 12.0
binning_minimum = comm.allreduce(minimum_mass, op = MPI.MIN)
binning_maximum = comm.allreduce(maximum_mass, op = MPI.MAX)
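# Convert back to linear Msun so the pooled mean/std of halo mass are computed in linear space; the pooled mean is re-logged after the loop.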
halomass_nonlog = [10**x for x in mass_central[model_number][snapshot_idx]]
(mean_fesc, std_fesc, N, bin_middle) = AllVars.Calculate_2D_Mean(mass_central[model_number][snapshot_idx], fesc[model_number][snapshot_idx], binwidth, binning_minimum, binning_maximum)
mean_fesc_array[model_number], std_fesc_array[model_number] = calculate_pooled_stats(mean_fesc_array[model_number], std_fesc_array[model_number], mean_fesc, std_fesc, N)
mean_halomass_array[model_number], std_halomass_array[model_number] = calculate_pooled_stats(mean_halomass_array[model_number], std_halomass_array[model_number], np.mean(halomass_nonlog), np.std(halomass_nonlog), len(mass_central[model_number][snapshot_idx]))
## If we want the mean/std of the halo mass itself we would need to update this script. ##
bin_middle_array[model_number].append(bin_middle)
mean_halomass_array[model_number] = np.log10(mean_halomass_array[model_number])
if rank == 0:
f = plt.figure()
ax1 = plt.subplot(111)
for model_number in range(0, len(SnapList)):
for snapshot_idx in range(0, len(SnapList[model_number])):
if model_number == 0:
title = redshift_labels[model_number][snapshot_idx]
else:
title = ''
mean = mean_fesc_array[model_number][snapshot_idx]
std = std_fesc_array[model_number][snapshot_idx]
bin_middle = bin_middle_array[model_number][snapshot_idx]
ax1.plot(bin_middle, mean, color = PlotScripts.colors[snapshot_idx], linestyle = PlotScripts.linestyles[model_number], rasterized = True, label = title)
#ax1.scatter(mean_halomass_array[model_number][snapshot_idx], np.mean(~np.isnan(mean)), color = colors[snapshot_idx], marker = 'o', rasterized = True, s = 40, lw = 3)
if (len(SnapList) == 1):
ax1.fill_between(bin_middle, np.subtract(mean,std), np.add(mean,std), color = PlotScripts.colors[snapshot_idx], alpha = 0.25)
ax1.set_xlabel(r'$\log_{10}\ M_{\mathrm{vir}}\ [M_{\odot}]$', size = PlotScripts.global_fontsize)
ax1.set_ylabel(r'$f_\mathrm{esc}$', size = PlotScripts.global_fontsize)
#ax1.set_xlim([8.5, 12])
#ax1.set_ylim([0.0, 1.0])
ax1.xaxis.set_minor_locator(mtick.MultipleLocator(0.1))
# ax1.yaxis.set_minor_locator(mtick.MultipleLocator(0.1))
# ax1.set_yscale('log', nonposy='clip')
# for model_number in range(0, len(SnapList)):
# ax1.plot(1e100, 1e100, color = 'k', ls = linestyles[model_number], label = model_tags[model_number], rasterized=True)
leg = ax1.legend(loc='upper left', numpoints=1, labelspacing=0.1)
leg.draw_frame(False) # Don't want a box frame
for t in leg.get_texts(): # Reduce the size of the text
t.set_fontsize('medium')
outputFile = './' + output_tag + output_format
plt.savefig(outputFile, bbox_inches='tight') # Save the figure
print('Saved file to {0}'.format(outputFile))
plt.close()
##
def plot_mvir_Ngamma(SnapList, mean_mvir_Ngamma, std_mvir_Ngamma, N_Ngamma, model_tags, output_tag,fesc_prescription=None, fesc_normalization=None, fitpath=None):
'''
Plots the number of ionizing photons (pure ngamma times fesc) as a function of halo mass.
Parallel compatible.
The input data has been binned as a function of halo virial mass (Mvir), with the bins defined at the top of the file (m_low, m_high, bin_width).
Accepts 3D arrays to plot ngamma for multiple models.
Parameters
----------
SnapList : Nested array, SnapList[model_number0] = [snapshot0_model0, ..., snapshotN_model0], with length equal to the number of models.
Snapshots for each model.
mean_mvir_Ngamma, std_mvir_Ngamma, N_Ngamma : Nested 2-dimensional array, mean_mvir_Ngamma[model_number0][snapshot0] = [bin0_meanNgamma, ..., binN_meanNgamma], with length equal to the number of bins.
Mean/Standard deviation/number of data points in each halo mass (Mvir) bin.
The number of photons is in units of 1.0e50 s^-1.
model_tags : array of strings with length equal to the number of models.
Strings that contain the tag for each model. Will be placed on the plot.
output_tag : string
Name of the file that will be generated.
fesc_prescription : int (optional)
If this parameter is defined, we will save the Mvir-Ngamma results in a text file (not needed if not saving).
Number that controls what escape fraction prescription was used to generate the escape fractions.
0 : Constant, fesc = Constant.
1 : Scaling with Halo Mass, fesc = A*Mh^B.
2 : Scaling with ejected fraction, fesc = fej*A + B.
fesc_normalization : float (if fesc_prescription == 0) or `numpy.ndarray` with length 2 (if fesc_prescription == 1 or == 2) (optional).
If this parameter is defined, we will save the Mvir-Ngamma results in a text file (not needed if not saving).
Parameter not needed if you're not saving the Mvir-Ngamma results.
If fesc_prescription == 0, gives the constant value for the escape fraction.
If fesc_prescription == 1 or == 2, gives A and B with the form [A, B].
fitpath : string (optional)
If this parameter is defined, we will save the Mvir-Ngamma results in a text file (not needed if not saving).
Defines the base path for where we are saving the results.
Returns
-------
No returns.
Generates and saves the plot (named via output_tag).
Units
-----
Ngamma is in units of 1.0e50 s^-1.
'''
print("Plotting ngamma*fesc against the halo mass")
## Array initialization. ##
title = []
redshift_labels = []
mean_ngammafesc_array = []
std_ngammafesc_array = []
mean_halomass_array = []
std_halomass_array = []
bin_middle_array = []
for model_number in range(0, len(SnapList)):
redshift_labels.append([])
mean_ngammafesc_array.append([])
std_ngammafesc_array.append([])
mean_halomass_array.append([])
std_halomass_array.append([])
bin_middle_array.append([])
for model_number in range(0, len(SnapList)):
for snapshot_idx in range(0, len(SnapList[model_number])):
print("Doing Snapshot {0}".format(SnapList[model_number][snapshot_idx]))
tmp = 'z = %.2f' %(AllVars.SnapZ[SnapList[model_number][snapshot_idx]])
redshift_labels[model_number].append(tmp)
N = N_Ngamma[model_number][snapshot_idx]
mean_ngammafesc_array[model_number], std_ngammafesc_array[model_number] = calculate_pooled_stats(mean_ngammafesc_array[model_number], std_ngammafesc_array[model_number], mean_mvir_Ngamma[model_number][snapshot_idx], std_mvir_Ngamma[model_number][snapshot_idx], N) # Collate the values from all processors.
bin_middle_array[model_number].append(np.arange(m_low, m_high+bin_width, bin_width)[:-1] + bin_width * 0.5)
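# Bin centres: np.arange(m_low, m_high + bin_width, bin_width) generates the bin edges, [:-1] drops the final edge, and adding bin_width*0.5 shifts the edges to centres.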
if rank == 0:
f = plt.figure()
ax1 = plt.subplot(111)
for model_number in range(0, len(SnapList)):
count = 0
for snapshot_idx in range(0, len(SnapList[model_number])):
if model_number == 0:
title = redshift_labels[model_number][snapshot_idx]
else:
title = ''
mean = np.zeros((len(mean_ngammafesc_array[model_number][snapshot_idx])), dtype = np.float32)
std = np.zeros((len(mean_ngammafesc_array[model_number][snapshot_idx])), dtype=np.float32)
for i in range(0, len(mean)):
if(mean_ngammafesc_array[model_number][snapshot_idx][i] < 1e-10):
mean[i] = np.nan
std[i] = np.nan
else:
mean[i] = np.log10(mean_ngammafesc_array[model_number][snapshot_idx][i] * 1.0e50) # Remember that the input data is in units of 1.0e50 s^-1.
std[i] = 0.434 * std_ngammafesc_array[model_number][snapshot_idx][i] / mean_ngammafesc_array[model_number][snapshot_idx][i] # In log space the standard deviation becomes 0.434 * std/mean.
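# The 0.434 factor is error propagation through y = log10(x):
# sigma_y = sigma_x / (x * ln(10)), and 1/ln(10) ~= 0.434.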
bin_middle = bin_middle_array[model_number][snapshot_idx]
if (count < 4): # Only plot at most 4 lines (count runs from 0 to 3).
ax1.plot(bin_middle, mean, color = PlotScripts.colors[snapshot_idx], linestyle = PlotScripts.linestyles[model_number], rasterized = True, label = title, linewidth = PlotScripts.global_linewidth)
count += 1
## In this block we save the Mvir-Ngamma results to a file. ##
if (fesc_prescription is None or fesc_normalization is None or fitpath is None):
raise ValueError("You've specified you want to save the Mvir-Ngamma results but haven't provided an escape fraction prescription, normalization and base path name")
# Note: All the checks that escape fraction normalization was written correctly were performed in 'calculate_fesc()', hence it will be correct by this point and we don't need to double check.
if (fesc_prescription[model_number] == 0): # Slightly different naming scheme for the constant case (it only has a float for fesc_normalization).
fname = "%s/fesc%d_%.3f_z%.3f.txt" %(fitpath, fesc_prescription[model_number], fesc_normalization[model_number], AllVars.SnapZ[SnapList[model_number][snapshot_idx]])
elif (fesc_prescription[model_number] == 1 or fesc_prescription[model_number] == 2):
fname = "%s/fesc%d_A%.3eB%.3f_z%.3f.txt" %(fitpath, fesc_prescription[model_number], fesc_normalization[model_number][0], fesc_normalization[model_number][1], AllVars.SnapZ[SnapList[model_number][snapshot_idx]])
f = open(fname, "w+")
if not os.access(fname, os.W_OK):
print("The filename is {0}".format(fname))
raise ValueError("Can't write to this file.")
for i in range(0, len(bin_middle)):
f.write("%.4f %.4f %.4f %d\n" %(bin_middle[i], mean[i], std[i], N_Ngamma[model_number][snapshot_idx][i]))
f.close()
print("Wrote successfully to file {0}".format(fname))
##
for model_number in range(0, len(SnapList)): # Just plot some garbage to get the legend labels correct.
ax1.plot(np.nan, np.nan, color = 'k', linestyle = PlotScripts.linestyles[model_number], rasterized = True, label = model_tags[model_number], linewidth = PlotScripts.global_linewidth)
ax1.set_xlabel(r'$\log_{10}\ M_{\mathrm{vir}}\ [M_{\odot}]$', size = PlotScripts.global_fontsize)
ax1.set_ylabel(r'$\log_{10}\ \dot{N}_\gamma \: f_\mathrm{esc} \: [\mathrm{s}^{-1}]$', size = PlotScripts.global_fontsize)
ax1.set_xlim([8.5, 12])
ax1.xaxis.set_minor_locator(mtick.MultipleLocator(0.1))
leg = ax1.legend(loc='upper left', numpoints=1, labelspacing=0.1)
leg.draw_frame(False) # Don't want a box frame
for t in leg.get_texts(): # Reduce the size of the text
t.set_fontsize('medium')
outputFile = './' + output_tag + output_format
plt.savefig(outputFile, bbox_inches='tight') # Save the figure
print('Saved file to {0}'.format(outputFile))
plt.close()
def bin_Simfast_halos(RedshiftList, SnapList, halopath, fitpath, fesc_prescription, fesc_normalization, GridSize, output_tag):
for model_number in range(0, len(fesc_prescription)):
for halo_z_idx in range(0, len(RedshiftList)):
snapshot_idx = min(range(len(SnapList)), key=lambda i: abs(SnapList[i]-RedshiftList[halo_z_idx])) # This finds the index of the simulation redshift that most closely matches the Halo redshift.
print("Binning Halo redshift {0}".format(RedshiftList[halo_z_idx]))
print("For the Halo redshift {0:.3f} the nearest simulation redshift is {1:.3f}".format(RedshiftList[halo_z_idx], SnapList[snapshot_idx]))
if (fesc_prescription[model_number] == 0):
fname = "%s/fesc%d_%.3f_z%.3f.txt" %(fitpath, fesc_prescription[model_number], fesc_normalization[model_number], AllVars.SnapZ[snapshot_idx])
elif (fesc_prescription[model_number] == 1 or fesc_prescription[model_number] == 2):
fname = "%s/fesc%d_A%.3eB%.3f_z%.3f.txt" %(fitpath, fesc_prescription[model_number], fesc_normalization[model_number][0], fesc_normalization[model_number][1], AllVars.SnapZ[snapshot_idx])
print("Reading in file {0}".format(fname))
## Here we read in the results from the Mvir-Ngamma binning. ##
f = open(fname, 'r')
fit_mvir, fit_mean, fit_std, fit_N = np.loadtxt(f, unpack = True)
f.close()
## Here we read in the halos created by Simfast21 ##
# The data file has the structure:
# long int N_halos
# Then an entry for each halo:
# float Mass
# float x, y, z positions.
# NOTE: The x,y,z positions are the grid indices but are still floats (because Simfast21 is weird like that).
Halodesc_full = [
('Halo_Mass', np.float32),
('Halo_x', np.float32),
('Halo_y', np.float32),
('Halo_z', np.float32)
]
names = [Halodesc_full[i][0] for i in range(len(Halodesc_full))]
formats = [Halodesc_full[i][1] for i in range(len(Halodesc_full))]
Halo_Desc = np.dtype({'names':names, 'formats':formats}, align=True)
fname = "%s/halonl_z%.3f_N%d_L100.0.dat.catalog" %(halopath, RedshiftList[halo_z_idx], GridSize)
f = open(fname, 'rb')
N_Halos = np.fromfile(f, count = 1, dtype = np.int64)[0] # Simfast21 stores the halo count as a single long int.
Halos = np.fromfile(f, count = N_Halos, dtype = Halo_Desc)
f.close()
binned_nion = np.zeros((GridSize*GridSize*GridSize), dtype = np.float32) # This grid will contain the ionizing photons that result from the binning.
binned_Halo_Mass = np.digitize(np.log10(Halos['Halo_Mass']), fit_mvir) # Places the Simfast21 halos into the correct halo mass bins defined by the Mvir-Ngamma results.
binned_Halo_Mass[binned_Halo_Mass == len(fit_mvir)] = len(fit_mvir) - 1 # Fixes up the edge case.
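# Worked example of the binning above (illustrative values): with fit_mvir = [8.0, 8.1, 8.2], a halo at log10(M) = 8.15 digitizes to index 2 (bins are right-open); any halo above the last edge returns len(fit_mvir) and is clamped into the final bin by the line above.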
## For each halo we now assign an ionizing flux. ##
# This flux is determined by drawing a random number from a normal distribution with mean and standard deviation given by the Mvir-Ngamma results.
# NOTE: Remember the Mvir-Ngamma results are in units of log10(s^-1).
fit_nan = 0
for i in range(0, N_Halos):
if(np.isnan(fit_mean[binned_Halo_Mass[i]]) or np.isnan(fit_std[binned_Halo_Mass[i]])): # This halo's mass was not covered by the Mvir-Ngamma fits.
fit_nan += 1
continue
nion_halo = np.random.normal(fit_mean[binned_Halo_Mass[i]], fit_std[binned_Halo_Mass[i]])
## Because of how Simfast21 does their binning, we have some cases where the Halos are technically outside the box. Just fix them up. ##
x_grid = min(max(int(Halos['Halo_x'][i]), 0), GridSize - 1)
y_grid = min(max(int(Halos['Halo_y'][i]), 0), GridSize - 1)
z_grid = min(max(int(Halos['Halo_z'][i]), 0), GridSize - 1)
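# Flattened C-order (row-major) index: idx = x*N^2 + y*N + z, matching the reshape((GridSize, GridSize, GridSize)) below. Dividing by 1.0e50 keeps binned_nion in the 1.0e50 photons/s units used throughout.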
idx = x_grid * GridSize*GridSize + y_grid * GridSize + z_grid
binned_nion[idx] += pow(10, nion_halo)/1.0e50
# print"We had %d halos (out of %d, so %.4f fraction) that had halo mass that was not covered by the Mvir-Ngamma results." %(fit_nan, N_Halos, float(fit_nan)/float(N_Halos))
# print "There were %d cells with a non-zero ionizing flux." %(len(binned_nion[binned_nion != 0]))
binned_nion = binned_nion.reshape((GridSize,GridSize,GridSize))
cut_slice = 0
cut_width = 512
nion_slice = binned_nion[:,:, cut_slice:cut_slice+cut_width].mean(axis=-1)*1.0e50
ax1 = plt.subplot(211)
im = ax1.imshow(np.log10(nion_slice), interpolation='bilinear', origin='lower', extent =[0,AllVars.BoxSize,0,AllVars.BoxSize], cmap = 'Purples', vmin = 48, vmax = 53)
cbar = plt.colorbar(im, ax = ax1)
cbar.set_label(r'$\mathrm{log}_{10}N_{\gamma} [\mathrm{s}^{-1}]$')
ax1.set_xlabel(r'$\mathrm{x} (h^{-1}Mpc)$')
ax1.set_ylabel(r'$\mathrm{y} (h^{-1}Mpc)$')
ax1.set_xlim([0.0, AllVars.BoxSize])
ax1.set_ylim([0.0, AllVars.BoxSize])
title = r"$z = %.3f$" %(RedshiftList[halo_z_idx])
ax1.set_title(title)
ax2 = plt.subplot(212)
w = np.where((Halos['Halo_z'][:] > cut_slice) & (Halos['Halo_z'][:] <= cut_slice + cut_width))[0]
x_plot = Halos['Halo_x'] * float(AllVars.BoxSize)/float(GridSize)
y_plot = Halos['Halo_y'] * float(AllVars.BoxSize)/float(GridSize)
z_plot = Halos['Halo_z'][w] * float(AllVars.BoxSize)/float(GridSize)
ax2.scatter(x_plot[w], y_plot[w], s = 2, alpha = 0.5)
ax2.set_xlabel(r'$\mathrm{x} (h^{-1}Mpc)$')
ax2.set_ylabel(r'$\mathrm{y} (h^{-1}Mpc)$')
ax2.set_xlim([0.0, AllVars.BoxSize])
ax2.set_ylim([0.0, AllVars.BoxSize])
tmp = "z%.3f" %(RedshiftList[halo_z_idx])
plt.tight_layout()
outputFile = './' + output_tag + tmp + output_format
plt.savefig(outputFile) # Save the figure
print('Saved file to {0}'.format(outputFile))
plt.close()
def plot_photoncount(SnapList, sum_nion, simulation_norm, FirstFile, LastFile, NumFiles, model_tags, output_tag):
'''
Plots the ionizing emissivity as a function of redshift.
The emissivity is normalized to comoving Mpc^-3, and only a subset of the total volume needs to be read in.
Parallel compatible.
Parameters
----------
SnapList : Nested array, SnapList[model_number0] = [snapshot0_model0, ..., snapshotN_model0], with length equal to the number of models.
Snapshots for each model, defines the x-axis we plot against.
sum_nion : Nested 1-dimensional array, sum_nion[z0, z1, ..., zn], with length equal to the number of redshifts.
Number of escape ionizing photons (i.e., photon rate times the local escape fraction) at each redshift.
In units of 1.0e50 s^-1.
simulation_norm : array of ints with length equal to the number of models.
Denotes which simulation each model uses.
0 : MySim
1 : Mini-Millennium
2 : Tiamat (down to z = 5)
3 : Extended Tiamat (down to z = 1.6ish).
4 : Britton's Simulation
FirstFile, LastFile, NumFiles : array of integers with length equal to the number of models.
The file numbers for each model that were read in (defined by the range between [FirstFile, LastFile] inclusive) and the TOTAL number of files for this model (we may only be plotting a subset of the volume).
model_tags : array of strings with length equal to the number of models.
Strings that contain the tag for each model. Will be placed on the plot.
output_tag : string
Name of the file that will be generated.
Returns
-------
No returns.
Generates and saves the plot (named via output_tag).
Units
-----
sum_nion is in units of 1.0e50 s^-1.
'''
print("Plotting the ionizing emissivity.")
sum_array = []
for model_number in range(0, len(SnapList)):
if(simulation_norm[model_number] == 0):
AllVars.Set_Params_Mysim()
elif(simulation_norm[model_number] == 1):
AllVars.Set_Params_MiniMill()
elif(simulation_norm[model_number] == 2):
AllVars.Set_Params_Tiamat()
elif(simulation_norm[model_number] == 3):
AllVars.Set_Params_Tiamat_extended()
elif(simulation_norm[model_number] == 4):
AllVars.Set_Params_Britton()
elif(simulation_norm[model_number] == 5):
AllVars.Set_Params_Kali()
else:
print("Simulation norm was set to {0}.".format(simulation_norm[model_number]))
raise ValueError("This option has been implemented yet. Get your head in the game Jacob!")
sum_array.append([])
for snapshot_idx in range(0, len(SnapList[model_number])):
nion_sum_snapshot = comm.reduce(sum_nion[model_number][snapshot_idx], op = MPI.SUM, root = 0)
if rank == 0:
sum_array[model_number].append(nion_sum_snapshot * 1.0e50 / (pow(AllVars.BoxSize / AllVars.Hubble_h,3) * (float(LastFile[model_number] - FirstFile[model_number] + 1) / float(NumFiles[model_number]))))
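# Comoving emissivity: nion is stored in units of 1.0e50 photons/s, so multiply back by 1.0e50 and divide by the comoving volume (BoxSize/Hubble_h)^3, scaled by the fraction of the simulation files actually read, giving photons s^-1 Mpc^-3.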
if (rank == 0):
ax1 = plt.subplot(111)
for model_number in range(0, len(SnapList)):
if(simulation_norm[model_number] == 0):
cosmo = AllVars.Set_Params_Mysim()
elif(simulation_norm[model_number] == 1):
cosmo = AllVars.Set_Params_MiniMill()
elif(simulation_norm[model_number] == 2):
cosmo = AllVars.Set_Params_Tiamat()
elif(simulation_norm[model_number] == 3):
cosmo = AllVars.Set_Params_Tiamat_extended()
elif(simulation_norm[model_number] == 4):
cosmo = AllVars.Set_Params_Britton()
elif(simulation_norm[model_number] == 5):
cosmo = AllVars.Set_Params_Kali()
else:
print("Simulation norm was set to {0}.".format(simulation_norm[model_number]))
raise ValueError("This option has been implemented yet. Get your head in the game Jacob!")
t = np.empty(len(SnapList[model_number]))
for snapshot_idx in range(0, len(SnapList[model_number])):
t[snapshot_idx] = (AllVars.t_BigBang - cosmo.lookback_time(AllVars.SnapZ[SnapList[model_number][snapshot_idx]]).value) * 1.0e3
t = [t for t, N in zip(t, sum_array[model_number]) if N > 1.0]
sum_array[model_number] = [x for x in sum_array[model_number] if x > 1.0]
print("The total number of ionizing photons for model {0} is {1} s^1 Mpc^-3".format(model_number, sum(sum_array[model_number])))
print(np.log10(sum_array[model_number]))
ax1.plot(t, np.log10(sum_array[model_number]), color = PlotScripts.colors[model_number], linestyle = PlotScripts.linestyles[model_number], label = model_tags[model_number], linewidth = PlotScripts.global_linewidth)
#ax1.fill_between(t, np.subtract(mean,std), np.add(mean,std), color = colors[model_number], alpha = 0.25)
ax1.xaxis.set_minor_locator(mtick.MultipleLocator(PlotScripts.time_tickinterval))
#ax1.yaxis.set_minor_locator(mtick.MultipleLocator(0.025))
ax1.set_xlim(PlotScripts.time_xlim)
ax1.set_ylim([48.5, 51.5])
ax2 = ax1.twiny()
t_plot = (AllVars.t_BigBang - cosmo.lookback_time(PlotScripts.z_plot).value) * 1.0e3 # Corresponding Time values on the bottom.
z_labels = ["$%d$" % x for x in PlotScripts.z_plot] # Properly Latex-ize the labels.
ax2.set_xlabel(r"$z$", size = PlotScripts.global_labelsize)
ax2.set_xlim(PlotScripts.time_xlim)
ax2.set_xticks(t_plot) # Set the ticks according to the time values on the bottom,
ax2.set_xticklabels(z_labels) # But label them as redshifts.
ax1.set_xlabel(r"$\mathrm{Time \: Since \: Big \: Bang \: [Myr]}$", size = PlotScripts.global_fontsize)
ax1.set_ylabel(r'$\sum f_\mathrm{esc}\dot{N}_\gamma \: [\mathrm{s}^{-1}\mathrm{Mpc}^{-3}]$', fontsize = PlotScripts.global_fontsize)
plot_time = 1
bouwens_z = np.arange(6,16) # Redshift range for the observations.
bouwens_t = (AllVars.t_BigBang - cosmo.lookback_time(bouwens_z).value) * 1.0e3 # Corresponding values for what we will plot on the x-axis.
bouwens_1sigma_lower = [50.81, 50.73, 50.60, 50.41, 50.21, 50.00, 49.80, 49.60, 49.39, 49.18] # 68% confidence intervals for the ionizing emissivity from Bouwens et al. (2015).
bouwens_1sigma_upper = [51.04, 50.85, 50.71, 50.62, 50.56, 50.49, 50.43, 50.36, 50.29, 50.23]
bouwens_2sigma_lower = [50.72, 50.69, 50.52, 50.27, 50.01, 49.75, 49.51, 49.24, 48.99, 48.74] # 95% CI.
bouwens_2sigma_upper = [51.11, 50.90, 50.74, 50.69, 50.66, 50.64, 50.61, 50.59, 50.57, 50.55]
if plot_time == 1:
ax1.fill_between(bouwens_t, bouwens_1sigma_lower, bouwens_1sigma_upper, color = 'k', alpha = 0.2)
ax1.fill_between(bouwens_t, bouwens_2sigma_lower, bouwens_2sigma_upper, color = 'k', alpha = 0.4, label = r"$\mathrm{Bouwens \: et \: al. \: (2015)}$")
else:
ax1.fill_between(bouwens_z, bouwens_1sigma_lower, bouwens_1sigma_upper, color = 'k', alpha = 0.2)
ax1.fill_between(bouwens_z, bouwens_2sigma_lower, bouwens_2sigma_upper, color = 'k', alpha = 0.4, label = r"$\mathrm{Bouwens \: et \: al. \: (2015)}$")
# ax1.text(0.075, 0.965, '(a)', horizontalalignment='center', verticalalignment='center', transform = ax.transAxes)
ax1.text(350, 50.0, r"$68\%$", horizontalalignment='center', verticalalignment = 'center', fontsize = PlotScripts.global_labelsize)
ax1.text(350, 50.8, r"$95\%$", horizontalalignment='center', verticalalignment = 'center', fontsize = PlotScripts.global_labelsize)
leg = ax1.legend(loc='lower right', numpoints=1, labelspacing=0.1)
leg.draw_frame(False) # Don't want a box frame
for t in leg.get_texts(): # Reduce the size of the text
t.set_fontsize(PlotScripts.global_legendsize)
plt.tight_layout()
outputFile = './{0}{1}'.format(output_tag, output_format)
plt.savefig(outputFile) # Save the figure
print('Saved file to {0}'.format(outputFile))
plt.close()
##
def plot_singleSFR(galaxies_filepath_array, merged_galaxies_filepath_array, number_snapshots, simulation_norm, model_tags, output_tag):
SFR_gal = []
SFR_ensemble = []
ejected_gal = []
ejected_ensemble = []
infall_gal = []
infall_ensemble = []
ejectedmass_gal = []
ejectedmass_ensemble = []
N_random = 1
ax1 = plt.subplot(111)
# ax3 = plt.subplot(122)
#ax5 = plt.subplot(133)
look_for_alive = 1
#idx_array = [20004, 20005, 20016]
#halonr_array = [7381]
halonr_array = [389106]
#halonr_array = [36885]
for model_number in range(0, len(model_tags)):
if(simulation_norm[model_number] == 0):
AllVars.Set_Params_Mysim()
elif(simulation_norm[model_number] == 1):
AllVars.Set_Params_MiniMill()
elif(simulation_norm[model_number] == 3):
AllVars.Set_Params_Tiamat_extended()
else:
print("Simulation norm was set to {0}.".format(simulation_norm[model_number]))
raise ValueError("This option has been implemented yet. Get your head in the game Jacob!")
SFR_gal.append([])
SFR_ensemble.append([])
ejected_gal.append([])
ejected_ensemble.append([])
infall_gal.append([])
infall_ensemble.append([])
ejectedmass_gal.append([])
ejectedmass_ensemble.append([])
GG, Gal_Desc = ReadScripts.ReadGals_SAGE_DelayedSN(galaxies_filepath_array[model_number], 0, number_snapshots[model_number], comm) # Read in the correct galaxy file.
G_Merged, Merged_Desc = ReadScripts.ReadGals_SAGE_DelayedSN(merged_galaxies_filepath_array[model_number], 0, number_snapshots[model_number], comm) # Also need the merged galaxies.
G = ReadScripts.Join_Arrays(GG, G_Merged, Gal_Desc) # Then join them together for all galaxies that existed at this Redshift.
if look_for_alive == 1:
G.GridHistory[G.GridHistory >= 0] = 1
G.GridHistory[G.GridHistory < 0] = 0
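# GridHistory stores the galaxy's grid cell per snapshot (-1 when it does not exist). Mapping to 0/1 and summing along the snapshot axis counts how many snapshots each galaxy was alive.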
alive = np.sum(G.GridHistory, axis = 1)
# print "The galaxy that was present in the most snapshots is %d which was in %d snaps" %(np.argmax(alive), np.amax(alive))
most_alive = alive.argsort()[-10:][::-1] # Finds the 10 galaxies alive for the most snapshots. Taken from https://stackoverflow.com/questions/6910641/how-to-get-indices-of-n-maximum-values-in-a-numpy-array
# print G.HaloNr[most_alive]
t = np.empty((number_snapshots[model_number]))
for snapshot_idx in range(0, number_snapshots[model_number]):
w = np.where((G.GridHistory[:, snapshot_idx] != -1) & (G.GridStellarMass[:, snapshot_idx] > 0.0) & (G.GridStellarMass[:, snapshot_idx] < 1e5) & (G.GridFoFMass[:, snapshot_idx] >= m_low_SAGE) & (G.GridFoFMass[:, snapshot_idx] <= m_high_SAGE))[0] # Only include those galaxies that existed at the current snapshot, had positive (but not infinite) stellar/Halo mass and Star formation rate.
SFR_ensemble[model_number].append(np.mean(G.GridSFR[w,snapshot_idx]))
ejected_ensemble[model_number].append(np.mean(G.GridOutflowRate[w, snapshot_idx]))
infall_ensemble[model_number].append(np.mean(G.GridInfallRate[w, snapshot_idx]))
t[snapshot_idx] = (t_BigBang - cosmo.lookback_time(AllVars.SnapZ[snapshot_idx]).value) * 1.0e3
for p in range(0, N_random):
random_idx = (np.where((G.HaloNr == halonr_array[p]))[0])[0]
SFR_gal[model_number].append(G.GridSFR[random_idx]) # Remember the star formation rate history of the galaxy.
ejected_gal[model_number].append(G.GridOutflowRate[random_idx])
infall_gal[model_number].append(G.GridInfallRate[random_idx])
ejectedmass_gal[model_number].append(G.GridEjectedMass[random_idx])
#SFR_gal[model_number][p][SFR_gal[model_number][p] < 1.0e-15] = 1
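# Where the galaxy does not exist at a snapshot (GridHistory == -1), carry the previous snapshot's SFR forward so its history plots without gaps.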
for snapshot_idx in range(0, number_snapshots[model_number]):
if snapshot_idx == 0:
pass
elif(G.GridHistory[random_idx, snapshot_idx] == -1):
SFR_gal[model_number][p][snapshot_idx] = SFR_gal[model_number][p][snapshot_idx - 1]
# SFR_ensemble[model_number] = np.nan_to_num(SFR_ensemble[model_number])
# SFR_ensemble[model_number][SFR_ensemble[model_number] < 1.0e-15] = 1
# ejected_ensemble[model_number][ejected_ensemble[model_number] < 1.0e-15] = 1
ax1.plot(t, SFR_ensemble[model_number], color = PlotScripts.colors[0], linestyle = PlotScripts.linestyles[model_number], label = model_tags[model_number], linewidth = PlotScripts.global_linewidth)
ax1.plot(t, ejected_ensemble[model_number], color = PlotScripts.colors[1], linestyle = PlotScripts.linestyles[model_number], linewidth = PlotScripts.global_linewidth, alpha = 1.0)
#ax5.plot(t, infall_ensemble[model_number], color = PlotScripts.colors[2], linestyle = PlotScripts.linestyles[model_number], linewidth = PlotScripts.global_linewidth, alpha = 1.0)
#ax5.plot(t, ejectedmass_ensemble[model_number], color = PlotScripts.colors[2], linestyle = PlotScripts.linestyles[model_number], linewidth = PlotScripts.global_linewidth, alpha = 1.0)
for p in range(0, N_random):
ax1.plot(t, SFR_gal[model_number][p], color = PlotScripts.colors[0], linestyle = PlotScripts.linestyles[model_number], alpha = 0.5, linewidth = 1)
ax1.plot(t, ejected_gal[model_number][p], color = PlotScripts.colors[1], linestyle = PlotScripts.linestyles[model_number], alpha = 0.5, linewidth = 1)
#ax5.plot(t, infall_gal[model_number][p], color = PlotScripts.colors[2], linestyle = PlotScripts.linestyles[model_number], alpha = 0.5, linewidth = 1)
#ax5.plot(t, ejectedmass_gal[model_number][p], color = PlotScripts.colors[2], linestyle = PlotScripts.linestyles[model_number], alpha = 0.5, linewidth = 1)
#ax1.plot(t, SFR_gal[model_number][p], color = PlotScripts.colors[0], linestyle = PlotScripts.linestyles[model_number], alpha = 1.0, linewidth = 1, label = model_tags[model_number])
#ax1.plot(t, ejected_gal[model_number][p], color = PlotScripts.colors[1], linestyle = PlotScripts.linestyles[model_number], alpha = 1.0, linewidth = 1, label = model_tags[model_number])
ax1.plot(np.nan, np.nan, color = 'r', linestyle = '-', label = "SFR")
ax1.plot(np.nan, np.nan, color = 'b', linestyle = '-', label = "Outflow")
ax1.set_yscale('log', nonposy='clip')
ax1.set_ylabel(r"$\mathrm{Mass \: Flow} \: [\mathrm{M}_\odot \mathrm{yr}^{-1}]$")
ax1.set_xlabel(r"$\mathrm{Time \: Since \: Big \: Bang \: [Myr]}$", size = PlotScripts.global_fontsize)
ax1.set_xlim(PlotScripts.time_xlim)
ax1.set_ylim([1e-6, 1e3])
'''
ax3.set_yscale('log', nonposy='clip')
ax3.set_ylabel(r"$\mathrm{Outflow \: Rate} \: [\mathrm{M}_\odot \mathrm{yr}^{-1}]$")
ax3.set_xlabel(r"$\mathrm{Time \: Since \: Big \: Bang \: [Myr]}$", size = PlotScripts.global_fontsize)
ax3.set_xlim(PlotScripts.time_xlim)
ax3.set_ylim([1e-8, 1e3])
ax5.set_yscale('log', nonposy='clip')
#ax5.set_ylabel(r"$\mathrm{Infall \: Rate} \: [\mathrm{M}_\odot \mathrm{yr}^{-1}]$")
ax5.set_ylabel(r"$\mathrm{Ejected Mass} [\mathrm{M}_\odot]$")
ax5.set_xlabel(r"$\mathrm{Time \: Since \: Big \: Bang \: [Myr]}$", size = PlotScripts.global_fontsize)
ax5.set_xlim(PlotScripts.time_xlim)
#ax5.set_ylim([1e-8, 1e3])
ax5.set_ylim([1e6, 1e10])
'''
ax2 = ax1.twiny()
#ax4 = ax3.twiny()
#ax6 = ax5.twiny()
t_plot = (t_BigBang - cosmo.lookback_time(PlotScripts.z_plot).value) * 1.0e3 # Corresponding Time values on the bottom.
z_labels = ["$%d$" % x for x in PlotScripts.z_plot] # Properly Latex-ize the labels.
ax2.set_xlabel(r"$z$", size = PlotScripts.global_labelsize)
ax2.set_xlim(PlotScripts.time_xlim)
ax2.set_xticks(t_plot) # Set the ticks according to the time values on the bottom,
ax2.set_xticklabels(z_labels) # But label them as redshifts.
'''
ax4.set_xlabel(r"$z$", size = PlotScripts.global_labelsize)
ax4.set_xlim(PlotScripts.time_xlim)
ax4.set_xticks(t_plot) # Set the ticks according to the time values on the bottom,
ax4.set_xticklabels(z_labels) # But label them as redshifts.
ax6.set_xlabel(r"$z$", size = PlotScripts.global_labelsize)
ax6.set_xlim(PlotScripts.time_xlim)
ax6.set_xticks(t_plot) # Set the ticks according to the time values on the bottom,
ax6.set_xticklabels(z_labels) # But label them as redshifts.
'''
plt.tight_layout()
leg = ax1.legend(loc='lower right', numpoints=1, labelspacing=0.1)
leg.draw_frame(False) # Don't want a box frame
for t in leg.get_texts(): # Reduce the size of the text
t.set_fontsize(PlotScripts.global_legendsize)
outputFile = './Halo%d_mlow%.2f_%s%s' %(halonr_array[0], m_low_SAGE, output_tag, output_format)
plt.savefig(outputFile, bbox_inches='tight') # Save the figure
print('Saved file to {0}'.format(outputFile))
plt.close()
##
def plot_quasars_count(SnapList, PlotList, N_quasars_z, N_quasars_boost_z, N_gal_z, mean_quasar_activity, std_quasar_activity, N_halo, N_merger_halo, N_gal, N_merger_galaxy, fesc_prescription, simulation_norm, FirstFile, LastFile, NumFile, model_tags, output_tag):
'''
Parameters
----------
SnapList : Nested 'array-like` of ints, SnapList[model_number0] = [snapshot0_model0, ..., snapshotN_model0], with length equal to the number of models.
Snapshots that we plot the quasar density at for each model.
PlotList : Nested array of ints, PlotList[model_number0]= [plotsnapshot0_model0, ..., plotsnapshotN_model0], with length equal to the number of models.
Snapshots that will be plotted for the quasar activity as a function of halo mass.
N_quasars_z : Nested array of floats, N_quasars_z[model_number0] = [N_quasars_z0, N_quasars_z1, ..., N_quasars_zN]. Outer array has length equal to the number of models, inner array has length equal to length of the model's SnapList.
Number of quasars, THAT WENT OFF, during the given redshift.
N_quasars_boost_z : Nested array of floats, N_quasars_boost_z[model_number0] = [N_quasars_boost_z0, N_quasars_boost_z1, ..., N_quasars_boost_zN]. Outer array has length equal to the number of models, inner array has length equal to length of the model's SnapList.
Number of galaxies that had their escape fraction boosted by quasar activity.
N_gal_z : Nested array of floats, N_gal_z[model_number0] = [N_gal_z0, N_gal_z1, ..., N_gal_zN]. Outer array has length equal to the number of models, inner array has length equal to length of the model's SnapList.
Number of galaxies at each redshift.
mean_quasar_activity, std_quasar_activity : Nested 2-dimensional array of floats, mean_quasar_activity[model_number0][snapshot0] = [bin0quasar_activity, ..., binNquasar_activity]. Outer array has length equal to the number of models, inner array has length equal to the length of the model's snaplist and the innermost array has length equal to the number of halo bins (NB).
Mean/std fraction of galaxies that had quasar go off during each snapshot as a function of halo mass.
NOTE : This is for quasars going off, not for galaxies that have their escape fraction being boosted.
fesc_prescription : Array with length equal to the number of models.
Denotes what escape fraction prescription each model used. Quasars are only tracked when fesc_prescription == 3.
simulation_norm : array with length equal to the number of models.
Denotes which simulation each model uses.
0 : MySim
1 : Mini-Millennium
2 : Tiamat (down to z = 5)
3 : Extended Tiamat (down to z = 1.6ish).
4 : Britton's Simulation
5 : Kali
FirstFile, LastFile, NumFile : array of integers with length equal to the number of models.
The file numbers for each model that were read in (defined by the range between [FirstFile, LastFile] inclusive) and the TOTAL number of files for this model (we may only be plotting a subset of the volume).
model_tags : array of strings with length equal to the number of models.
Strings that contain the tag for each model. Will be placed on the plot.
output_tag : string
Name of the file that will be generated. File will be saved in the current directory with the output format defined by the 'output_format' variable at the beginning of the file.
Returns
-------
No returns.
Generates and saves the plot (named via output_tag).
Units
-----
No relevant units.
'''
print("Plotting quasar count/density")
if rank == 0:
fig = plt.figure()
ax1 = fig.add_subplot(111)
ax6 = ax1.twinx()
fig2 = plt.figure()
ax3 = fig2.add_subplot(111)
ax5 = ax3.twinx()
fig3 = plt.figure()
ax7 = fig3.add_subplot(111)
fig4 = plt.figure()
ax50 = fig4.add_subplot(111)
fig5 = plt.figure()
ax55 = fig5.add_subplot(111)
fig6 = plt.figure()
ax56 = fig6.add_subplot(111)
mean_quasar_activity_array = []
std_quasar_activity_array = []
N_quasar_activity_array = []
N_gal_halo_array = []
N_gal_array = []
merger_counts_halo_array = []
merger_counts_galaxy_array = []
bin_middle_halo_array = []
bin_middle_galaxy_array = []
for model_number in range(0, len(SnapList)): # Does this for each of the models.
if (fesc_prescription[model_number] != 3): # Want to skip the models that didn't count quasars.
continue
## Normalization for each model. ##
if (simulation_norm[model_number] == 0):
AllVars.Set_Params_Mysim()
elif (simulation_norm[model_number] == 1):
AllVars.Set_Params_MiniMill()
elif (simulation_norm[model_number] == 2):
AllVars.Set_Params_Tiamat()
elif (simulation_norm[model_number] == 3):
AllVars.Set_Params_Tiamat_extended()
elif (simulation_norm[model_number] == 4):
AllVars.Set_Params_Britton()
elif (simulation_norm[model_number] == 5):
AllVars.Set_Params_Kali()
mean_quasar_activity_array.append([])
std_quasar_activity_array.append([])
N_quasar_activity_array.append([])
N_gal_halo_array.append([])
N_gal_array.append([])
merger_counts_halo_array.append([])
merger_counts_galaxy_array.append([])
bin_middle_halo_array.append([])
bin_middle_galaxy_array.append([])
box_factor = (LastFile[model_number] - FirstFile[model_number] + 1.0)/(NumFile[model_number]) # This factor allows us to take a sub-volume of the box and scale the results to represent the entire box.
print("We are plotting the quasar density using {0:.4f} of the box's volume.".format(box_factor))
norm = pow(AllVars.BoxSize,3) / pow(AllVars.Hubble_h, 3) * box_factor
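# Illustrative sketch of the normalisation above (hypothetical numbers): with
# FirstFile = 0, LastFile = 7 and NumFile = 64, box_factor = 8/64 = 0.125.
# For a 100 Mpc/h box with Hubble_h = 0.678 this gives
# norm = (100**3 / 0.678**3) * 0.125, i.e. the comoving volume (in Mpc^3)
# actually read in, so dividing counts by norm yields comoving densities.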
####
## We perform the plotting on Rank 0 so only this rank requires the final counts array. ##
if rank == 0:
quasars_total = np.zeros_like((N_quasars_z[model_number]))
boost_total = np.zeros_like(N_quasars_boost_z[model_number])
gal_count_total = np.zeros_like(N_gal_z[model_number])
else:
quasars_total = None
boost_total = None
gal_count_total = None
N_quasars_tmp = np.array((N_quasars_z[model_number])) # So we can use MPI.Reduce()
comm.Reduce([N_quasars_tmp, MPI.DOUBLE], [quasars_total, MPI.DOUBLE], op = MPI.SUM, root = 0) # Sum the number of quasars across all ranks and pass back to rank 0.
N_quasars_boost_tmp = np.array(N_quasars_boost_z[model_number]) # So we can use MPI.Reduce()
comm.Reduce([N_quasars_boost_tmp, MPI.DOUBLE], [boost_total, MPI.DOUBLE], op = MPI.SUM, root = 0) # Sum the number of galaxies that had their fesc boosted.
N_gal_tmp = np.array(N_gal_z[model_number]) # So we can use MPI.Reduce()
comm.Reduce([N_gal_tmp, MPI.DOUBLE], [gal_count_total, MPI.DOUBLE], op = MPI.SUM, root = 0) # Sum the number of total galaxies.
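# The reductions above all follow the same mpi4py pattern; a minimal sketch,
# assuming a 1D numpy float64 array `local` (hypothetical name) on every rank:
#
# total = np.zeros_like(local) if rank == 0 else None # Receive buffer only needed on root.
# comm.Reduce([local, MPI.DOUBLE], [total, MPI.DOUBLE], op=MPI.SUM, root=0)
#
# After the call, rank 0 holds the element-wise sum over all ranks; the other
# ranks still have total = None, which is why plotting happens on rank 0 only.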
for snapshot_idx in range(len(SnapList[model_number])):
mean_quasar_activity_array[model_number], std_quasar_activity_array[model_number], N_quasar_activity_array[model_number] = calculate_pooled_stats(mean_quasar_activity_array[model_number], std_quasar_activity_array[model_number], N_quasar_activity_array[model_number], mean_quasar_activity[model_number][snapshot_idx], std_quasar_activity[model_number][snapshot_idx], N_halo[model_number][snapshot_idx])
if rank == 0:
merger_count_halo_total = np.zeros_like((N_merger_halo[model_number][snapshot_idx]))
N_gal_halo_total = np.zeros_like((N_halo[model_number][snapshot_idx]))
merger_count_galaxy_total = np.zeros_like((N_merger_galaxy[model_number][snapshot_idx]))
N_gal_total = np.zeros_like((N_gal[model_number][snapshot_idx]))
else:
merger_count_halo_total = None
N_gal_halo_total = None
merger_count_galaxy_total = None
N_gal_total = None
comm.Reduce([N_merger_halo[model_number][snapshot_idx], MPI.FLOAT], [merger_count_halo_total, MPI.FLOAT], op = MPI.SUM, root = 0) # Sum the halo-binned merger counts across all ranks and pass to Rank 0.
comm.Reduce([N_halo[model_number][snapshot_idx], MPI.FLOAT], [N_gal_halo_total, MPI.FLOAT], op = MPI.SUM, root = 0) # Sum the halo-binned galaxy counts across all ranks and pass to Rank 0.
comm.Reduce([N_merger_galaxy[model_number][snapshot_idx], MPI.FLOAT], [merger_count_galaxy_total, MPI.FLOAT], op = MPI.SUM, root = 0) # Sum the stellar-mass-binned merger counts across all ranks and pass to Rank 0.
comm.Reduce([N_gal[model_number][snapshot_idx], MPI.FLOAT], [N_gal_total, MPI.FLOAT], op = MPI.SUM, root = 0) # Sum the stellar-mass-binned galaxy counts across all ranks and pass to Rank 0.
if rank == 0:
merger_counts_halo_array[model_number].append(merger_count_halo_total)
N_gal_halo_array[model_number].append(N_gal_halo_total)
merger_counts_galaxy_array[model_number].append(merger_count_galaxy_total)
N_gal_array[model_number].append(N_gal_total)
bin_middle_halo_array[model_number].append(np.arange(m_low, m_high+bin_width, bin_width)[:-1] + bin_width * 0.5)
bin_middle_galaxy_array[model_number].append(np.arange(m_gal_low, m_gal_high+bin_width, bin_width)[:-1] + bin_width * 0.5)
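# The bin centres above are the left bin edges shifted by half a bin width.
# For example (hypothetical values), m_low = 8.0, m_high = 12.0 and
# bin_width = 0.1 give np.arange(8.0, 12.1, 0.1)[:-1] + 0.05, i.e. centres
# 8.05, 8.15, ..., 11.95.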
if rank == 0:
plot_count = 0
stop_plot = 0
title = model_tags[model_number]
t = np.empty(len(SnapList[model_number]))
ZZ = np.empty(len(SnapList[model_number]))
for snapshot_idx in range(0, len(SnapList[model_number])):
t[snapshot_idx] = (AllVars.t_BigBang - AllVars.Lookback_Time[SnapList[model_number][snapshot_idx]]) * 1.0e3
ZZ[snapshot_idx] = AllVars.SnapZ[SnapList[model_number][snapshot_idx]]
if (stop_plot == 0):
# print("Snapshot {0} PlotSnapshot "
#"{1}".format(SnapList[model_number][snapshot_idx], PlotList[model_number][plot_count]))
if (SnapList[model_number][snapshot_idx] == PlotList[model_number][plot_count]):
label = "z = {0:.2f}".format(AllVars.SnapZ[PlotList[model_number][plot_count]])
ax7.plot(bin_middle_halo_array[model_number][snapshot_idx], mean_quasar_activity_array[model_number][snapshot_idx], color = PlotScripts.colors[plot_count], linestyle = PlotScripts.linestyles[model_number], rasterized = True, label = label, linewidth = PlotScripts.global_linewidth)
#ax50.plot(bin_middle_halo_array[model_number][snapshot_idx], merger_counts_array[model_number][snapshot_idx] / gal_count_total[snapshot_idx], color = PlotScripts.colors[plot_count], linestyle = PlotScripts.linestyles[model_number], rasterized = True, label = label, linewidth = PlotScripts.global_linewidth)
ax50.plot(bin_middle_halo_array[model_number][snapshot_idx], merger_counts_halo_array[model_number][snapshot_idx], color = PlotScripts.colors[plot_count], linestyle = PlotScripts.linestyles[model_number], rasterized = True, label = label, linewidth = PlotScripts.global_linewidth)
#ax50.plot(bin_middle_halo_array[model_number][snapshot_idx], merger_counts_array[model_number][snapshot_idx] / N_gal_halo_array[model_number][snapshot_idx], color = PlotScripts.colors[plot_count], linestyle = PlotScripts.linestyles[model_number], rasterized = True, label = label, linewidth = PlotScripts.global_linewidth)
#ax55.plot(bin_middle_galaxy_array[model_number][snapshot_idx], merger_counts_galaxy_array[model_number][snapshot_idx], color = PlotScripts.colors[plot_count], linestyle = PlotScripts.linestyles[model_number], rasterized = True, label = label, linewidth = PlotScripts.global_linewidth)
ax55.plot(bin_middle_galaxy_array[model_number][snapshot_idx],
merger_counts_galaxy_array[model_number][snapshot_idx] / N_gal_array[model_number][snapshot_idx], color = PlotScripts.colors[plot_count], linestyle = PlotScripts.linestyles[model_number], rasterized = True, label = label, linewidth = PlotScripts.global_linewidth)
print("plot_count = {0} len(PlotList) = {1}".format(plot_count,
len(PlotList[model_number])))
plot_count += 1
print("plot_count = {0} len(PlotList) = {1}".format(plot_count,
len(PlotList[model_number])))
if (plot_count == len(PlotList[model_number])):
stop_plot = 1
print("For Snapshot {0} at t {3} there were {1} total mergers compared to {2} total galaxies.".format(snapshot_idx, np.sum(merger_counts_galaxy_array[model_number][snapshot_idx]), np.sum(gal_count_total[snapshot_idx]), t[snapshot_idx]))
if (np.sum(gal_count_total[snapshot_idx]) > 0.0 and np.sum(merger_counts_galaxy_array[model_number][snapshot_idx]) > 0.0):
ax56.scatter(t[snapshot_idx], np.sum(merger_counts_galaxy_array[model_number][snapshot_idx]) / np.sum(gal_count_total[snapshot_idx]), color = 'r', rasterized = True)
#ax56.scatter(t[snapshot_idx], quasars_total[snapshot_idx] / np.sum(gal_count_total[snapshot_idx]), color = 'r', rasterized = True)
ax1.plot(t, quasars_total / norm, color = PlotScripts.colors[model_number], linestyle = PlotScripts.linestyles[0], rasterized = True, linewidth = PlotScripts.global_linewidth)
p = np.where((ZZ < 15))[0]
#ax1.plot(ZZ[p], quasars_total[p] / norm, color = PlotScripts.colors[model_number], linestyle = PlotScripts.linestyles[0], rasterized = True, linewidth = PlotScripts.global_linewidth)
ax3.plot(t, boost_total, color = PlotScripts.colors[model_number], linestyle = PlotScripts.linestyles[0], rasterized = True, label = title, linewidth = PlotScripts.global_linewidth)
w = np.where((gal_count_total > 0.0))[0] # Since we're doing a division, need to only plot those redshifts that actually have galaxies.
ax5.plot(t[w], np.divide(boost_total[w], gal_count_total[w]), color = PlotScripts.colors[model_number], linestyle = PlotScripts.linestyles[1], rasterized = True, linewidth = PlotScripts.global_linewidth)
ax6.plot(t[w], gal_count_total[w] / norm, color = PlotScripts.colors[model_number], linestyle = PlotScripts.linestyles[1], rasterized = True, linewidth = PlotScripts.global_linewidth)
#ax6.plot(ZZ[p], gal_count_total[p] / norm, color = PlotScripts.colors[model_number], linestyle = PlotScripts.linestyles[1], rasterized = True, linewidth = PlotScripts.global_linewidth)
ax1.plot(np.nan, np.nan, color = PlotScripts.colors[0], linestyle = PlotScripts.linestyles[0], label = "Quasar Ejection Density")
ax1.plot(np.nan, np.nan, color = PlotScripts.colors[0], linestyle = PlotScripts.linestyles[1], label = "Galaxy Density")
ax3.plot(np.nan, np.nan, color = 'k', linestyle = PlotScripts.linestyles[0], label = "Count")
ax3.plot(np.nan, np.nan, color = 'k', linestyle = PlotScripts.linestyles[1], label = "Fraction of Galaxies")
ax7.set_xlabel(r'$\log_{10}\ M_\mathrm{vir}\ [M_{\odot}]$', size = PlotScripts.global_fontsize)
ax7.set_ylabel(r'$\mathrm{Mean \: Quasar \: Activity}$', size = PlotScripts.global_fontsize)
ax50.set_xlabel(r'$\log_{10}\ M_\mathrm{vir}\ [M_{\odot}]$', size = PlotScripts.global_fontsize)
#ax50.set_ylabel(r'$\mathrm{Fraction \: Galaxies \: Undergoing \: Merger}$', size = PlotScripts.global_fontsize)
ax50.set_ylabel(r'$\mathrm{Number \: Galaxies \: Undergoing \: Merger}$', size = PlotScripts.global_fontsize)
ax55.set_xlabel(r'$\log_{10}\ M_\mathrm{*}\ [M_{\odot}]$', size = PlotScripts.global_fontsize)
ax55.set_ylabel(r'$\mathrm{Fraction \: Galaxies \: Undergoing \: Merger}$', size = PlotScripts.global_fontsize)
#ax55.set_ylabel(r'$\mathrm{Number \: Galaxies \: Undergoing \: Merger}$', size = PlotScripts.global_fontsize)
ax56.set_xlabel(r"$\mathrm{Time \: Since \: Big \: Bang \: [Myr]}$", size = PlotScripts.global_labelsize)
ax56.set_ylabel(r'$\mathrm{Fraction \: Galaxies \: Undergoing \: Merger}$', size = PlotScripts.global_fontsize)
#ax56.set_ylabel(r'$\mathrm{Fraction \: Galaxies \: Quasar \: Activity}$', size = PlotScripts.global_fontsize)
ax56.set_yscale('log', nonposy='clip')
ax50.axvline(np.log10(32.0*AllVars.PartMass / AllVars.Hubble_h), color = 'k', linewidth = PlotScripts.global_linewidth, linestyle = '-.')
ax1.xaxis.set_minor_locator(mtick.MultipleLocator(PlotScripts.time_tickinterval))
ax1.set_xlim(PlotScripts.time_xlim)
ax1.set_yscale('log', nonposy='clip')
ax3.xaxis.set_minor_locator(mtick.MultipleLocator(PlotScripts.time_tickinterval))
ax3.set_xlim(PlotScripts.time_xlim)
ax3.set_yscale('log', nonposy='clip')
## Create a second axis at the top that contains the corresponding redshifts. ##
## The redshift defined in the variable 'z_plot' will be displayed. ##
ax2 = ax1.twiny()
ax4 = ax3.twiny()
ax57 = ax56.twiny()
t_plot = (AllVars.t_BigBang - AllVars.cosmo.lookback_time(PlotScripts.z_plot).value) * 1.0e3 # Corresponding time values on the bottom.
z_labels = ["$%d$" % x for x in PlotScripts.z_plot] # Properly Latex-ize the labels.
ax2.set_xlabel(r"$z$", size = PlotScripts.global_labelsize)
ax2.set_xlim(PlotScripts.time_xlim)
ax2.set_xticks(t_plot) # Set the ticks according to the time values on the bottom,
ax2.set_xticklabels(z_labels) # But label them as redshifts.
ax4.set_xlabel(r"$z$", size = PlotScripts.global_labelsize)
ax4.set_xlim(PlotScripts.time_xlim)
ax4.set_xticks(t_plot) # Set the ticks according to the time values on the bottom,
ax4.set_xticklabels(z_labels) # But label them as redshifts.
ax57.set_xlabel(r"$z$", size = PlotScripts.global_labelsize)
ax57.set_xlim(PlotScripts.time_xlim)
ax57.set_xticks(t_plot) # Set the ticks according to the time values on the bottom,
ax57.set_xticklabels(z_labels) # But label them as redshifts.
ax1.set_xlabel(r"$\mathrm{Time \: Since \: Big \: Bang \: [Myr]}$", size = PlotScripts.global_labelsize)
#ax1.set_xlabel(r"$z$", size = PlotScripts.global_labelsize)
ax1.set_ylabel(r'$N_\mathrm{Quasars} \: [\mathrm{Mpc}^{-3}]$', fontsize = PlotScripts.global_fontsize)
ax6.set_ylabel(r'$N_\mathrm{Gal} \: [\mathrm{Mpc}^{-3}]$', fontsize = PlotScripts.global_fontsize)
ax3.set_xlabel(r"$\mathrm{Time \: Since \: Big \: Bang \: [Myr]}$", size = PlotScripts.global_labelsize)
ax3.set_ylabel(r'$N_\mathrm{Boosted}$', fontsize = PlotScripts.global_fontsize)
ax5.set_ylabel(r'$\mathrm{Fraction \: Boosted}$', fontsize = PlotScripts.global_fontsize)
leg = ax1.legend(loc='lower right', numpoints=1, labelspacing=0.1)
leg.draw_frame(False) # Don't want a box frame
for t in leg.get_texts(): # Reduce the size of the text
t.set_fontsize(PlotScripts.global_legendsize)
leg = ax3.legend(loc='lower left', numpoints=1, labelspacing=0.1)
leg.draw_frame(False) # Don't want a box frame
for t in leg.get_texts(): # Reduce the size of the text
t.set_fontsize(PlotScripts.global_legendsize)
leg = ax7.legend(loc='upper left', numpoints=1, labelspacing=0.1)
leg.draw_frame(False) # Don't want a box frame
for t in leg.get_texts(): # Reduce the size of the text
t.set_fontsize(PlotScripts.global_legendsize)
leg = ax50.legend(loc='upper right', numpoints=1, labelspacing=0.1)
leg.draw_frame(False) # Don't want a box frame
for t in leg.get_texts(): # Reduce the size of the text
t.set_fontsize(PlotScripts.global_legendsize)
leg = ax55.legend(loc='upper right', numpoints=1, labelspacing=0.1)
leg.draw_frame(False) # Don't want a box frame
for t in leg.get_texts(): # Reduce the size of the text
t.set_fontsize(PlotScripts.global_legendsize)
fig.tight_layout()
fig2.tight_layout()
fig3.tight_layout()
fig4.tight_layout()
fig5.tight_layout()
fig6.tight_layout()
outputFile1 = './{0}_quasardensity{1}'.format(output_tag, output_format)
outputFile2 = './{0}_boostedcount{1}'.format(output_tag, output_format)
outputFile3 = './{0}_quasar_activity_halo{1}'.format(output_tag, output_format)
outputFile4 = './{0}_mergercount_global{1}'.format(output_tag, output_format)
outputFile5 = './{0}_mergercount_global_stellarmass{1}'.format(output_tag, output_format)
outputFile6 = './{0}_mergercount_total{1}'.format(output_tag, output_format)
fig.savefig(outputFile1) # Save the figure
fig2.savefig(outputFile2) # Save the figure
fig3.savefig(outputFile3) # Save the figure
fig4.savefig(outputFile4) # Save the figure
fig5.savefig(outputFile5) # Save the figure
fig6.savefig(outputFile6) # Save the figure
print("Saved to {0}".format(outputFile1))
print("Saved to {0}".format(outputFile2))
print("Saved to {0}".format(outputFile3))
print("Saved to {0}".format(outputFile4))
print("Saved to {0}".format(outputFile5))
print("Saved to {0}".format(outputFile6))
plt.close(fig)
plt.close(fig2)
plt.close(fig3)
plt.close(fig4)
plt.close(fig5)
plt.close(fig6)
##
def plot_photon_quasar_fraction(snapshot, filenr, output_tag, QuasarFractionalPhoton, QuasarActivityToggle, NumSubsteps):
ax1 = plt.subplot(111)
counts, bin_edges, bin_middle = AllVars.Calculate_Histogram(QuasarFractionalPhoton, 0.05, 0, 0, 1)
ax1.plot(bin_middle, counts, lw = PlotScripts.global_linewidth, color = 'r')
ax1.axvline(np.mean(QuasarFractionalPhoton[QuasarFractionalPhoton != 0]), lw = 0.5, ls = '-')
ax1.set_yscale('log', nonposy='clip')
ax1.set_xlabel(r"$\mathrm{Fractional \: Photon \: Boost}$")
ax1.set_ylabel(r"$\mathrm{Count}$")
ax1.set_ylim([1e1, 1e5])
outputFile1 = './photonfraction/file{0}_snap{1}_{2}{3}'.format(filenr, snapshot, output_tag, output_format)
plt.tight_layout()
plt.savefig(outputFile1)
print("Saved to {0}".format(outputFile1))
plt.close()
###
def plot_quasar_substep(snapshot, filenr, output_tag, substep):
ax1 = plt.subplot(111)
counts, bin_edges, bin_middle = AllVars.Calculate_Histogram(substep, 0.1, 0, 0, 10)
ax1.plot(bin_middle, counts, lw = PlotScripts.global_linewidth, color = 'r')
ax1.axvline(np.mean(substep[substep != -1]), lw = 0.5, ls = '-')
ax1.set_yscale('log', nonposy='clip')
ax1.set_xlabel(r"$\mathrm{Substep \: Quasar \: Activity}$")
ax1.set_ylabel(r"$\mathrm{Count}$")
# ax1.set_ylim([1e1, 1e5])
outputFile1 = './substep_activity/file{0}_snap{1}_{2}{3}'.format(filenr, snapshot, output_tag, output_format)
plt.tight_layout()
plt.savefig(outputFile1)
print("Saved to {0}".format(outputFile1))
plt.close()
###
def plot_post_quasar_SFR(PlotSnapList, model_number, Gal, output_tag):
ax1 = plt.subplot(111)
ax2 = ax1.twinx()
count = 0
snapshot_thickness = 20 # How many snapshots before/after the quasar event do we want to track?
for snapshot_idx in PlotSnapList[model_number]:
w = np.where((G.QuasarActivity[:, snapshot_idx] == 1) & (G.LenHistory[:, snapshot_idx] > 200.0) & (G.GridStellarMass[:, snapshot_idx] > 0.001))[0]
w_slice_gridhistory = G.GridHistory[w,snapshot_idx-snapshot_thickness:snapshot_idx+snapshot_thickness]
potential_gal = []
for i in range(len(w_slice_gridhistory)):
ww = np.where((w_slice_gridhistory[i] >= 0))[0]
if (len(ww) == snapshot_thickness * 2):
potential_gal.append(w[i])
if (len(potential_gal) < 2): # Need at least two candidates because we select potential_gal[1] below.
return
count += 1
print("There were {0} galaxies that had an energetic quasar wind event at snapshot {1} (z = {2:.3f})".format(len(potential_gal), snapshot_idx, AllVars.SnapZ[snapshot_idx]))
chosen_gal = potential_gal[1]
lenhistory_array = np.empty((int(snapshot_thickness*2 + 1)))
SFR_array = np.empty((int(snapshot_thickness*2 + 1)))
gridhistory_array = np.empty((int(snapshot_thickness*2 + 1)))
coldgas_array = np.empty((int(snapshot_thickness*2 + 1)))
t = np.empty((int(snapshot_thickness*2 + 1)))
for i in range(-snapshot_thickness, snapshot_thickness+1):
#print("SFR {0} {1}".format(snapshot_idx + i, G.GridSFR[chosen_gal, snapshot_idx+i]))
#print("ColdGas {0} {1}".format(snapshot_idx + i, G.GridColdGas[chosen_gal, snapshot_idx+i]))
lenhistory_array[i+snapshot_thickness] = (G.LenHistory[chosen_gal, snapshot_idx+i])
SFR_array[i+snapshot_thickness] = (G.GridSFR[chosen_gal, snapshot_idx+i]) #- (G.GridSFR[chosen_gal, snapshot_idx])
gridhistory_array[i+snapshot_thickness] = (G.GridHistory[chosen_gal, snapshot_idx+i])
coldgas_array[i+snapshot_thickness] = (G.GridColdGas[chosen_gal, snapshot_idx+i] * 1.0e10 / AllVars.Hubble_h) #- (G.GridColdGas[chosen_gal, snapshot_idx])
t[i+snapshot_thickness] = (-AllVars.Lookback_Time[snapshot_idx+i] + AllVars.Lookback_Time[snapshot_idx]) * 1.0e3
print("Len History {0}".format(lenhistory_array))
print("Grid History {0}".format(gridhistory_array))
print("Cold Gas {0}".format(coldgas_array))
print("SFR {0}".format(SFR_array))
stellarmass_text = r"$\log M_* = {0:.2f} \: M_\odot$".format(np.log10(G.GridStellarMass[chosen_gal, snapshot_idx] * 1.0e10 / AllVars.Hubble_h))
Ndym_text = "Dynamical Time = {0:.2f} Myr".format(G.DynamicalTime[chosen_gal, snapshot_idx])
z_text = "z = {0:.2f}".format(AllVars.SnapZ[snapshot_idx])
ax1.text(0.05, 0.95, z_text, transform = ax1.transAxes, fontsize = PlotScripts.global_fontsize - 4)
ax1.text(0.05, 0.9, stellarmass_text, transform = ax1.transAxes, fontsize = PlotScripts.global_fontsize - 4)
ax1.text(0.05, 0.85, Ndym_text, transform = ax1.transAxes, fontsize = PlotScripts.global_fontsize - 4)
ax1.plot(t, SFR_array, color = 'r', lw = PlotScripts.global_linewidth)
ax2.plot(t, coldgas_array, color = 'b', lw = PlotScripts.global_linewidth)
ax1.set_xlabel(r"$\mathrm{Time \: Since \: Quasar \: Event \: [Myr]}$", size = PlotScripts.global_labelsize - 10)
# ax1.set_ylabel(r"$\mathrm{Fractional \: SFR \: Relative \: To \: SFR_{Quasar}}$", size = PlotScripts.global_labelsize - 10)
# ax2.set_ylabel(r"$\mathrm{Difference \: Cold \: Gas \: Mass \: Relative \: To \: Cold_{Quasar}}$", size = PlotScripts.global_labelsize - 10)
ax1.set_ylabel(r"$\mathrm{SFR} \: [\mathrm{M}_\odot \mathrm{yr}^{-1}]$", size = PlotScripts.global_labelsize - 10)
ax2.set_ylabel(r"$\mathrm{Cold \: Gas \: Mass \: [\mathrm{M}_\odot]}$",size = PlotScripts.global_labelsize - 10)
ax1.set_yscale('log', nonposy='clip')
ax2.set_yscale('log', nonposy='clip')
ax1.plot(np.nan, np.nan, color = 'r', label = r"$\mathrm{SFR}$")
ax1.plot(np.nan, np.nan, color = 'b', label = r"$\mathrm{Cold \: Gas}$")
leg = ax1.legend(loc='upper right', numpoints=1, labelspacing=0.1)
leg.draw_frame(False) # Don't want a box frame
for t in leg.get_texts(): # Reduce the size of the text
t.set_fontsize(PlotScripts.global_legendsize)
outputFile = "{0}_galaxy{2}{1}".format(output_tag, output_format, chosen_gal)
plt.tight_layout()
plt.savefig(outputFile)
print("Saved to {0}".format(outputFile))
plt.close()
exit()
###
def plot_stellarmass_blackhole(SnapList, simulation_norm, mean_galaxy_BHmass,
std_galaxy_BHmass, N_galaxy_BHmass, FirstFile,
LastFile, NumFile, model_tags, output_tag):
master_mean_SMBH, master_std_SMBH, master_N, master_bin_middle = \
collect_across_tasks(mean_galaxy_BHmass, std_galaxy_BHmass,
N_galaxy_BHmass, SnapList, SnapList, True,
m_gal_low, m_gal_high)
if rank == 0:
fig = plt.figure()
ax1 = fig.add_subplot(111)
fig2 = plt.figure()
ax2 = fig2.add_subplot(111)
for model_number in range(0, len(SnapList)):
## Normalization for each model. ##
if (simulation_norm[model_number] == 0):
AllVars.Set_Params_Mysim()
elif (simulation_norm[model_number] == 1):
AllVars.Set_Params_MiniMill()
elif (simulation_norm[model_number] == 2):
AllVars.Set_Params_Tiamat()
elif (simulation_norm[model_number] == 3):
AllVars.Set_Params_Tiamat_extended()
elif (simulation_norm[model_number] == 4):
AllVars.Set_Params_Britton()
elif(simulation_norm[model_number] == 5):
AllVars.Set_Params_Kali()
box_factor = (LastFile[model_number] - FirstFile[model_number] + 1.0)/(NumFile[model_number]) # This factor allows us to take a sub-volume of the box and scale the results to represent the entire box.
norm = pow(AllVars.BoxSize,3) / pow(AllVars.Hubble_h, 3) * bin_width * box_factor
for snapshot_idx in range(0, len(SnapList[model_number])):
w = np.where((master_N[model_number][snapshot_idx] > 0.0))[0]
mean = np.log10(master_mean_SMBH[model_number][snapshot_idx][w])
upper = np.log10(np.add(master_mean_SMBH[model_number][snapshot_idx][w],
master_std_SMBH[model_number][snapshot_idx][w]))
lower = np.log10(np.subtract(master_mean_SMBH[model_number][snapshot_idx][w],
master_std_SMBH[model_number][snapshot_idx][w]))
label = "z = {0:.2f}" \
.format(AllVars.SnapZ[SnapList[model_number][snapshot_idx]])
ax1.plot(master_bin_middle[model_number][snapshot_idx][w],
mean, label = label, color = PlotScripts.colors[snapshot_idx],
ls = PlotScripts.linestyles[model_number],
lw = PlotScripts.global_linewidth, rasterized = True)
#ax1.fill_between(bin_middle_stellar_array[model_number][snapshot_idx][w], lower, upper, color = PlotScripts.colors[model_number], alpha = 0.25)
ax2.plot(master_bin_middle[model_number][snapshot_idx][w],
master_N[model_number][snapshot_idx][w] / norm,
label = label, ls = PlotScripts.linestyles[model_number],
lw = PlotScripts.global_linewidth, rasterized = True)
Obs.Get_Data_SMBH()
PlotScripts.plot_SMBH_z8(ax1)
ax1.set_xlabel(r"$\log_{10}\mathrm{M}_* [\mathrm{M}_\odot]$", size = PlotScripts.global_fontsize)
ax1.set_ylabel(r"$\log_{10}\mathrm{M}_\mathrm{BH} [\mathrm{M}_\odot]$", size = PlotScripts.global_fontsize)
ax2.set_xlabel(r"$\log_{10}\mathrm{M}_\mathrm{BH} [\mathrm{M}_\odot]$", size = PlotScripts.global_fontsize)
ax2.set_ylabel(r'$\Phi\ [\mathrm{Mpc}^{-3}\: \mathrm{dex}^{-1}]$', fontsize = PlotScripts.global_fontsize)
ax2.set_yscale('log', nonposy='clip')
ax1.set_xticks(np.arange(7.0, 12.0))
ax1.set_yticks(np.arange(3.0, 12.0))
ax1.xaxis.set_minor_locator(mtick.MultipleLocator(0.25))
ax1.yaxis.set_minor_locator(mtick.MultipleLocator(0.25))
ax1.set_xlim([7.0, 10.25])
ax1.set_ylim([3.0, 8.0])
leg = ax1.legend(loc='upper left', numpoints=1, labelspacing=0.1)
leg.draw_frame(False) # Don't want a box frame
for t in leg.get_texts(): # Reduce the size of the text
t.set_fontsize(PlotScripts.global_legendsize)
leg = ax2.legend(loc='lower left', numpoints=1, labelspacing=0.1)
leg.draw_frame(False) # Don't want a box frame
for t in leg.get_texts(): # Reduce the size of the text
t.set_fontsize(PlotScripts.global_legendsize)
outputFile = "{0}{1}".format(output_tag, output_format)
fig.tight_layout()
fig.savefig(outputFile)
print("Saved to {0}".format(outputFile))
plt.close(fig)
outputFile2 = "{0}_MF{1}".format(output_tag, output_format)
fig2.tight_layout()
fig2.savefig(outputFile2)
print("Saved to {0}".format(outputFile2))
plt.close(fig2)
###
def plot_reionmod(PlotSnapList, SnapList, simulation_norm, mean_reionmod_halo,
std_reionmod_halo, N_halo, mean_reionmod_z, std_reionmod_z,
N_reionmod, plot_z, model_tags, output_tag):
"""
Plot the reionization modifier as a function of halo mass and redshift.
Parameters
----------
PlotSnapList, SnapList: 2D Nested arrays of integers. Outer length is equal to the number of models and inner length is number of snapshots we're plotting/calculated for.
PlotSnapList contains the snapshots for each model we will plot for the halo mass figure.
SnapList contains the snapshots for each model that we have performed calculations for. These aren't equal because we don't want to plot halo curves for ALL redshifts.
simulation_norm: Array of integers. Length is equal to the number of models.
Contains the simulation identifier for each model. Used to set the parameters of each model.
mean_reionmod_halo, std_reionmod_halo: 3D Nested arrays of floats. Outer-most length is equal to the number of models, next length is the number of snapshots for each model, then the inner-most length is the number of halo mass bins (given by NB).
Contains the mean/standard deviation values for the reionization modifier as a function of halo mass.
NOTE: These are unique for each task.
N_halo: 3D Nested arrays of floats. Lengths are identical to mean_reionmod_halo.
Contains the number of halos in each halo mass bin.
NOTE: These are unique for each task.
mean_reionmod_z, std_reionmod_z: 2D Nested arrays of floats. Outer length is equal to the number of models, inner length is the number of snapshots for each model. NOTE: This inner length can be different to the length of PlotSnapList as we don't necessarily need to plot for every snapshot we calculate.
Contains the mean/standard deviation values for the reionization modifier as a function of redshift.
NOTE: These are unique for each task.
N_reionmod: 2D Nested arrays of floats. Lengths are identical to mean_reionmod_z.
Contains the number of galaxies at each redshift that have a non-negative reionization modifier. A negative reionization modifier corresponds to a galaxy that didn't have infall/stripping during the snapshot.
NOTE: These are unique for each task.
plot_z: Boolean.
Denotes whether we want to plot the reionization modifier as a function
of redshift. Useful because we often only calculate statistics for a
subset of the snapshots to decrease computation time. For these runs,
we don't want to plot for something that requires ALL snapshots.
model_tags: Array of strings. Length is equal to the number of models.
Contains the legend labels for each model.
output_tag: String.
The prefix for the output file.
Returns
----------
None. Plot is saved in current directory as "./<output_tag>.<output_format>"
"""
master_mean_reionmod_halo, master_std_reionmod_halo, \
master_N_reionmod_halo, master_bin_middle = collect_across_tasks(mean_reionmod_halo,
std_reionmod_halo,
N_halo, SnapList,
PlotSnapList, True,
m_low, m_high)
if plot_z:
master_mean_reionmod_z, master_std_reionmod_z, master_N_reionmod_z, _ = collect_across_tasks(mean_reionmod_z,
std_reionmod_z,
N_reionmod)
if rank == 0:
fig1 = plt.figure()
ax1 = fig1.add_subplot(111)
if plot_z:
fig2 = plt.figure()
ax10 = fig2.add_subplot(111)
for model_number in range(len(PlotSnapList)):
if(simulation_norm[model_number] == 1):
cosmo = AllVars.Set_Params_MiniMill()
elif(simulation_norm[model_number] == 3):
cosmo = AllVars.Set_Params_Tiamat_extended()
elif(simulation_norm[model_number] == 4):
cosmo = AllVars.Set_Params_Britton()
elif(simulation_norm[model_number] == 5):
cosmo = AllVars.Set_Params_Kali()
for snapshot_idx in range(len((PlotSnapList[model_number]))):
if snapshot_idx == 0:
label = model_tags[model_number]
else:
label = ""
nonzero_bins = np.where(master_N_reionmod_halo[model_number][snapshot_idx] > 0.0)[0]
ax1.plot(master_bin_middle[model_number][snapshot_idx][nonzero_bins],
master_mean_reionmod_halo[model_number][snapshot_idx][nonzero_bins],
label = label, ls = PlotScripts.linestyles[model_number],
color = PlotScripts.colors[snapshot_idx])
if plot_z:
ax10.plot((AllVars.t_BigBang - AllVars.Lookback_Time[SnapList[model_number]])*1.0e3, master_mean_reionmod_z[model_number], color = PlotScripts.colors[model_number], label = model_tags[model_number], ls = PlotScripts.linestyles[model_number], lw = 3)
for count, snapshot_idx in enumerate(PlotSnapList[model_number]):
#label = r"$\mathbf{z = " + str(int(round(AllVars.SnapZ[snapshot_idx]))) + "}$"
label = r"$\mathbf{z = " + str(AllVars.SnapZ[snapshot_idx]) + "}$"
ax1.plot(np.nan, np.nan, ls = PlotScripts.linestyles[0], color =
PlotScripts.colors[count], label = label)
ax1.set_xlim([8.5, 11.5])
ax1.set_ylim([0.0, 1.05])
ax1.set_xlabel(r'$\mathbf{log_{10} \: M_{vir} \:[M_{\odot}]}$', fontsize = PlotScripts.global_labelsize)
ax1.set_ylabel(r'$\mathbf{Mean \: ReionMod}$', fontsize = PlotScripts.global_labelsize)
leg = ax1.legend(loc='lower right', numpoints=1, labelspacing=0.1)
leg.draw_frame(False) # Don't want a box frame
for t in leg.get_texts(): # Reduce the size of the text
t.set_fontsize(PlotScripts.global_legendsize)
outputFile1 = "./{0}_halo{1}".format(output_tag, output_format)
fig1.savefig(outputFile1, bbox_inches='tight') # Save the figure
print('Saved file to {0}'.format(outputFile1))
plt.close(fig1)
if plot_z:
ax10.set_xlabel(r"$\mathbf{Time \: since \: Big \: Bang \: [Myr]}$", fontsize = PlotScripts.global_labelsize)
tick_locs = np.arange(200.0, 1000.0, 100.0)
tick_labels = [r"$\mathbf{%d}$" % x for x in tick_locs]
ax10.xaxis.set_major_locator(mtick.MultipleLocator(100))
ax10.set_xticklabels(tick_labels, fontsize = PlotScripts.global_fontsize)
ax10.set_xlim(PlotScripts.time_xlim)
ax10.set_ylabel(r'$\mathbf{Mean \: ReionMod}$', fontsize = PlotScripts.global_labelsize)
ax11 = ax10.twiny()
t_plot = (AllVars.t_BigBang - cosmo.lookback_time(PlotScripts.z_plot).value) * 1.0e3 # Corresponding Time values on the bottom.
z_labels = ["$\mathbf{%d}$" % x for x in PlotScripts.z_plot] # Properly Latex-ize the labels.
ax11.set_xlabel(r"$\mathbf{z}$", fontsize = PlotScripts.global_labelsize)
ax11.set_xlim(PlotScripts.time_xlim)
ax11.set_xticks(t_plot) # Set the ticks according to the time values on the bottom,
ax11.set_xticklabels(z_labels, fontsize = PlotScripts.global_fontsize) # But label them as redshifts.
leg = ax10.legend(loc='lower right', numpoints=1, labelspacing=0.1)
leg.draw_frame(False) # Don't want a box frame
for t in leg.get_texts(): # Reduce the size of the text
t.set_fontsize(PlotScripts.global_legendsize)
outputFile2 = "./{0}_z{1}".format(output_tag, output_format)
fig2.savefig(outputFile2, bbox_inches='tight') # Save the figure
print('Saved file to {0}'.format(outputFile2))
plt.close(fig2)
##
def plot_dust(PlotSnapList, SnapList, simulation_norm, mean_dust_galaxy, std_dust_galaxy,
N_galaxy, mean_dust_halo, std_dust_halo, N_halo, plot_z,
model_tags, output_tag):
"""
"""
master_mean_dust_galaxy, master_std_dust_galaxy, master_N_dust_galaxy, master_bin_middle_galaxy = \
collect_across_tasks(mean_dust_galaxy, std_dust_galaxy, N_galaxy, SnapList,
PlotSnapList, True, m_gal_low, m_gal_high)
master_mean_dust_halo, master_std_dust_halo, master_N_dust_halo, master_bin_middle_halo = \
collect_across_tasks(mean_dust_halo, std_dust_halo, N_halo, SnapList,
PlotSnapList, True, m_low, m_high)
if rank == 0:
fig1 = plt.figure()
ax1 = fig1.add_subplot(111)
fig2 = plt.figure()
ax2 = fig2.add_subplot(111)
for model_number in range(len(PlotSnapList)):
if(simulation_norm[model_number] == 1):
cosmo = AllVars.Set_Params_MiniMill()
elif(simulation_norm[model_number] == 3):
cosmo = AllVars.Set_Params_Tiamat_extended()
elif(simulation_norm[model_number] == 4):
cosmo = AllVars.Set_Params_Britton()
elif(simulation_norm[model_number] == 5):
cosmo = AllVars.Set_Params_Kali()
for snapshot_idx in range(len((PlotSnapList[model_number]))):
if snapshot_idx == 0:
label = model_tags[model_number]
else:
label = ""
nonzero_bins = np.where(master_N_dust_galaxy[model_number][snapshot_idx] > 0.0)[0]
ax1.plot(master_bin_middle_galaxy[model_number][snapshot_idx][nonzero_bins],
master_mean_dust_galaxy[model_number][snapshot_idx][nonzero_bins],
label = label, ls = PlotScripts.linestyles[model_number],
color = PlotScripts.colors[snapshot_idx])
nonzero_bins = np.where(master_N_dust_halo[model_number][snapshot_idx] > 0.0)[0]
ax2.plot(master_bin_middle_halo[model_number][snapshot_idx][nonzero_bins],
master_mean_dust_halo[model_number][snapshot_idx][nonzero_bins],
label = label, ls = PlotScripts.linestyles[model_number],
color = PlotScripts.colors[snapshot_idx])
print(master_mean_dust_halo[model_number][snapshot_idx])
for count, snapshot_idx in enumerate(PlotSnapList[model_number]):
#label = r"$\mathbf{z = " + str(int(round(AllVars.SnapZ[snapshot_idx]))) + "}$"
label = r"$\mathbf{z = " + str(AllVars.SnapZ[snapshot_idx]) + "}$"
ax1.plot(np.nan, np.nan, ls = PlotScripts.linestyles[0], color =
PlotScripts.colors[count], label = label)
ax2.plot(np.nan, np.nan, ls = PlotScripts.linestyles[0], color =
PlotScripts.colors[count], label = label)
ax1.set_xlim([2.0, 10.5])
#ax1.set_ylim([1.0, 6.0])
ax1.set_xlabel(r'$\mathbf{log_{10} \: M_{*} \:[M_{\odot}]}$', fontsize = PlotScripts.global_labelsize)
ax1.set_ylabel(r'$\mathbf{log_{10} \: \langle M_{Dust}\rangle_{M*}}$', fontsize = PlotScripts.global_labelsize)
leg = ax1.legend(loc='upper left', numpoints=1, labelspacing=0.1)
leg.draw_frame(False) # Don't want a box frame
for t in leg.get_texts(): # Reduce the size of the text
t.set_fontsize(PlotScripts.global_legendsize)
outputFile1 = "./{0}_galaxy{1}".format(output_tag, output_format)
fig1.savefig(outputFile1, bbox_inches='tight') # Save the figure
print('Saved file to {0}'.format(outputFile1))
plt.close(fig1)
ax2.set_xlim([6.8, 11.5])
#ax2.set_ylim([1.0, 6.0])
ax2.set_xlabel(r'$\mathbf{log_{10} \: M_{vir} \:[M_{\odot}]}$', fontsize = PlotScripts.global_labelsize)
ax2.set_ylabel(r'$\mathbf{log_{10} \: \langle M_{Dust}\rangle_{Mvir}}$', fontsize = PlotScripts.global_labelsize)
leg = ax2.legend(loc='upper left', numpoints=1, labelspacing=0.1)
leg.draw_frame(False) # Don't want a box frame
for t in leg.get_texts(): # Reduce the size of the text
t.set_fontsize(PlotScripts.global_legendsize)
outputFile2 = "./{0}_halo{1}".format(output_tag, output_format)
fig2.savefig(outputFile2, bbox_inches='tight') # Save the figure
print('Saved file to {0}'.format(outputFile2))
plt.close(fig2)
def plot_dust_scatter(SnapList, mass_gal, mass_halo, mass_dust, output_tag):
fig1 = plt.figure()
ax1 = fig1.add_subplot(111)
fig2 = plt.figure()
ax2 = fig2.add_subplot(111)
fig3 = plt.figure()
ax3 = fig3.add_subplot(111, projection='3d')
fig4 = plt.figure()
ax4 = fig4.add_subplot(111)
ax1.scatter(mass_gal, mass_dust)
ax2.scatter(mass_halo, mass_dust)
#ax3.scatter(mass_gal, mass_halo, mass_dust)
hb = ax4.hexbin(mass_halo, mass_dust, bins='log', cmap='inferno')
ax1.set_xlabel(r'$\mathbf{log_{10} \: M_{*} \:[M_{\odot}]}$', fontsize = PlotScripts.global_labelsize)
ax1.set_ylabel(r'$\mathbf{log_{10} \: M_{Dust}}$', fontsize = PlotScripts.global_labelsize)
ax2.set_xlabel(r'$\mathbf{log_{10} \: M_{vir} \:[M_{\odot}]}$', fontsize = PlotScripts.global_labelsize)
ax2.set_ylabel(r'$\mathbf{log_{10} \: M_{Dust}}$', fontsize = PlotScripts.global_labelsize)
ax4.set_xlabel(r'$\mathbf{log_{10} \: M_{vir} \:[M_{\odot}]}$', fontsize = PlotScripts.global_labelsize)
ax4.set_ylabel(r'$\mathbf{log_{10} \: M_{Dust}}$', fontsize = PlotScripts.global_labelsize)
cb = fig4.colorbar(hb, ax=ax4)
cb.set_label('log10(N)')
outputFile1 = "./{0}_galaxy{1}".format(output_tag, output_format)
fig1.savefig(outputFile1, bbox_inches='tight') # Save the figure
print('Saved file to {0}'.format(outputFile1))
plt.close(fig1)
outputFile2 = "./{0}_halo{1}".format(output_tag, output_format)
fig2.savefig(outputFile2, bbox_inches='tight') # Save the figure
print('Saved file to {0}'.format(outputFile2))
plt.close(fig2)
#outputFile3 = "./{0}_3D{1}".format(output_tag, output_format)
#fig3.savefig(outputFile3, bbox_inches='tight') # Save the figure
#print('Saved file to {0}'.format(outputFile3))
#plt.close(fig3)
outputFile4 = "./{0}_hexbin{1}".format(output_tag, output_format)
fig4.savefig(outputFile4, bbox_inches='tight') # Save the figure
print('Saved file to {0}'.format(outputFile4))
plt.close(fig4)
### Here ends the plotting functions. ###
### Here begins the functions that calculate various properties for the galaxies (fesc, Magnitude etc). ###
def Calculate_HaloPartStellarMass(halo_part, stellar_mass, bound_low, bound_high):
'''
Calculates the stellar mass for galaxies whose host halos contain a specified number of particles.
Parameters
----------
halo_part : array
Array containing the number of particles inside each halo.
stellar_mass : array
Array containing the Stellar Mass for each galaxy (entries align with HaloPart). Units of log10(Msun).
bound_low, bound_high : int
We calculate the Stellar Mass of galaxies whose host halo has bound_low <= halo_part <= bound_high.
Returns
-------
mass, mass_std : float
Mean and standard deviation of the stellar mass for galaxies whose host halo has a particle count between the specified bounds. Units of log10(Msun).
Units
-----
Input Stellar Mass is in units of log10(Msun).
Output mean/std Stellar Mass is in units of log10(Msun).
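Examples
--------
Illustrative only (hypothetical values); the second and third halos fall inside the particle-count bounds:
>>> m, s = Calculate_HaloPartStellarMass(np.array([10, 40, 80]),
...                                      np.array([7.0, 8.0, 9.0]), 32, 100)
>>> round(float(m), 2)
8.74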
'''
w = np.where((halo_part >= bound_low) & (halo_part <= bound_high))[0] # Find the halos with particle number between the bounds.
mass = np.mean(10**(stellar_mass[w]))
mass_std = np.std(10**(stellar_mass[w]))
return np.log10(mass), np.log10(mass_std)
##
def calculate_UV_extinction(z, L, M):
'''
Calculates the observed UV magnitude after dust extinction is accounted for.
Parameters
----------
z : float
Redshift we are calculating the extinction at.
L, M : array, length equal to the number of galaxies at this snapshot.
Array containing the UV luminosities and magnitudes.
Returns
-------
M_UV_obs : array, length equal to the number of galaxies at this snapshot.
Array containing the observed UV magnitudes.
Units
-----
Luminosities are in units of log10(erg s^-1 A^-1).
Magnitudes are in the AB system.
'''
M_UV_bins = np.arange(-24, -16, 0.1)
A_mean = np.zeros((len(M_UV_bins))) # A_mean is the average UV extinction for a given UV bin.
for j in range(0, len(M_UV_bins)):
beta = calculate_beta(M_UV_bins[j], z) # Fits the beta parameter for the redshift/UV bin we are considering.
dist = np.random.normal(beta, 0.34, 10000) # Generates a normal distribution with mean beta and standard deviation of 0.34.
A = 4.43 + 1.99*dist
A[A < 0] = 0 # Negative extinctions don't make sense.
A_mean[j] = np.mean(A)
indices = np.digitize(M, M_UV_bins) # Bins the simulation magnitude into the MUV bins. Note that digitize defines an index i if bin[i-1] <= x < bin[i] whereas I prefer bin[i] <= x < bin[i+1]
dust = A_mean[indices]
flux = AllVars.Luminosity_to_Flux(L, 10.0) # Calculate the flux from a distance of 10 parsec, units of log10(erg s^-1 A^-1 cm^-2).
flux_observed = flux - 0.4*dust
f_nu = AllVars.spectralflux_wavelength_to_frequency(10**flux_observed, 1600) # Spectral flux density in Jansky.
M_UV_obs = -2.5 * np.log10(f_nu) + 8.90 # AB Magnitude from http://www.astro.ljmu.ac.uk/~ikb/convert-units/node2.html
return M_UV_obs
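# Note on the extinction relation used in calculate_UV_extinction:
# A = 4.43 + 1.99*beta is the Meurer et al. (1999) IRX-beta dust correction;
# the Gaussian draw with sigma = 0.34 models the intrinsic scatter in the UV
# continuum slope beta.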
##
def update_cumulative_stats(mean_pool, std_pool, N_pool, mean_local, std_local, N_local):
'''
Update the cumulative statistics (such as Stellar Mass Function, Mvir-Ngamma, fesc-z) that are saved across files.
Pooled mean formulae taken from: https://www.ncbi.nlm.nih.gov/books/NBK56512/
Pooled variance formulae taken from : https://en.wikipedia.org/wiki/Pooled_variance
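Explicitly, for a bin with N_local new points (mean_local, std_local) merged into an existing pool of N_pool points (mean_pool, std_pool):
mean_new = (N_local * mean_local + N_pool * mean_pool) / (N_local + N_pool)
var_new = ((N_local - 1) * std_local**2 + (N_pool - 1) * std_pool**2) / (N_local + N_pool - 2)
This is exactly what the array arithmetic below computes.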
Parameters
----------
mean_pool, std_pool, N_pool : array of floats with length equal to the number of bins (e.g. the mass bins for the Stellar Mass Function).
The current mean, standard deviation and number of data points within each bin. These are the arrays that will be updated in this function.
mean_local, std_local, N_local : array of floats with length equal to the number of bins.
The mean, standard deviation and number of data points within each bin that will be added to the pool.
Returns
-------
mean_pool, std_pool : (See above)
The updated arrays with the local values added and accounted for within the pools. Note that the updated N_pool is not returned, so the caller must track the pooled counts separately.
Units
-----
All units are kept the same as the input units.
Values are in real-space (not log-space).
'''
N_times_mean_local = np.multiply(N_local, mean_local)
N_times_var_local = np.multiply(N_local - 1, np.multiply(std_local, std_local)) # Use N - 1 because of Bessel's correction (https://en.wikipedia.org/wiki/Bessel%27s_correction).
N_times_mean_pool = np.add(N_times_mean_local, np.multiply(N_pool, mean_pool))
N_times_var_pool = np.add(N_times_var_local, np.multiply(N_pool - 1, np.multiply(std_pool, std_pool)))
N_pool = np.add(N_local, N_pool)
if isinstance(mean_local, (np.ndarray, list)): # Checks to see if we are dealing with arrays rather than scalars (numpy scalars such as np.float32 fail this check and fall through to the scalar branch).
for i in range(0, len(N_pool)):
if(N_pool[i] == 0): # This case is when we have no data points in the bin.
mean_pool[i] = 0.0
else:
mean_pool[i] = N_times_mean_pool[i]/N_pool[i]
if(N_pool[i] < 3): # In this instance we don't have enough data points to properly calculate the standard deviation.
std_pool[i] = 0.0
else:
std_pool[i] = np.sqrt(N_times_var_pool[i]/ (N_pool[i] - 2)) # Use N_pool - 2 because the numerator contains two 'N - 1' terms (one from each of the pooled variances).
else:
mean_pool = N_times_mean_pool / N_pool
if(N_pool < 3):
std_pool = 0.0
else:
std_pool = np.sqrt(N_times_var_pool / (N_pool - 2))
return mean_pool, std_pool
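# Illustrative check of update_cumulative_stats (hypothetical values): pooling
# the stats of two batches should reproduce the mean of the concatenated data.
#
# a, b = np.array([1.0, 2.0, 3.0]), np.array([4.0, 5.0, 6.0, 7.0])
# mean_pool, std_pool = update_cumulative_stats(np.mean(a), np.std(a, ddof=1), len(a),
#                                               np.mean(b), np.std(b, ddof=1), len(b))
# # mean_pool == np.mean(np.concatenate([a, b])) == 4.0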
### Here ends the functions that deal with galaxy data manipulation. ###
#################################
if __name__ == '__main__':
np.seterr(divide='ignore')
number_models = 4
galaxies_model1="/fred/oz004/jseiler/kali/self_consistent_output/rsage_constant/galaxies/const_0.3_z5.782"
merged_galaxies_model1="/fred/oz004/jseiler/kali/self_consistent_output/rsage_constant/galaxies/const_0.3_MergedGalaxies"
photo_model1="/fred/oz004/jseiler/kali/self_consistent_output/rsage_constant/grids/cifog/const_0.3_photHI"
zreion_model1="/fred/oz004/jseiler/kali/self_consistent_output/rsage_constant/grids/cifog/const_0.3_reionization_redshift"
galaxies_model2="/fred/oz004/jseiler/kali/self_consistent_output/rsage_fej/galaxies/fej_alpha0.40_beta0.05_z5.782"
merged_galaxies_model2="/fred/oz004/jseiler/kali/self_consistent_output/rsage_fej/galaxies/fej_alpha0.40_beta0.05_MergedGalaxies"
photo_model2="/fred/oz004/jseiler/kali/self_consistent_output/rsage_fej/grids/cifog/fej_alpha0.40_beta0.05_photHI"
zreion_model2="/fred/oz004/jseiler/kali/self_consistent_output/rsage_fej/grids/cifog/fej_alpha0.40_beta0.05_reionization_redshift"
galaxies_model3="/fred/oz004/jseiler/kali/self_consistent_output/rsage_MHneg/galaxies/MHneg_1e8_1e12_0.99_0.05_z5.782"
merged_galaxies_model3="/fred/oz004/jseiler/kali/self_consistent_output/rsage_MHneg/galaxies/MHneg_1e8_1e12_0.99_0.05_MergedGalaxies"
photo_model3="/fred/oz004/jseiler/kali/self_consistent_output/rsage_MHneg/grids/cifog/MHneg_1e8_1e12_0.99_0.05_photHI"
zreion_model3="/fred/oz004/jseiler/kali/self_consistent_output/rsage_MHneg/grids/cifog/MHneg_1e8_1e12_0.99_0.05_reionization_redshift"
galaxies_model4="/fred/oz004/jseiler/kali/self_consistent_output/rsage_MHpos/galaxies/MHpos_1e8_1e12_0.01_0.50_z5.782"
merged_galaxies_model4="/fred/oz004/jseiler/kali/self_consistent_output/rsage_MHpos/galaxies/MHpos_1e8_1e12_0.01_0.50_MergedGalaxies"
photo_model4="/fred/oz004/jseiler/kali/self_consistent_output/rsage_MHpos/grids/cifog/MHpos_1e8_1e12_0.01_0.50_photHI"
zreion_model4="/fred/oz004/jseiler/kali/self_consistent_output/rsage_MHpos/grids/cifog/MHpos_1e8_1e12_0.01_0.50_reionization_redshift"
galaxies_filepath_array = [galaxies_model1,
galaxies_model2,
galaxies_model3,
galaxies_model4]
photo_array = [photo_model1,
photo_model2,
photo_model3,
photo_model4]
zreion_array = [zreion_model1,
zreion_model2,
zreion_model3,
zreion_model4]
GridSize_array = [256,
256,
256,
256]
precision_array = [2,
2,
2,
2]
merged_galaxies_filepath_array = [merged_galaxies_model1,
merged_galaxies_model2,
merged_galaxies_model3,
merged_galaxies_model4]
number_substeps = [10, 10, 10, 10] # How many substeps does each model have (specified by STEPS variable within SAGE).
number_snapshots = [99, 99, 99, 99] # Number of snapshots in the simulation (we don't have to do calculations for ALL snapshots).
# Tiamat extended has 164 snapshots.
FirstFile = [0, 0, 0, 0] # The first file number THAT WE ARE PLOTTING.
#LastFile = [63, 63, 63, 63] # The last file number THAT WE ARE PLOTTING.
LastFile = [0, 0, 0, 0] # The last file number THAT WE ARE PLOTTING.
NumFile = [64, 64, 64, 64] # The number of files for this simulation (plotting a subset of these files is allowed).
same_files = [0, 0, 0, 0] # In the case that model 1 and model 2 (index 0 and 1) have the same files, we don't want to read them in a second time.
# This array will tell us if we should keep the files for the next model or otherwise throw them away.
# The files will be kept until same_files[current_model_number] = 0.
# For example if we had 5 models we were plotting and model 1, 2, 3 shared the same files and models 4, 5 shared different files,
# Then same_files = [1, 1, 0, 1, 0] would be the correct values.
done_model = np.zeros((number_models)) # We use this to keep track of if we have done a model already.
model_tags = [r"$\mathbf{f_\mathrm{esc} \: Constant}$",
r"$\mathbf{f_\mathrm{esc} \: \propto \: f_\mathrm{ej}}$",
r"$\mathbf{f_\mathrm{esc} \: \propto \: M_\mathrm{H}^{-1}}$",
r"$\mathbf{f_\mathrm{esc} \: \propto \: M_\mathrm{H}}$"]
## Constants used for each model. ##
# Need to add an entry for EACH model. #
halo_cut = [32, 32, 32, 32] # Only calculate properties for galaxies whose host halos have at least this many particles.
# For Tiamat, z = [6, 7, 8] are snapshots [78, 64, 51]
# For Kali, z = [6, 7, 8] are snapshots [93, 76, 64]
#SnapList = [np.arange(0,99), np.arange(0,99)] # These are the snapshots over which the properties are calculated. NOTE: If the quasar escape fraction is selected (fesc_prescription == 3) then this should be ALL the snapshots in the simulation as this prescription is temporally important.
#SnapList = [np.arange(20,99), np.arange(20, 99), np.arange(20, 99)]
SnapList = [[33, 50, 76, 93],
[33, 50, 76, 93],
[33, 50, 76, 93],
[33, 50, 76, 93]]
#SnapList = [[64],
# [64],
# [64],
# [64]]
#SnapList = [[33, 50, 64, 76, 93]]
#SnapList = [[64], [64]]
#SnapList = [np.arange(20,99)]
#PlotSnapList = [[30, 50, 64, 76, 93]]
#PlotSnapList = [[93, 76, 64], [93, 76, 64]]
#SnapList = [[93, 76, 64], [93, 76, 64]]
PlotSnapList = SnapList
simulation_norm = [5, 5, 5, 5] # Changes the constants (cosmology, snapshot -> redshift mapping etc) for each simulation.
# 0 for MySim (Manodeep's old one).
# 1 for Mini-Millennium.
# 2 for Tiamat (up to z =5).
# 3 for extended Tiamat (down to z = 1.6ish).
# 4 for Britton's Sim Pip
# 5 for Manodeep's new simulation Kali.
stellar_mass_halolen_lower = [32, 95, 95, 95] # These limits are for the number of particles in a halo.
stellar_mass_halolen_upper = [50, 105, 105, 105] # We calculate the average stellar mass for galaxies whose host halos have particle count between these limits.
calculate_observed_LF = [0, 0, 0, 0] # Determines whether we want to account for dust extinction when calculating the luminosity function of each model.
paper_plots = 1
##############################################################################################################
## Do a few checks to ensure all the arrays were specified properly. ##
for model_number in range(0,number_models):
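# Sanity check: there must be at least one file per MPI rank, otherwise some ranks would have nothing to read.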
assert(LastFile[model_number] - FirstFile[model_number] + 1 >= size)
if(simulation_norm[model_number] == 1):
AllVars.Set_Params_MiniMill()
elif(simulation_norm[model_number] == 3):
AllVars.Set_Params_Tiamat_extended()
elif(simulation_norm[model_number] == 4):
AllVars.Set_Params_Britton()
elif(simulation_norm[model_number] == 5):
AllVars.Set_Params_Kali()
else:
print("Simulation norm was set to {0}.".format(simulation_norm[model_number]))
raise ValueError("This option has been implemented yet. Get your head in the game Jacob!")
if (number_snapshots[model_number] != len(AllVars.SnapZ)): # Here we do a check to ensure that the simulation we've defined correctly matches the number of snapshots we have also defined.
print("The number_snapshots array is {0}".format(number_snapshots))
print("The simulation_norm array is {0}".format(simulation_norm))
print("The number of snapshots for model_number {0} has {1} but you've said there is only {2}".format(model_number, len(AllVars.SnapZ), number_snapshots[model_number]))
raise ValueError("Check either that the number of snapshots has been defined properly and that the normalization option is correct.")
######################################################################
##################### SETTING UP ARRAYS ##############################
######################################################################
### The arrays are set up in a 3 part process. ###
### This is because our arrays are 3D nested to account for the model number and snapshots. ###
# First set up the outer most array. #
## Arrays for functions of stellar mass. ##
SMF = [] # Stellar Mass Function.
mean_fesc_galaxy_array = [] # Mean escape fraction as a function of stellar mass.
std_fesc_galaxy_array = [] # Same as above but standard devation.
N_galaxy_array = [] # Number of galaxies as a function of stellar mass.
mean_BHmass_galaxy_array = [] # Black hole mass as a function of stellar mass.
std_BHmass_galaxy_array = [] # Same as above but standard deviation.
mergers_galaxy_array = [] # Number of mergers as a function of stellar mass.
mean_dust_galaxy_array = [] # Mean dust mass as a function of stellar mass.
std_dust_galaxy_array = [] # Same as above but standard deviation.
mean_sfr_galaxy_array = [] # Mean star formation rate as a
# function of stellar mass
std_sfr_galaxy_array = [] # Same as above but standard deviation.
mean_ssfr_galaxy_array = [] # Mean specific star formation rate as a
# function of stellar mass
std_ssfr_galaxy_array = [] # Same as above but standard deviation.
mean_Ngamma_galaxy_array = [] # Mean number of ionizing photons emitted as
# a function of stellar mass.
std_Ngamma_galaxy_array = [] # Same as above but standard deviation.
mean_photo_galaxy_array = [] # Mean photoionization rate.
std_photo_galaxy_array = [] # Std photoionization rate.
mean_reionmod_galaxy_array = [] # Mean reionization modifier using RSAGE.
std_reionmod_galaxy_array = [] # Std.
mean_gnedin_reionmod_galaxy_array = [] # Mean reionization modifier using Gnedin analytic prescription.
std_gnedin_reionmod_galaxy_array = [] # Std.
## Arrays for functions of halo mass. ##
mean_ejected_halo_array = [] # Mean ejected fractions as a function of halo mass.
std_ejected_halo_array = [] # Same as above but standard deviation.
mean_fesc_halo_array = [] # Mean escape fraction as a function of halo mass.
std_fesc_halo_array = [] # Same as above but standard deviation.
mean_Ngamma_halo_array = [] # Mean number of ionizing photons THAT ESCAPE as a function of halo mass.
std_Ngamma_halo_array = [] # Same as above but standard deviation.
N_halo_array = [] # Number of galaxies as a function of halo mass.
mergers_halo_array = [] # Number of mergers as a function of halo mass.
mean_quasar_activity_array = [] # Mean fraction of galaxies that have quasar activity as a function of halo mass.
std_quasar_activity_array = [] # Same as above but standard deviation.
mean_reionmod_halo_array = [] # Mean reionization modifier as a function of halo mass.
std_reionmod_halo_array = [] # Same as above but for standard deviation.
mean_dust_halo_array = [] # Mean dust mass as a function of halo mass.
std_dust_halo_array = [] # Same as above but standard deviation.
## Arrays for functions of redshift. ##
sum_Ngamma_z_array = [] # Total number of ionizing photons THAT ESCAPE as a function of redshift.
mean_fesc_z_array = [] # Mean escape fraction as a function of redshift.
std_fesc_z_array = [] # Same as above but standard deviation.
N_z = [] # Number of galaxies as a function of redshift.
galaxy_halo_mass_mean = [] # Mean galaxy mass as a function of redshift.
N_quasars_z = [] # This tracks how many quasars went off during a specified snapshot.
N_quasars_boost_z = [] # This tracks how many galaxies are having their escape fraction boosted by quasar activity.
dynamicaltime_quasars_mean_z = [] # Mean dynamical time of galaxies that have a quasar event as a function of redshift.
dynamicaltime_quasars_std_z = [] # Same as above but standard deviation.
dynamicaltime_all_mean_z = [] # Mean dynamical time of all galaxies.
dynamicaltime_all_std_z = [] # Same as above but standard deviation.
mean_reionmod_z = [] # Mean reionization modifier as a function of redshift.
std_reionmod_z = [] # Same as above but for standard deviation.
N_reionmod_z = [] # Number of galaxies with a non-negative reionization modifier.
mean_ejected_z = [] # Mean ejected fraction as a function of redshift.
std_ejected_z = [] # Same as above but for standard deviation.
## Arrays that aren't functions of other variables. ##
Ngamma_global = []
mass_global = []
fesc_global = []
## Arrays as a function of fej ##
mean_Ngamma_fej = []
std_Ngamma_fej = []
N_fej = []
## Now the outer arrays have been defined, set up the next nest level for the number of models. ##
for model_number in range(0,number_models):
## Galaxy Arrays ##
SMF.append([])
mean_fesc_galaxy_array.append([])
std_fesc_galaxy_array.append([])
N_galaxy_array.append([])
mean_BHmass_galaxy_array.append([])
std_BHmass_galaxy_array.append([])
mergers_galaxy_array.append([])
mean_dust_galaxy_array.append([])
std_dust_galaxy_array.append([])
mean_sfr_galaxy_array.append([])
std_sfr_galaxy_array.append([])
mean_ssfr_galaxy_array.append([])
std_ssfr_galaxy_array.append([])
mean_Ngamma_galaxy_array.append([])
std_Ngamma_galaxy_array.append([])
mean_photo_galaxy_array.append([])
std_photo_galaxy_array.append([])
mean_reionmod_galaxy_array.append([])
std_reionmod_galaxy_array.append([])
mean_gnedin_reionmod_galaxy_array.append([])
std_gnedin_reionmod_galaxy_array.append([])
## Halo arrays. ##
mean_ejected_halo_array.append([])
std_ejected_halo_array.append([])
mean_fesc_halo_array.append([])
std_fesc_halo_array.append([])
mean_Ngamma_halo_array.append([])
std_Ngamma_halo_array.append([])
N_halo_array.append([])
mergers_halo_array.append([])
mean_quasar_activity_array.append([])
std_quasar_activity_array.append([])
mean_reionmod_halo_array.append([])
std_reionmod_halo_array.append([])
mean_dust_halo_array.append([])
std_dust_halo_array.append([])
## Redshift arrays. ##
sum_Ngamma_z_array.append([])
mean_fesc_z_array.append([])
std_fesc_z_array.append([])
N_z.append([])
galaxy_halo_mass_mean.append([])
N_quasars_z.append([])
N_quasars_boost_z.append([])
dynamicaltime_quasars_mean_z.append([])
dynamicaltime_quasars_std_z.append([])
dynamicaltime_all_mean_z.append([])
dynamicaltime_all_std_z.append([])
mean_reionmod_z.append([])
std_reionmod_z.append([])
N_reionmod_z.append([])
mean_ejected_z.append([])
std_ejected_z.append([])
## Arrays that aren't functions ##
Ngamma_global.append([])
mass_global.append([])
fesc_global.append([])
## Arrays as a function of fej ##
mean_Ngamma_fej.append([])
std_Ngamma_fej.append([])
N_fej.append([])
## And then finally set up the innermost arrays. ##
## NOTE: We store the counts as floats so we keep consistency when calling MPI operations (we can always use MPI.FLOAT rather than deciding between MPI.FLOAT and MPI.INT).
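## A minimal sketch of the pay-off (assuming mpi4py; this snippet is illustrative
## only and is not executed by this script):
#
# from mpi4py import MPI
# comm = MPI.COMM_WORLD
# local_counts = np.zeros(NB_gal, dtype=np.float32) # Counts kept as float32.
# global_counts = np.zeros(NB_gal, dtype=np.float32)
# comm.Reduce([local_counts, MPI.FLOAT], [global_counts, MPI.FLOAT],
#             op=MPI.SUM, root=0) # One MPI.FLOAT reduce covers the counts as well.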
for snapshot_idx in range(len(SnapList[model_number])):
## For the arrays that are functions of stellar/halo mass, the inner most level will be an array with the statistic binned across mass ##
## E.g. SMF[model_number][snapshot_idx] will return an array whereas N_z[model_number][snapshot_idx] will return a float. ##
## Functions of stellar mass arrays. ##
SMF[model_number].append(np.zeros((NB_gal), dtype = np.float32))
mean_fesc_galaxy_array[model_number].append(np.zeros((NB_gal), dtype = np.float32))
std_fesc_galaxy_array[model_number].append(np.zeros((NB_gal), dtype = np.float32))
N_galaxy_array[model_number].append(np.zeros((NB_gal), dtype = np.float32))
mean_BHmass_galaxy_array[model_number].append(np.zeros((NB_gal), dtype = np.float32))
std_BHmass_galaxy_array[model_number].append(np.zeros((NB_gal), dtype = np.float32))
mergers_galaxy_array[model_number].append(np.zeros((NB_gal), dtype = np.float32))
mean_dust_galaxy_array[model_number].append(np.zeros((NB_gal), dtype = np.float32))
std_dust_galaxy_array[model_number].append(np.zeros((NB_gal), dtype = np.float32))
mean_sfr_galaxy_array[model_number].append(np.zeros((NB_gal), dtype = np.float32))
std_sfr_galaxy_array[model_number].append(np.zeros((NB_gal), dtype = np.float32))
mean_ssfr_galaxy_array[model_number].append(np.zeros((NB_gal), dtype = np.float32))
std_ssfr_galaxy_array[model_number].append(np.zeros((NB_gal), dtype = np.float32))
mean_Ngamma_galaxy_array[model_number].append(np.zeros((NB_gal), dtype = np.float32))
std_Ngamma_galaxy_array[model_number].append(np.zeros((NB_gal), dtype = np.float32))
mean_photo_galaxy_array[model_number].append(np.zeros((NB_gal), dtype = np.float32))
std_photo_galaxy_array[model_number].append(np.zeros((NB_gal), dtype = np.float32))
mean_reionmod_galaxy_array[model_number].append(np.zeros((NB_gal), dtype = np.float32))
std_reionmod_galaxy_array[model_number].append(np.zeros((NB_gal), dtype = np.float32))
mean_gnedin_reionmod_galaxy_array[model_number].append(np.zeros((NB_gal), dtype = np.float32))
std_gnedin_reionmod_galaxy_array[model_number].append(np.zeros((NB_gal), dtype = np.float32))
## Function of halo mass arrays. ##
mean_ejected_halo_array[model_number].append(np.zeros((NB), dtype = np.float32))
std_ejected_halo_array[model_number].append(np.zeros((NB), dtype = np.float32))
mean_fesc_halo_array[model_number].append(np.zeros((NB), dtype = np.float32))
std_fesc_halo_array[model_number].append(np.zeros((NB), dtype = np.float32))
mean_Ngamma_halo_array[model_number].append(np.zeros((NB), dtype = np.float32))
std_Ngamma_halo_array[model_number].append(np.zeros((NB), dtype = np.float32))
N_halo_array[model_number].append(np.zeros((NB), dtype = np.float32))
mergers_halo_array[model_number].append(np.zeros((NB), dtype = np.float32))
mean_quasar_activity_array[model_number].append(np.zeros((NB), dtype = np.float32))
std_quasar_activity_array[model_number].append(np.zeros((NB), dtype = np.float32))
mean_reionmod_halo_array[model_number].append(np.zeros((NB), dtype = np.float32))
std_reionmod_halo_array[model_number].append(np.zeros((NB), dtype = np.float32))
mean_dust_halo_array[model_number].append(np.zeros((NB), dtype = np.float32))
std_dust_halo_array[model_number].append(np.zeros((NB), dtype = np.float32))
## Function of Redshift arrays. ##
sum_Ngamma_z_array[model_number].append(0.0)
mean_fesc_z_array[model_number].append(0.0)
std_fesc_z_array[model_number].append(0.0)
N_z[model_number].append(0.0)
galaxy_halo_mass_mean[model_number].append(0.0)
N_quasars_z[model_number].append(0.0)
N_quasars_boost_z[model_number].append(0.0)
dynamicaltime_quasars_mean_z[model_number].append(0.0)
dynamicaltime_quasars_std_z[model_number].append(0.0)
dynamicaltime_all_mean_z[model_number].append(0.0)
dynamicaltime_all_std_z[model_number].append(0.0)
mean_reionmod_z[model_number].append(0.0)
std_reionmod_z[model_number].append(0.0)
N_reionmod_z[model_number].append(0.0)
mean_ejected_z[model_number].append(0.0)
std_ejected_z[model_number].append(0.0)
Ngamma_global[model_number].append([])
mass_global[model_number].append([])
fesc_global[model_number].append([])
## Arrays as a function of fej. ##
mean_Ngamma_fej[model_number].append(np.zeros((NB_fej), dtype = np.float32))
std_Ngamma_fej[model_number].append(np.zeros((NB_fej), dtype = np.float32))
N_fej[model_number].append(np.zeros((NB_fej), dtype = np.float32))
######################################################################
#################### ALL ARRAYS SETUP ################################
######################################################################
## Now it's (finally) time to read in all the data and do the actual work. ##
for model_number in range(number_models):
if(simulation_norm[model_number] == 1):
AllVars.Set_Params_MiniMill()
elif(simulation_norm[model_number] == 3):
AllVars.Set_Params_Tiamat_extended()
elif(simulation_norm[model_number] == 4):
AllVars.Set_Params_Britton()
elif(simulation_norm[model_number] == 5):
AllVars.Set_Params_Kali()
else:
print("Simulation norm was set to {0}.".format(simulation_norm[model_number]))
raise ValueError("This option has not been implemented yet. Get your head in the game Jacob!")
if (done_model[model_number] == 1): # If we have already done this model (i.e., we kept the files and skipped this loop), move along.
assert(FirstFile[model_number] == FirstFile[model_number - 1])
assert(LastFile[model_number] == LastFile[model_number - 1])
continue
for fnr in range(FirstFile[model_number] + rank, LastFile[model_number]+1, size): # Divide up the input files across the processors.
GG, Gal_Desc = ReadScripts.ReadGals_SAGE(galaxies_filepath_array[model_number], fnr, number_snapshots[model_number], comm) # Read galaxies
G_Merged, _ = ReadScripts.ReadGals_SAGE(merged_galaxies_filepath_array[model_number], fnr, number_snapshots[model_number], comm) # Also need the merged galaxies.
G = ReadScripts.Join_Arrays(GG, G_Merged, Gal_Desc) # Then join them together for all galaxies.
keep_files = 1 # Flips to 0 when we are done with this file.
current_model_number = model_number # Used to differentiate between outer model_number and the inner model_number because we can keep files across model_numbers.
while(keep_files == 1):
## Just a few definitions to cut down the clutter a smidge. ##
current_halo_cut = halo_cut[current_model_number]
NumSubsteps = number_substeps[current_model_number]
do_observed_LF = calculate_observed_LF[current_model_number]
for snapshot_idx in range(0, len(SnapList[current_model_number])): # Now let's calculate stats for each required redshift.
current_snap = SnapList[current_model_number][snapshot_idx] # Get rid of some clutter.
w_gal = np.where((G.GridHistory[:, current_snap] != -1) & (G.GridStellarMass[:, current_snap] > 0.0) & (G.LenHistory[:, current_snap] > current_halo_cut) & (G.GridSFR[:, current_snap] >= 0.0) & (G.GridFoFMass[:, current_snap] >= 0.0))[0] # Only include those galaxies that existed at the current snapshot and had positive (but not infinite) stellar/halo mass and star formation rate. Ensure the galaxies also reside in a halo that is sufficiently resolved.
w_merged_gal = np.where((G_Merged.GridHistory[:, current_snap] != -1) & (G_Merged.GridStellarMass[:, current_snap] > 0.0) & (G_Merged.LenHistory[:, current_snap] > current_halo_cut) & (G_Merged.GridSFR[:, current_snap] >= 0.0) & (G_Merged.GridFoFMass[:, current_snap] >= 0.0) & (G_Merged.LenMergerGal[:,current_snap] > current_halo_cut))[0]
print("There were {0} galaxies for snapshot {1} (Redshift {2:.3f}) model {3}.".format(len(w_gal), current_snap, AllVars.SnapZ[current_snap], current_model_number))
if (len(w_gal) == 0):
continue
mass_gal = np.log10(G.GridStellarMass[w_gal, current_snap] * 1.0e10 / AllVars.Hubble_h) # Msun. Log Units.
w_SFR = w_gal[np.where((G.GridSFR[w_gal, current_snap] > 0.0))[0]]
mass_SFR_gal = np.log10(G.GridStellarMass[w_SFR, current_snap] * \
1.0e10 / AllVars.Hubble_h)
SFR_gal = np.log10(G.GridSFR[w_SFR,current_snap])
sSFR_gal = SFR_gal - mass_SFR_gal
halo_part_count = G.LenHistory[w_gal, current_snap]
metallicity_gal = G.GridZ[w_gal, current_snap]
metallicity_tremonti_gal = np.log10(G.GridZ[w_gal, current_snap] / 0.02) + 9.0 # Using the Tremonti relationship for metallicity.
mass_central = np.log10(G.GridFoFMass[w_gal, current_snap] * 1.0e10 / AllVars.Hubble_h) # Msun. Log Units.
ejected_fraction = G.EjectedFraction[w_gal, current_snap]
w_dust = np.where(((G.GridDustColdGas[w_gal, current_snap]
+G.GridDustHotGas[w_gal, current_snap]
+G.GridDustEjectedMass[w_gal, current_snap]) > 0.0)
& (G.GridType[w_gal, current_snap] == 0))[0]
total_dust_gal = np.log10((G.GridDustColdGas[w_gal[w_dust], current_snap]
+G.GridDustHotGas[w_gal[w_dust], current_snap]
+G.GridDustEjectedMass[w_gal[w_dust], current_snap])
* 1.0e10 / AllVars.Hubble_h)
mass_gal_dust = np.log10(G.GridStellarMass[w_gal[w_dust], current_snap]
* 1.0e10 / AllVars.Hubble_h)
mass_centralgal_dust = np.log10(G.GridFoFMass[w_gal[w_dust], current_snap]
* 1.0e10 / AllVars.Hubble_h)
fesc = G.Gridfesc[w_gal, current_snap]
fesc[fesc < 0.0] = 0.0
Ngamma_gal = G.GridNgamma_HI[w_gal, current_snap] # Units of 1.0e50 photons/s.
if model_number < 3:
Ngamma_gal += 50.0 # Old versions of SAGE incorrectly subtracted 50.
Ngamma_gal *= fesc
reionmod = G.GridReionMod[w_gal, current_snap]
mass_reionmod_central = mass_central[reionmod > -1]
mass_reionmod_gal = mass_gal[reionmod > -1]
reionmod = reionmod[reionmod > -1] # Some satellite galaxies don't have HotGas and hence won't be stripped; as a result, reionmod = -1 for these. Ignore them.
mass_BH = G.GridBHMass[w_gal, current_snap] * 1.0e10 / AllVars.Hubble_h # Msun. Not log units.
L_UV = SFR_gal + 39.927 # Using relationship from STARBURST99, units of erg s^-1 A^-1. Log Units.
M_UV = AllVars.Luminosity_to_ABMag(L_UV, 1600)
if (do_observed_LF == 1): # Calculate the UV extinction if requested.
M_UV_obs = calculate_UV_extinction(AllVars.SnapZ[current_snap], L_UV, M_UV)
galaxy_halo_mass_mean_local, galaxy_halo_mass_std_local = Calculate_HaloPartStellarMass(halo_part_count, mass_gal, stellar_mass_halolen_lower[current_model_number], stellar_mass_halolen_upper[current_model_number]) # This is the average stellar mass for galaxies whose halos have the specified number of particles.
galaxy_halo_mass_mean[current_model_number][snapshot_idx] += pow(10, galaxy_halo_mass_mean_local) / (LastFile[current_model_number] + 1) # Adds to the average of the mean.
photofield_path = "{0}_{1:03d}".format(photo_array[current_model_number],
current_snap)
#photo_gal = photo.calc_gal_photoion(G.GridHistory[w_gal, current_snap],
# photofield_path,
# GridSize_array[current_model_number],
# precision_array[current_model_number])
#zreion_path = "{0}".format(zreion_array[current_model_number])
#zreion_gal = photo.calc_gal_zreion(G.GridHistory[w_gal, current_snap],
# zreion_path,
# GridSize_array[current_model_number],
# precision_array[current_model_number])
z_0 = 8.0
z_r = 7.0
gnedin_mfilt = ga.get_filter_mass(np.array(AllVars.SnapZ[current_snap]),
z_0, z_r)
gnedin_reionmod_gal = 1.0 / pow(1.0 + 0.26*pow(10, gnedin_mfilt - mass_central), 3.0)
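# The line above implements the Gnedin-style suppression factor
# 1 / (1 + 0.26 * M_filt / M_halo)^3; both masses are in log10 here, hence the
# pow(10, gnedin_mfilt - mass_central) term. A quick numeric sanity sketch
# (illustrative values, not taken from this dataset):
#
# log_mfilt = 9.0 # Hypothetical log10 filtering mass.
# log_mvir = np.array([8.0, 9.0, 10.0]) # Hypothetical log10 halo masses.
# mod = 1.0 / (1.0 + 0.26 * 10**(log_mfilt - log_mvir))**3
# # -> approximately [0.02, 0.50, 0.93]: low-mass halos are strongly suppressed.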
###########################################
######## BASE PROPERTIES CALCULATED #######
###########################################
# Time to calculate relevant statistics.
### Functions of Galaxies/Stellar Mass ###
## Stellar Mass Function ##
(counts_local, bin_edges, bin_middle) = AllVars.Calculate_Histogram(mass_gal, bin_width, 0, m_gal_low, m_gal_high) # Bin the Stellar Mass
SMF[current_model_number][snapshot_idx] += counts_local
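# Calculate_Histogram bins the log stellar masses into fixed-width bins between
# m_gal_low and m_gal_high; conceptually (an illustrative numpy equivalent,
# assuming right-open bins):
#
# bin_edges = np.arange(m_gal_low, m_gal_high + bin_width, bin_width)
# counts_local, bin_edges = np.histogram(mass_gal, bins=bin_edges)
# bin_middle = bin_edges[:-1] + 0.5 * bin_width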
## Escape Fraction ##
(mean_fesc_galaxy_local, std_fesc_galaxy_local, N_local, sum_fesc_galaxy, bin_middle) = AllVars.Calculate_2D_Mean(mass_gal, fesc, bin_width, m_gal_low, m_gal_high)
(mean_fesc_galaxy_array[current_model_number][snapshot_idx], std_fesc_galaxy_array[current_model_number][snapshot_idx]) = update_cumulative_stats(mean_fesc_galaxy_array[current_model_number][snapshot_idx], std_fesc_galaxy_array[current_model_number][snapshot_idx], N_galaxy_array[current_model_number][snapshot_idx], mean_fesc_galaxy_local, std_fesc_galaxy_local, N_local)
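# update_cumulative_stats merges the per-file bin statistics into the running
# totals. Conceptually this is the standard pooled mean/variance update; a
# minimal per-bin sketch (an illustration of the idea, not necessarily the
# exact implementation):
#
# N = N_old + N_new
# mean = (N_old * mean_old + N_new * mean_new) / N
# var = (N_old * (std_old**2 + (mean_old - mean)**2)
#        + N_new * (std_new**2 + (mean_new - mean)**2)) / N
# std = var**0.5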
## Black Hole Mass ##
(mean_BHmass_galaxy_local, std_BHmass_galaxy_local, N_local, sum_BHmass_galaxy, bin_middle) = AllVars.Calculate_2D_Mean(mass_gal, mass_BH, bin_width, m_gal_low, m_gal_high)
(mean_BHmass_galaxy_array[current_model_number][snapshot_idx], std_BHmass_galaxy_array[current_model_number][snapshot_idx]) = update_cumulative_stats(mean_BHmass_galaxy_array[current_model_number][snapshot_idx], std_BHmass_galaxy_array[current_model_number][snapshot_idx], N_galaxy_array[current_model_number][snapshot_idx], mean_BHmass_galaxy_local, std_BHmass_galaxy_local, N_local)
## Total Dust Mass ##
(mean_dust_galaxy_local, std_dust_galaxy_local, N_local,
sum_dust_galaxy, bin_middle) = AllVars.Calculate_2D_Mean(
mass_gal_dust, total_dust_gal,
bin_width, m_gal_low,
m_gal_high)
(mean_dust_galaxy_array[current_model_number][snapshot_idx],
std_dust_galaxy_array[current_model_number][snapshot_idx]) = \
update_cumulative_stats(mean_dust_galaxy_array[current_model_number][snapshot_idx],
std_dust_galaxy_array[current_model_number][snapshot_idx],
N_galaxy_array[current_model_number][snapshot_idx],
mean_dust_galaxy_local,
std_dust_galaxy_local,
N_local)
## Star Formation Rate ##
(mean_sfr_galaxy_local, std_sfr_galaxy_local, N_local,
sum_sfr_galaxy, bin_middle) = AllVars.Calculate_2D_Mean(
mass_SFR_gal, SFR_gal,
bin_width, m_gal_low,
m_gal_high)
(mean_sfr_galaxy_array[current_model_number][snapshot_idx],
std_sfr_galaxy_array[current_model_number][snapshot_idx]) = \
update_cumulative_stats(mean_sfr_galaxy_array[current_model_number][snapshot_idx],
std_sfr_galaxy_array[current_model_number][snapshot_idx],
N_galaxy_array[current_model_number][snapshot_idx],
mean_sfr_galaxy_local,
std_sfr_galaxy_local,
N_local)
## Specific Star Formation Rate ##
(mean_ssfr_galaxy_local, std_ssfr_galaxy_local, N_local,
sum_ssfr_galaxy, bin_middle) = AllVars.Calculate_2D_Mean(
mass_SFR_gal, sSFR_gal,
bin_width, m_gal_low,
m_gal_high)
(mean_ssfr_galaxy_array[current_model_number][snapshot_idx],
std_ssfr_galaxy_array[current_model_number][snapshot_idx]) = \
update_cumulative_stats(mean_ssfr_galaxy_array[current_model_number][snapshot_idx],
std_ssfr_galaxy_array[current_model_number][snapshot_idx],
N_galaxy_array[current_model_number][snapshot_idx],
mean_ssfr_galaxy_local,
std_ssfr_galaxy_local,
N_local)
## Number of Ionizing Photons ##
(mean_Ngamma_galaxy_local, std_Ngamma_galaxy_local, N_local,
sum_Ngamma_galaxy_local, bin_middle) = AllVars.Calculate_2D_Mean(
mass_gal, Ngamma_gal,
bin_width, m_gal_low,
m_gal_high)
(mean_Ngamma_galaxy_array[current_model_number][snapshot_idx],
std_Ngamma_galaxy_array[current_model_number][snapshot_idx]) = \
update_cumulative_stats(mean_Ngamma_galaxy_array[current_model_number][snapshot_idx],
std_Ngamma_galaxy_array[current_model_number][snapshot_idx],
N_galaxy_array[current_model_number][snapshot_idx],
mean_Ngamma_galaxy_local,
std_Ngamma_galaxy_local,
N_local)
## Photoionization rate ##
'''
(mean_photo_galaxy_local, std_photo_galaxy_local, N_local,
sum_photo_galaxy_local, bin_middle) = AllVars.Calculate_2D_Mean(
mass_gal, photo_gal,
bin_width, m_gal_low,
m_gal_high)
(mean_photo_galaxy_array[current_model_number][snapshot_idx],
std_photo_galaxy_array[current_model_number][snapshot_idx]) = \
update_cumulative_stats(mean_photo_galaxy_array[current_model_number][snapshot_idx],
std_photo_galaxy_array[current_model_number][snapshot_idx],
N_galaxy_array[current_model_number][snapshot_idx],
mean_photo_galaxy_local,
std_photo_galaxy_local,
N_local)
'''
## RSAGE Reionization Modifier ##
(mean_reionmod_galaxy_local, std_reionmod_galaxy_local, N_local,
sum_reionmod_galaxy_local, bin_middle) = AllVars.Calculate_2D_Mean(
mass_reionmod_gal, reionmod,
bin_width, m_gal_low,
m_gal_high)
(mean_reionmod_galaxy_array[current_model_number][snapshot_idx],
std_reionmod_galaxy_array[current_model_number][snapshot_idx]) = \
update_cumulative_stats(mean_reionmod_galaxy_array[current_model_number][snapshot_idx],
std_reionmod_galaxy_array[current_model_number][snapshot_idx],
N_galaxy_array[current_model_number][snapshot_idx],
mean_reionmod_galaxy_local,
std_reionmod_galaxy_local,
N_local)
## Gnedin Reionization Modifier ##
(mean_gnedin_reionmod_galaxy_local, std_gnedin_reionmod_galaxy_local, N_local,
sum_gnedin_reionmod_galaxy_local, bin_middle) = AllVars.Calculate_2D_Mean(
mass_gal, gnedin_reionmod_gal,
bin_width, m_gal_low,
m_gal_high)
(mean_gnedin_reionmod_galaxy_array[current_model_number][snapshot_idx],
std_gnedin_reionmod_galaxy_array[current_model_number][snapshot_idx]) = \
update_cumulative_stats(mean_gnedin_reionmod_galaxy_array[current_model_number][snapshot_idx],
std_gnedin_reionmod_galaxy_array[current_model_number][snapshot_idx],
N_galaxy_array[current_model_number][snapshot_idx],
mean_gnedin_reionmod_galaxy_local,
std_gnedin_reionmod_galaxy_local,
N_local)
N_galaxy_array[current_model_number][snapshot_idx] += N_local
### Functions of Halos/Halo Mass ###
## Ejected Fraction ##
(mean_ejected_halo_local, std_ejected_halo_local, N_local, sum_ejected_halo, bin_middle) = AllVars.Calculate_2D_Mean(mass_central, ejected_fraction, bin_width, m_low, m_high)
(mean_ejected_halo_array[current_model_number][snapshot_idx], std_ejected_halo_array[current_model_number][snapshot_idx]) = update_cumulative_stats(mean_ejected_halo_array[current_model_number][snapshot_idx], std_ejected_halo_array[current_model_number][snapshot_idx], N_halo_array[current_model_number][snapshot_idx], mean_ejected_halo_local, std_ejected_halo_local, N_local) # Then update the running total.
## Quasar Fraction ##
(mean_quasar_activity_local, std_quasar_activity_local,N_local, sum_quasar_activity_halo, bin_middle) = AllVars.Calculate_2D_Mean(mass_central, G.QuasarActivity[w_gal, current_snap], bin_width, m_low, m_high)
(mean_quasar_activity_array[current_model_number][snapshot_idx], std_quasar_activity_array[current_model_number][snapshot_idx]) = update_cumulative_stats(mean_quasar_activity_array[current_model_number][snapshot_idx], std_quasar_activity_array[current_model_number][snapshot_idx], N_halo_array[current_model_number][snapshot_idx], mean_quasar_activity_local, std_quasar_activity_local, N_local) # Then update the running total.
## fesc Value ##
(mean_fesc_halo_local, std_fesc_halo_local, N_local, sum_fesc_halo, bin_middle) = AllVars.Calculate_2D_Mean(mass_central, fesc, bin_width, m_low, m_high)
(mean_fesc_halo_array[current_model_number][snapshot_idx], std_fesc_halo_array[current_model_number][snapshot_idx]) = update_cumulative_stats(mean_fesc_halo_array[current_model_number][snapshot_idx], std_fesc_halo_array[current_model_number][snapshot_idx], N_halo_array[current_model_number][snapshot_idx], mean_fesc_halo_local, std_fesc_halo_local, N_local) # Then update the running total.
## Ngamma ##
#(mean_Ngamma_halo_local, std_Ngamma_halo_local, N_local, sum_Ngamma_halo, bin_middle) \
#= AllVars.Calculate_2D_Mean(mass_central, ionizing_photons, bin_width, m_low, m_high)
#mean_Ngamma_halo_local = np.divide(mean_Ngamma_halo_local, 1.0e50) ## Divide out a constant to keep the numbers manageable.
#std_Ngamma_halo_local = np.divide(std_Ngamma_halo_local, 1.0e50)
#(mean_Ngamma_halo_array[current_model_number][snapshot_idx], std_Ngamma_halo_array[current_model_number][snapshot_idx]) = update_cumulative_stats(mean_Ngamma_halo_array[current_model_number][snapshot_idx], std_Ngamma_halo_array[current_model_number][snapshot_idx], N_halo_array[current_model_number][snapshot_idx], mean_Ngamma_halo_local, std_Ngamma_halo_local, N_local) # Then update the running total.
## Reionization Modifier ##
(mean_reionmod_halo_local, std_reionmod_halo_local, N_local, sum_reionmod_halo, bin_middle) = AllVars.Calculate_2D_Mean(mass_reionmod_central, reionmod, bin_width, m_low, m_high)
(mean_reionmod_halo_array[current_model_number][snapshot_idx], std_reionmod_halo_array[current_model_number][snapshot_idx]) = update_cumulative_stats(mean_reionmod_halo_array[current_model_number][snapshot_idx], std_reionmod_halo_array[current_model_number][snapshot_idx], N_halo_array[current_model_number][snapshot_idx], mean_reionmod_halo_local, std_reionmod_halo_local, N_local) # Then update the running total.
## Total Dust Mass ##
(mean_dust_halo_local, std_dust_halo_local, N_local,
sum_dust_halo, bin_middle) = AllVars.Calculate_2D_Mean(
mass_centralgal_dust, total_dust_gal,
bin_width, m_low,
m_high)
(mean_dust_halo_array[current_model_number][snapshot_idx],
std_dust_halo_array[current_model_number][snapshot_idx]) = \
update_cumulative_stats(mean_dust_halo_array[current_model_number][snapshot_idx],
std_dust_halo_array[current_model_number][snapshot_idx],
N_halo_array[current_model_number][snapshot_idx],
mean_dust_halo_local,
std_dust_halo_local,
N_local)
N_halo_array[current_model_number][snapshot_idx] += N_local
### Functions of redshift ###
## Ngamma ##
#sum_Ngamma_z_array[current_model_number][snapshot_idx] += np.sum(np.divide(ionizing_photons, 1.0e50)) # Remember that we're dividing out a constant!
## fesc Value ##
(mean_fesc_z_array[current_model_number][snapshot_idx], std_fesc_z_array[current_model_number][snapshot_idx]) = update_cumulative_stats(mean_fesc_z_array[current_model_number][snapshot_idx], std_fesc_z_array[current_model_number][snapshot_idx], N_z[current_model_number][snapshot_idx], np.mean(fesc), np.std(fesc), len(w_gal)) # Updates the mean escape fraction for this redshift.
## Reionization Modifier ##
(mean_reionmod_z[current_model_number][snapshot_idx], std_reionmod_z[current_model_number][snapshot_idx]) = update_cumulative_stats(mean_reionmod_z[current_model_number][snapshot_idx], std_reionmod_z[current_model_number][snapshot_idx], N_reionmod_z[current_model_number][snapshot_idx], np.mean(reionmod), np.std(reionmod), len(reionmod))
N_reionmod_z[current_model_number][snapshot_idx] += len(reionmod)
## Ejected Fraction ##
(mean_ejected_z[current_model_number][snapshot_idx],std_ejected_z[current_model_number][snapshot_idx]) \
= update_cumulative_stats(mean_ejected_z[current_model_number][snapshot_idx],
std_ejected_z[current_model_number][snapshot_idx],
N_z[current_model_number][snapshot_idx],
np.mean(ejected_fraction),
np.std(ejected_fraction),
len(w_gal))
N_z[current_model_number][snapshot_idx] += len(w_gal)
## Arrays that are just kept across snapshots. ##
Ngamma_global[current_model_number][snapshot_idx].append(Ngamma_gal)
mass_global[current_model_number][snapshot_idx].append(mass_gal)
fesc_global[current_model_number][snapshot_idx].append(fesc)
## Arrays that are a function of fej. ##
(mean_Ngamma_fej_local, std_Ngamma_fej_local, N_local,
sum_Ngamma_fej_local, bin_middle) = AllVars.Calculate_2D_Mean(
ejected_fraction, Ngamma_gal,
fej_bin_width, fej_low, fej_high)
(mean_Ngamma_fej[current_model_number][snapshot_idx],
std_Ngamma_fej[current_model_number][snapshot_idx]) = \
update_cumulative_stats(mean_Ngamma_fej[current_model_number][snapshot_idx],
std_Ngamma_fej[current_model_number][snapshot_idx],
N_fej[current_model_number][snapshot_idx],
mean_Ngamma_fej_local,
std_Ngamma_fej_local,
N_local)
N_fej[current_model_number][snapshot_idx] += N_local
done_model[current_model_number] = 1
if (current_model_number < number_models):
keep_files = same_files[current_model_number] # Decide if we want to keep the files loaded or throw them out.
current_model_number += 1 # Update the inner loop model number.
#StellarMassFunction(PlotSnapList, SMF, simulation_norm, FirstFile,
# LastFile, NumFile, galaxy_halo_mass_mean, model_tags,
# 1, paper_plots, "wtf")
#plot_reionmod(PlotSnapList, SnapList, simulation_norm, mean_reionmod_halo_array,
#std_reionmod_halo_array, N_halo_array, mean_reionmod_z,
#std_reionmod_z, N_reionmod_z, False, model_tags,
#"reionmod_selfcon")
#plot_dust_scatter(SnapList, mass_gal_dust, mass_centralgal_dust, total_dust_gal,
# "dust_scatter")
#plot_dust(PlotSnapList, SnapList, simulation_norm, mean_dust_galaxy_array,
# std_dust_galaxy_array, N_galaxy_array, mean_dust_halo_array,
# std_dust_halo_array, N_halo_array, False, model_tags,
# "dustmass_total")
#plot_stellarmass_blackhole(PlotSnapList, simulation_norm, mean_BHmass_galaxy_array,
# std_BHmass_galaxy_array, N_galaxy_array,
# FirstFile, LastFile, NumFile,
# model_tags, "StellarMass_BHMass")
#plot_ejectedfraction(SnapList, PlotSnapList, simulation_norm,
# mean_ejected_halo_array, std_ejected_halo_array,
# N_halo_array, mean_ejected_z, std_ejected_z, N_z,
# model_tags, "ejectedfraction")
#plot_quasars_count(SnapList, PlotSnapList, N_quasars_z, N_quasars_boost_z, N_z, mean_quasar_activity_array, std_quasar_activity_array, N_halo_array, mergers_halo_array, SMF, mergers_galaxy_array, fesc_prescription, simulation_norm, FirstFile, LastFile, NumFile, model_tags, "SN_Prescription")
plot_fesc_galaxy(SnapList, PlotSnapList, simulation_norm,
mean_fesc_galaxy_array, std_fesc_galaxy_array,
N_galaxy_array, mean_fesc_halo_array,
std_fesc_halo_array, N_halo_array,
galaxy_halo_mass_mean, model_tags,
paper_plots, mass_global, fesc_global, Ngamma_global,
"fesc_paper")
plot_reionmod_galaxy(SnapList, PlotSnapList, simulation_norm,
mean_reionmod_galaxy_array, std_reionmod_galaxy_array,
N_galaxy_array, mean_gnedin_reionmod_galaxy_array,
std_gnedin_reionmod_galaxy_array,
model_tags, paper_plots, "reionmod")
exit()
#plot_nion_galaxy(SnapList, PlotSnapList, simulation_norm,
# mean_Ngamma_galaxy_array, std_Ngamma_galaxy_array,
# N_galaxy_array, model_tags,
# paper_plots, "Ngamma")
'''
plot_photo_galaxy(SnapList, PlotSnapList, simulation_norm,
mean_photo_galaxy_array, std_photo_galaxy_array,
N_galaxy_array, model_tags,
paper_plots, "photo")
'''
plot_sfr_galaxy(SnapList, PlotSnapList, simulation_norm,
mean_sfr_galaxy_array, std_sfr_galaxy_array,
mean_ssfr_galaxy_array, std_ssfr_galaxy_array,
N_galaxy_array, model_tags, "sSFR")
#plot_fej_Ngamma(SnapList, PlotSnapList, simulation_norm,
# mean_Ngamma_fej, std_Ngamma_fej,
# N_fej, model_tags, "Ngamma_fej")
#plot_photoncount(SnapList, sum_Ngamma_z_array, simulation_norm, FirstFile, LastFile, NumFile, model_tags, "Ngamma_test") ## PARALLEL COMPATIBLE
#plot_mvir_Ngamma(SnapList, mean_Ngamma_halo_array, std_Ngamma_halo_array, N_halo_array, model_tags, "Mvir_Ngamma_test", fesc_prescription, fesc_normalization, "/lustre/projects/p004_swin/jseiler/tiamat/halo_ngamma/") ## PARALLEL COMPATIBLE
| 51.258188 | 474 | 0.610478 | 30,778 | 241,016 | 4.540224 | 0.04409 | 0.061715 | 0.037119 | 0.04298 | 0.748345 | 0.703733 | 0.653239 | 0.616263 | 0.578278 | 0.544987 | 0 | 0.025656 | 0.281467 | 241,016 | 4,701 | 475 | 51.269092 | 0.781254 | 0.256813 | 0 | 0.478646 | 0 | 0.009619 | 0.066813 | 0.016578 | 0 | 0 | 0 | 0 | 0.001154 | 1 | 0.01616 | false | 0.001539 | 0.011928 | 0.00077 | 0.03309 | 0.036553 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
81b28caa54d539dfc14006299c0cf1e06133e78c | 1,537 | py | Python | utils/deserializer/__tests__/test_protobuf_deserializer.py | Mouse-BB-Team/Bot-Detection | 4438d8ccec1baaa22f3357213e6d52a62ff6d618 | [
"MIT"
] | 5 | 2020-09-30T16:58:59.000Z | 2021-11-30T22:34:10.000Z | utils/deserializer/__tests__/test_protobuf_deserializer.py | Mouse-BB-Team/Bot-Detection | 4438d8ccec1baaa22f3357213e6d52a62ff6d618 | [
"MIT"
] | null | null | null | utils/deserializer/__tests__/test_protobuf_deserializer.py | Mouse-BB-Team/Bot-Detection | 4438d8ccec1baaa22f3357213e6d52a62ff6d618 | [
"MIT"
] | null | null | null | from utils.deserializer.protobuf_deserializer import ProtoLoader
from pathlib import Path
import pandas as pd
import pytest
PROTOFILES_DIR_PATH = Path(__file__).parent.joinpath("protofilesdir").absolute().__str__()
INVALID_PATH = "some/wrong/path"
@pytest.mark.parametrize('filepath', ["test_file.pb", "test_file_1.txt", "test_file_2.xml"])
def test_should_return_single_df_sequence_regardless_file_extension(filepath):
loader = ProtoLoader(PROTOFILES_DIR_PATH)
sequence = loader.get_single_sequence(filepath)
assert isinstance(sequence, pd.DataFrame)
def test_should_return_not_none_when_directory_not_empty():
loader = ProtoLoader(PROTOFILES_DIR_PATH)
seq_list = loader.get_list_of_sequences()
assert seq_list is not None
def test_should_return_correct_length_of_seq_list():
loader = ProtoLoader(PROTOFILES_DIR_PATH)
seq_list = loader.get_list_of_sequences()
assert len(seq_list) == 3
def test_should_return_empty_list_when_directory_empty():
loader = ProtoLoader(PROTOFILES_DIR_PATH + INVALID_PATH)
seq_list = loader.get_list_of_sequences()
assert len(seq_list) == 0
def test_should_check_for_list_when_directory_empty():
loader = ProtoLoader(PROTOFILES_DIR_PATH + INVALID_PATH)
seq_list = loader.get_list_of_sequences()
assert isinstance(seq_list, list)
def test_should_return_list_of_sequences():
loader = ProtoLoader(PROTOFILES_DIR_PATH)
seq_list = loader.get_list_of_sequences()
for seq in seq_list:
assert isinstance(seq, pd.DataFrame)
| 33.413043 | 92 | 0.791802 | 213 | 1,537 | 5.244131 | 0.295775 | 0.068935 | 0.106535 | 0.161146 | 0.424351 | 0.393912 | 0.389436 | 0.389436 | 0.389436 | 0.389436 | 0 | 0.002981 | 0.126871 | 1,537 | 45 | 93 | 34.155556 | 0.829359 | 0 | 0 | 0.34375 | 0 | 0 | 0.050748 | 0 | 0 | 0 | 0 | 0 | 0.1875 | 1 | 0.1875 | false | 0 | 0.125 | 0 | 0.3125 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
81b2cfe5a1a59f76e8e712dc7fabc6c32050694c | 18,966 | py | Python | wisdem/assemblies/turbinese/turbine_se_seam.py | dzalkind/WISDEM | 724a7bf9c19bad3ca7e18c240628d1a75b07e3f0 | [
"Apache-2.0"
] | 1 | 2020-01-22T17:48:30.000Z | 2020-01-22T17:48:30.000Z | wisdem/assemblies/turbinese/turbine_se_seam.py | dzalkind/WISDEM | 724a7bf9c19bad3ca7e18c240628d1a75b07e3f0 | [
"Apache-2.0"
] | 17 | 2019-09-13T22:21:15.000Z | 2019-10-25T20:04:26.000Z | wisdem/assemblies/turbinese/turbine_se_seam.py | dzalkind/WISDEM | 724a7bf9c19bad3ca7e18c240628d1a75b07e3f0 | [
"Apache-2.0"
] | 2 | 2019-03-21T10:38:05.000Z | 2021-01-08T18:49:53.000Z | #!/usr/bin/env python
# encoding: utf-8
"""
turbine.py
Created by Andrew Ning and Katherine Dykes on 2014-01-13.
Copyright (c) NREL. All rights reserved.
"""
from openmdao.main.api import Assembly, Component
from openmdao.main.datatypes.api import Float, Array, Enum, Bool, Int
from openmdao.lib.drivers.api import FixedPointIterator
import numpy as np
#from rotorse.rotor import RotorSE
#from towerse.tower import TowerSE
#from commonse.rna import RNAMass, RotorLoads
from drivewpact.drive import DriveWPACT
from drivewpact.hub import HubWPACT
from commonse.csystem import DirectionVector
from commonse.utilities import interp_with_deriv, hstack, vstack
from drivese.drive import Drive4pt, Drive3pt
from drivese.drivese_utils import blade_moment_transform, blade_force_transform
from drivese.hub import HubSE, Hub_System_Adder_drive
from SEAMLoads.SEAMLoads import SEAMLoads
from SEAMTower.SEAMTower import SEAMTower
from SEAMAero.SEAM_AEP import SEAM_PowerCurve
from SEAMRotor.SEAMRotor import SEAMBladeStructure
# from SEAMGeometry.SEAMGeometry import SEAMGeometry
def connect_io(top, cls):
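"""Connect every same-named variable between the assembly `top` and the
component `cls`: assembly variables feed the component's inputs and the
component's outputs feed the assembly; names that do not exist on the
assembly are silently skipped."""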
cls_name = cls.name
for name in cls.list_inputs():
try:
top.connect(name, cls_name + '.%s' % name)
except:
# print 'failed connecting', cls_name, name
pass
for name in cls.list_outputs():
try:
top.connect(cls_name + '.%s' % name, name)
except:
pass
def configure_turbine(assembly, with_new_nacelle=True, flexible_blade=False, with_3pt_drive=False):
"""a stand-alone configure method to allow for flatter assemblies
Parameters
----------
assembly : Assembly
an openmdao assembly to be configured
with_new_nacelle : bool
False uses the default implementation, True uses an experimental implementation designed
to smooth out discontinuities, making it amenable to gradient-based optimization
flexible_blade : bool
if True, internally solves the coupled aero/structural deflection using fixed point iteration.
Note that the coupling is currently only in the flapwise deflection, and is primarily
only important for highly flexible blades. If False, the aero loads are passed
to the structure but there is no further iteration.
"""
#SEAM variables ----------------------------------
#d2e = Float(0.73, iotype='in', desc='Dollars to Euro ratio'
assembly.add('rated_power',Float(3000., iotype='in', units='kW', desc='Turbine rated power', group='Global'))
assembly.add('hub_height', Float(100., iotype='in', units='m', desc='Hub height', group='Global'))
assembly.add('rotor_diameter', Float(110., iotype='in', units='m', desc='Rotor diameter', group='Global'))
# assembly.add('site_type',Enum('onshore', values=('onshore', 'offshore'), iotype='in', desc='Site type', group='Global'))
assembly.add('tower_bottom_diameter', Float(4., iotype='in', desc='Tower bottom diameter', group='Global'))
assembly.add('tower_top_diameter', Float(2., iotype='in', desc='Tower top diameter', group='Global'))
assembly.add('project_lifetime', Float(iotype = 'in', desc='Operating years', group='Global'))
assembly.add('rho_steel', Float(7.8e3, iotype='in', desc='density of steel', group='Tower'))
assembly.add('lifetime_cycles', Float(1.e7, iotype='in', desc='Equivalent lifetime cycles', group='Rotor'))
assembly.add('stress_limit_extreme_tower', Float(iotype='in', units='MPa', desc='Tower ultimate strength', group='Tower'))
assembly.add('stress_limit_fatigue_tower', Float(iotype='in', units='MPa', desc='Tower fatigue strength', group='Tower'))
assembly.add('safety_factor_tower', Float(iotype='in', desc='Tower loads safety factor', group='Tower'))
assembly.add('PMtarget_tower', Float(1., iotype='in', desc='', group='Tower'))
assembly.add('wohler_exponent_tower', Float(4., iotype='in', desc='Tower fatigue Wohler exponent', group='Tower'))
assembly.add('tower_z', Array(iotype='out', desc='Tower discretization'))
assembly.add('tower_wall_thickness', Array(iotype='out', units='m', desc='Tower wall thickness'))
assembly.add('tower_mass', Float(iotype='out', units='kg', desc='Tower mass'))
assembly.add('tsr', Float(iotype='in', units='m', desc='Design tip speed ratio', group='Aero'))
assembly.add('F', Float(iotype='in', desc='Rotor power loss factor', group='Aero'))
assembly.add('wohler_exponent_blade_flap', Float(iotype='in', desc='Wohler Exponent blade flap', group='Rotor'))
assembly.add('nSigma4fatFlap', Float(iotype='in', desc='', group='Loads'))
assembly.add('nSigma4fatTower', Float(iotype='in', desc='', group='Loads'))
assembly.add('dLoad_dU_factor_flap', Float(iotype='in', desc='', group='Loads'))
assembly.add('dLoad_dU_factor_tower', Float(iotype='in', desc='', group='Loads'))
assembly.add('blade_edge_dynload_factor_ext', Float(iotype='in', desc='Extreme dynamic edgewise loads factor', group='Loads'))
assembly.add('blade_edge_dynload_factor_fat', Float(iotype='in', desc='Fatigue dynamic edgewise loads factor', group='Loads'))
assembly.add('PMtarget_blades', Float(1., iotype='in', desc='', group='Rotor'))
assembly.add('max_tipspeed', Float(iotype='in', desc='Maximum tip speed', group='Aero'))
assembly.add('n_wsp', Int(iotype='in', desc='Number of wind speed bins', group='Aero'))
assembly.add('min_wsp', Float(0.0, iotype = 'in', units = 'm/s', desc = 'min wind speed', group='Aero'))
assembly.add('max_wsp', Float(iotype = 'in', units = 'm/s', desc = 'max wind speed', group='Aero'))
assembly.add('turbulence_int', Float(iotype='in', desc='Reference turbulence intensity', group='Plant_AEP'))
# assembly.add('WeibullInput', Bool(True, iotype='in', desc='Flag for Weibull input', group='AEP'))
assembly.add('weibull_C', Float(iotype = 'in', units='m/s', desc = 'Weibull scale factor', group='AEP'))
assembly.add('weibull_k', Float(iotype = 'in', desc='Weibull shape or form factor', group='AEP'))
assembly.add('blade_sections', Int(iotype='in', desc='number of sections along blade', group='Rotor'))
assembly.add('wohler_exponent_blade_flap', Float(iotype='in', desc='Blade flap fatigue Wohler exponent', group='Rotor'))
assembly.add('MaxChordrR', Float(iotype='in', units='m', desc='Spanwise position of maximum chord', group='Rotor'))
assembly.add('tif_blade_root_flap_ext', Float(1., iotype='in', desc='Technology improvement factor flap extreme', group='Rotor'))
assembly.add('tif_blade_root_edge_ext', Float(1., iotype='in', desc='Technology improvement factor edge extreme', group='Rotor'))
assembly.add('tif_blade_root_flap_fat', Float(1., iotype='in', desc='Technology improvement factor flap LEQ', group='Rotor'))
assembly.add('sc_frac_flap', Float(iotype='in', desc='spar cap fraction of chord', group='Rotor'))
assembly.add('sc_frac_edge', Float(iotype='in', desc='spar cap fraction of thickness', group='Rotor'))
assembly.add('safety_factor_blade', Float(iotype='in', desc='Blade loads safety factor', group='Rotor'))
assembly.add('stress_limit_extreme_blade', Float(iotype='in', units='MPa', desc='Blade ultimate strength', group='Rotor'))
assembly.add('stress_limit_fatigue_blade', Float(iotype='in', units='MPa', desc='Blade fatigue strength', group='Rotor'))
assembly.add('AddWeightFactorBlade', Float(iotype='in', desc='Additional weight factor for blade shell', group='Rotor'))
assembly.add('blade_material_density', Float(iotype='in', units='kg/m**3', desc='Average density of blade materials', group='Rotor'))
assembly.add('blade_mass', Float(iotype = 'out', units = 'kg', desc = 'Blade mass'))
# assembly.add('mean_wsp', Float(iotype = 'in', units = 'm/s', desc = 'mean wind speed', group='Aero')) # [m/s]
assembly.add('air_density', Float(iotype = 'in', units = 'kg/m**3', desc = 'density of air', group='Plant_AEP')) # [kg / m^3]
assembly.add('max_Cp', Float(iotype = 'in', desc = 'max CP', group='Aero'))
assembly.add('gearloss_const', Float(iotype = 'in', desc = 'Gear loss constant', group='Drivetrain'))
assembly.add('gearloss_var', Float(iotype = 'in', desc = 'Gear loss variable', group='Drivetrain'))
assembly.add('genloss', Float(iotype = 'in', desc = 'Generator loss', group='Drivetrain'))
assembly.add('convloss', Float(iotype = 'in', desc = 'Converter loss', group='Drivetrain'))
# Outputs
assembly.add('rated_wind_speed', Float(units = 'm / s', iotype='out', desc='wind speed for rated power'))
assembly.add('ideal_power_curve', Array(iotype='out', units='kW', desc='total power before losses and turbulence'))
assembly.add('power_curve', Array(iotype='out', units='kW', desc='total power including losses and turbulence'))
assembly.add('wind_curve', Array(iotype='out', units='m/s', desc='wind curve associated with power curve'))
assembly.add('aep', Float(iotype = 'out', units='mW*h', desc='Annual energy production in mWh'))
assembly.add('total_aep', Float(iotype = 'out', units='mW*h', desc='AEP for total years of production'))
# END SEAM Variables ----------------------
# Add SEAM components and connections
assembly.add('loads', SEAMLoads())
assembly.add('tower_design', SEAMTower(21))
assembly.add('blade_design', SEAMBladeStructure())
assembly.add('aep_calc', SEAM_PowerCurve())
assembly.driver.workflow.add(['loads', 'tower_design', 'blade_design', 'aep_calc'])
assembly.connect('loads.tower_bottom_moment_max', 'tower_design.tower_bottom_moment_max')
assembly.connect('loads.tower_bottom_moment_leq', 'tower_design.tower_bottom_moment_leq')
assembly.connect('loads.blade_root_flap_max', 'blade_design.blade_root_flap_max')
assembly.connect('loads.blade_root_edge_max', 'blade_design.blade_root_edge_max')
assembly.connect('loads.blade_root_flap_leq', 'blade_design.blade_root_flap_leq')
assembly.connect('loads.blade_root_edge_leq', 'blade_design.blade_root_edge_leq')
connect_io(assembly, assembly.aep_calc)
connect_io(assembly, assembly.loads)
connect_io(assembly, assembly.tower_design)
connect_io(assembly, assembly.blade_design)
# End SEAM add components and connections -------------
if with_new_nacelle:
assembly.add('hub',HubSE())
assembly.add('hubSystem',Hub_System_Adder_drive())
if with_3pt_drive:
assembly.add('nacelle', Drive3pt())
else:
assembly.add('nacelle', Drive4pt())
else:
assembly.add('nacelle', DriveWPACT())
assembly.add('hub', HubWPACT())
assembly.driver.workflow.add(['hub', 'nacelle'])
if with_new_nacelle:
assembly.driver.workflow.add(['hubSystem'])
# connections to hub and hub system
assembly.connect('blade_design.blade_mass', 'hub.blade_mass')
assembly.connect('loads.blade_root_flap_max', 'hub.rotor_bending_moment')
assembly.connect('rotor_diameter', ['hub.rotor_diameter'])
assembly.connect('blade_design.blade_root_diameter', 'hub.blade_root_diameter')
assembly.add('blade_number',Int(3,iotype='in',desc='number of blades', group='Aero'))
assembly.connect('blade_number', 'hub.blade_number')
if with_new_nacelle:
assembly.connect('rated_power','hub.machine_rating')
assembly.connect('rotor_diameter', ['hubSystem.rotor_diameter'])
assembly.connect('nacelle.MB1_location','hubSystem.MB1_location') # TODO: bearing locations
assembly.connect('nacelle.L_rb','hubSystem.L_rb')
assembly.add('rotor_tilt', Float(5.0, iotype='in', desc='rotor tilt', group='Rotor'))
assembly.connect('rotor_tilt','hubSystem.shaft_angle')
assembly.connect('hub.hub_diameter','hubSystem.hub_diameter')
assembly.connect('hub.hub_thickness','hubSystem.hub_thickness')
assembly.connect('hub.hub_mass','hubSystem.hub_mass')
assembly.connect('hub.spinner_mass','hubSystem.spinner_mass')
assembly.connect('hub.pitch_system_mass','hubSystem.pitch_system_mass')
# connections to nacelle #TODO: fatigue option variables
assembly.connect('rotor_diameter', 'nacelle.rotor_diameter')
assembly.connect('1.5 * aep_calc.rated_torque', 'nacelle.rotor_torque')
assembly.connect('loads.max_thrust', 'nacelle.rotor_thrust')
assembly.connect('aep_calc.rated_speed', 'nacelle.rotor_speed')
assembly.connect('rated_power', 'nacelle.machine_rating')
assembly.add('generator_speed',Float(1173.7,iotype='in',units='rpm',desc='speed of generator', group='Drivetrain')) # - should be in nacelle
assembly.connect('generator_speed/aep_calc.rated_speed', 'nacelle.gear_ratio')
assembly.connect('tower_top_diameter', 'nacelle.tower_top_diameter')
assembly.connect('blade_number * blade_design.blade_mass + hub.hub_system_mass', 'nacelle.rotor_mass') # assuming not already in rotor force / moments
# variable connections for new nacelle
if with_new_nacelle:
assembly.connect('blade_number','nacelle.blade_number')
assembly.connect('rotor_tilt','nacelle.shaft_angle')
assembly.connect('333.3 * rated_power / 1000.0','nacelle.shrink_disc_mass')
assembly.connect('blade_design.blade_root_diameter','nacelle.blade_root_diameter')
#moments - ignoring for now (nacelle will use internal defaults)
#assembly.connect('rotor.Mxyz_0','moments.b1')
#assembly.connect('rotor.Mxyz_120','moments.b2')
#assembly.connect('rotor.Mxyz_240','moments.b3')
#assembly.connect('rotor.Pitch','moments.pitch_angle')
#assembly.connect('rotor.TotalCone','moments.cone_angle')
assembly.connect('1.5 * aep_calc.rated_torque','nacelle.rotor_bending_moment_x') #accounted for in ratedConditions.Q
#assembly.connect('moments.My','nacelle.rotor_bending_moment_y')
#assembly.connect('moments.Mz','nacelle.rotor_bending_moment_z')
#forces - ignoring for now (nacelle will use internal defaults)
#assembly.connect('rotor.Fxyz_0','forces.b1')
#assembly.connect('rotor.Fxyz_120','forces.b2')
#assembly.connect('rotor.Fxyz_240','forces.b3')
#assembly.connect('rotor.Pitch','forces.pitch_angle')
#assembly.connect('rotor.TotalCone','forces.cone_angle')
assembly.connect('loads.max_thrust','nacelle.rotor_force_x')
#assembly.connect('forces.Fy','nacelle.rotor_force_y')
#assembly.connect('forces.Fz','nacelle.rotor_force_z')
class Turbine_SE_SEAM(Assembly):
def configure(self):
configure_turbine(self)
if __name__ == '__main__':
turbine = Turbine_SE_SEAM()
#=========== SEAM inputs
turbine.AddWeightFactorBlade = 1.2
turbine.blade_material_density = 2100.0
turbine.tower_bottom_diameter = 6.
turbine.tower_top_diameter = 3.78
turbine.blade_edge_dynload_factor_ext = 2.5
turbine.blade_edge_dynload_factor_fat = 0.75
turbine.F = 0.777
turbine.MaxChordrR = 0.2
turbine.project_lifetime = 20.0
turbine.lifetime_cycles = 10000000.0
turbine.blade_sections = 21
turbine.PMtarget_tower = 1.0
turbine.PMtarget_blades = 1.0
turbine.safety_factor_blade = 1.1
turbine.safety_factor_tower = 1.5
turbine.stress_limit_extreme_tower = 235.0
turbine.stress_limit_fatigue_tower = 14.885
turbine.stress_limit_extreme_blade = 200.0
turbine.stress_limit_fatigue_blade = 27.0
turbine.tif_blade_root_flap_ext = 1.0
turbine.tif_blade_root_flap_fat = 1.0
turbine.tif_blade_root_edge_ext = 1.0
turbine.weibull_C = 11.0
turbine.weibull_k = 2.0
turbine.wohler_exponent_blade_flap = 10.0
turbine.wohler_exponent_tower = 4.0
turbine.dLoad_dU_factor_flap = 0.9
turbine.dLoad_dU_factor_tower = 0.8
turbine.hub_height = 90.0
turbine.max_tipspeed = 80.0
turbine.n_wsp = 26
turbine.min_wsp = 0.0
turbine.max_wsp = 25.0
turbine.nSigma4fatFlap = 1.2
turbine.nSigma4fatTower = 0.8
turbine.rated_power = 5000.0
turbine.rho_steel = 7800.0
turbine.rotor_diameter = 126.0
turbine.sc_frac_edge = 0.8
turbine.sc_frac_flap = 0.3
turbine.tsr = 8.0
turbine.air_density = 1.225
turbine.turbulence_int = 0.16
turbine.max_Cp = 0.49
turbine.gearloss_const = 0.01 # Fraction
turbine.gearloss_var = 0.014 # Fraction
turbine.genloss = 0.03 # Fraction
turbine.convloss = 0.03 # Fraction
#==============
# === nacelle ======
turbine.blade_number = 3 # turbine level that must be added for SEAM
turbine.rotor_tilt = 5.0 # turbine level that must be added for SEAM
turbine.generator_speed = 1173.7
turbine.nacelle.L_ms = 1.0 # (Float, m): main shaft length downwind of main bearing in low-speed shaft
turbine.nacelle.L_mb = 2.5 # (Float, m): main shaft length in low-speed shaft
turbine.nacelle.h0_front = 1.7 # (Float, m): height of Ibeam in bedplate front
turbine.nacelle.h0_rear = 1.35 # (Float, m): height of Ibeam in bedplate rear
turbine.nacelle.drivetrain_design = 'geared'
turbine.nacelle.crane = True # (Bool): flag for presence of crane
turbine.nacelle.bevel = 0 # (Int): Flag for the presence of a bevel stage - 1 if present, 0 if not
turbine.nacelle.gear_configuration = 'eep' # (Str): string that represents the configuration of the gearbox (stage number and types)
turbine.nacelle.Np = [3, 3, 1] # (Array): number of planets in each stage
turbine.nacelle.ratio_type = 'optimal' # (Str): optimal or empirical stage ratios
turbine.nacelle.shaft_type = 'normal' # (Str): normal or short shaft length
#turbine.nacelle.shaft_angle = 5.0 # (Float, deg): Angle of the LSS inclination with respect to the horizontal
turbine.nacelle.shaft_ratio = 0.10 # (Float): Ratio of inner diameter to outer diameter. Leave zero for solid LSS
turbine.nacelle.carrier_mass = 8000.0 # estimated for 5 MW
turbine.nacelle.mb1Type = 'CARB' # (Str): Main bearing type: CARB, TRB or SRB
turbine.nacelle.mb2Type = 'SRB' # (Str): Second bearing type: CARB, TRB or SRB
turbine.nacelle.yaw_motors_number = 8.0 # (Float): number of yaw motors
turbine.nacelle.uptower_transformer = True
turbine.nacelle.flange_length = 0.5 #m
turbine.nacelle.gearbox_cm = 0.1
turbine.nacelle.hss_length = 1.5
turbine.nacelle.overhang = 5.0 #TODO - should come from turbine configuration level
turbine.nacelle.check_fatigue = 0 #0 if no fatigue check, 1 if parameterized fatigue check, 2 if known loads inputs
# =================
# === run ===
turbine.run()
print 'mass rotor blades (kg) =', turbine.blade_number * turbine.blade_design.blade_mass
print 'mass hub system (kg) =', turbine.hubSystem.hub_system_mass
print 'mass nacelle (kg) =', turbine.nacelle.nacelle_mass
print 'mass tower (kg) =', turbine.tower_design.tower_mass
# =================
| 54.188571 | 154 | 0.703048 | 2,575 | 18,966 | 5.015146 | 0.185631 | 0.063884 | 0.037169 | 0.030277 | 0.316943 | 0.226189 | 0.154871 | 0.119405 | 0.062955 | 0.039957 | 0 | 0.017922 | 0.152694 | 18,966 | 349 | 155 | 54.34384 | 0.785688 | 0.165243 | 0 | 0.051724 | 0 | 0 | 0.306814 | 0.090042 | 0 | 0 | 0 | 0.002865 | 0 | 0 | null | null | 0.008621 | 0.064655 | null | null | 0.017241 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
81b69499f86483624239f156b1fed165ba08aee8 | 1,770 | py | Python | generated-libraries/python/netapp/fcp/aliases_info.py | radekg/netapp-ontap-lib-get | 6445ebb071ec147ea82a486fbe9f094c56c5c40d | [
"MIT"
] | 2 | 2017-03-28T15:31:26.000Z | 2018-08-16T22:15:18.000Z | generated-libraries/python/netapp/fcp/aliases_info.py | radekg/netapp-ontap-lib-get | 6445ebb071ec147ea82a486fbe9f094c56c5c40d | [
"MIT"
] | null | null | null | generated-libraries/python/netapp/fcp/aliases_info.py | radekg/netapp-ontap-lib-get | 6445ebb071ec147ea82a486fbe9f094c56c5c40d | [
"MIT"
] | null | null | null | from netapp.netapp_object import NetAppObject
class AliasesInfo(NetAppObject):
"""
A list of WWPNs and their aliases generated according
to the input - alias, WWPN or nothing.
"""
_vserver = None
@property
def vserver(self):
"""
Vserver containing the alias
"""
return self._vserver
@vserver.setter
def vserver(self, val):
if val != None:
self.validate('vserver', val)
self._vserver = val
_aliases_wwpn = None
@property
def aliases_wwpn(self):
"""
The FCP WWPN for which the alias is given
"""
return self._aliases_wwpn
@aliases_wwpn.setter
def aliases_wwpn(self, val):
if val != None:
self.validate('aliases_wwpn', val)
self._aliases_wwpn = val
_aliases_alias = None
@property
def aliases_alias(self):
"""
The 32-character alias for a given FCP WWPN
"""
return self._aliases_alias
@aliases_alias.setter
def aliases_alias(self, val):
if val != None:
self.validate('aliases_alias', val)
self._aliases_alias = val
@staticmethod
def get_api_name():
return "aliases-info"
@staticmethod
def get_desired_attrs():
return [
'vserver',
'aliases-wwpn',
'aliases-alias',
]
def describe_properties(self):
return {
'vserver': { 'class': basestring, 'is_list': False, 'required': 'optional' },
'aliases_wwpn': { 'class': basestring, 'is_list': False, 'required': 'required' },
'aliases_alias': { 'class': basestring, 'is_list': False, 'required': 'required' },
}
| 26.818182 | 95 | 0.565537 | 186 | 1,770 | 5.198925 | 0.290323 | 0.102378 | 0.046536 | 0.037229 | 0.223371 | 0.223371 | 0.188211 | 0.072389 | 0 | 0 | 0 | 0.001684 | 0.328814 | 1,770 | 65 | 96 | 27.230769 | 0.81229 | 0.116949 | 0 | 0.177778 | 1 | 0 | 0.130258 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.2 | false | 0 | 0.022222 | 0.066667 | 0.444444 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
81b8a377f7e00482ba8d3e94e5cc8f42cb23bfce | 28,078 | py | Python | tests/test_fitting.py | adrdrew/viroconcom | 3eb748ba8e3e076eddd174a0fcdfee3917aa4045 | [
"MIT"
] | null | null | null | tests/test_fitting.py | adrdrew/viroconcom | 3eb748ba8e3e076eddd174a0fcdfee3917aa4045 | [
"MIT"
] | 1 | 2020-05-18T11:06:28.000Z | 2020-05-18T11:06:28.000Z | tests/test_fitting.py | adrdrew/viroconcom | 3eb748ba8e3e076eddd174a0fcdfee3917aa4045 | [
"MIT"
] | null | null | null | import unittest
import csv
import numpy as np
from viroconcom.fitting import Fit
def read_benchmark_dataset(path='tests/testfiles/1year_dataset_A.txt'):
"""
Reads a dataset provided for the environmental contour benchmark.
Parameters
----------
path : string
Path to the dataset including the file name; defaults to 'tests/testfiles/1year_dataset_A.txt'
Returns
-------
x : ndarray of doubles
Observations of the environmental variable 1.
y : ndarray of doubles
Observations of the environmental variable 2.
x_label : str
Label of the environmental variable 1.
y_label : str
Label of the environmental variable 2.
"""
x = list()
y = list()
x_label = None
y_label = None
with open(path, newline='') as csv_file:
reader = csv.reader(csv_file, delimiter=';')
idx = 0
for row in reader:
if idx == 0:
x_label = row[1][1:] # Ignore the first char (a white space).
y_label = row[2][1:] # Ignore the first char (a white space).
if idx > 0: # Ignore the header
x.append(float(row[1]))
y.append(float(row[2]))
idx = idx + 1
x = np.asarray(x)
y = np.asarray(y)
return (x, y, x_label, y_label)
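# A minimal usage sketch of the reader above (a hypothetical helper, not used by
# the tests; it assumes the benchmark file shipped with the test suite exists).
def example_read_benchmark_dataset():
    hs, tz, hs_label, tz_label = read_benchmark_dataset()
    print('{}: n = {}, mean = {:.2f}'.format(hs_label, len(hs), np.mean(hs)))
    print('{}: n = {}, mean = {:.2f}'.format(tz_label, len(tz), np.mean(tz)))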
class FittingTest(unittest.TestCase):
def test_2d_fit(self):
"""
2-d Fit with Weibull and Lognormal distribution.
"""
prng = np.random.RandomState(42)
# Draw 1000 samples from a Weibull distribution with shape=1.5 and scale=3,
# which represents significant wave height.
sample_1 = prng.weibull(1.5, 1000)*3
# Let the second sample, which represents spectral peak period increase
# with significant wave height and follow a Lognormal distribution with
# mean=2 and sigma=0.2
sample_2 = [0.1 + 1.5 * np.exp(0.2 * point) +
prng.lognormal(2, 0.2) for point in sample_1]
# Describe the distribution that should be fitted to the sample.
dist_description_0 = {'name': 'Weibull_3p',
'dependency': (None, None, None),
'width_of_intervals': 2}
dist_description_1 = {'name': 'Lognormal',
'dependency': (None, None, 0),
'functions': (None, None, 'exp3')}
# Compute the fit.
my_fit = Fit((sample_1, sample_2),
(dist_description_0, dist_description_1))
dist0 = my_fit.mul_var_dist.distributions[0]
dist1 = my_fit.mul_var_dist.distributions[1]
self.assertAlmostEqual(dist0.shape(0), 1.4165147571863412, places=5)
self.assertAlmostEqual(dist0.scale(0), 2.833833521811032, places=5)
self.assertAlmostEqual(dist0.loc(0), 0.07055663251419833, places=5)
self.assertAlmostEqual(dist1.shape(0), 0.17742685807554776 , places=5)
#self.assertAlmostEqual(dist1.scale, 7.1536437634240135+2.075539206642004e^{0.1515051024957754x}, places=5)
self.assertAlmostEqual(dist1.loc, None, places=5)
# Now use a 2-parameter Weibull distribution instead of 3-p distr.
dist_description_0 = {'name': 'Weibull_2p',
'dependency': (None, None, None),
'width_of_intervals': 2}
dist_description_1 = {'name': 'Lognormal',
'dependency': (None, None, 0),
'functions': (None, None, 'exp3')}
my_fit = Fit((sample_1, sample_2),
(dist_description_0, dist_description_1))
self.assertEqual(str(my_fit)[0:5], 'Fit()')
def test_2d_benchmark_case(self):
"""
Reproduces the baseline results presented in doi: 10.1115/OMAE2019-96523 .
"""
sample_hs, sample_tz, label_hs, label_tz = read_benchmark_dataset(
path='tests/testfiles/allyears_dataset_A.txt')
# Describe the distribution that should be fitted to the sample.
dist_description_0 = {'name': 'Weibull_3p',
'dependency': (None, None, None),
'width_of_intervals': 0.5}
dist_description_1 = {'name': 'Lognormal_SigmaMu',
'dependency': (0, None, 0),
'functions': ('exp3', None, 'power3')} # Shape, location, scale.
# Compute the fit.
my_fit = Fit((sample_hs, sample_tz),
(dist_description_0, dist_description_1))
# Evaluate the fitted parameters.
dist0 = my_fit.mul_var_dist.distributions[0]
dist1 = my_fit.mul_var_dist.distributions[1]
self.assertAlmostEqual(dist0.shape(0), 1.48, delta=0.02)
self.assertAlmostEqual(dist0.scale(0), 0.944, delta=0.01)
self.assertAlmostEqual(dist0.loc(0), 0.0981, delta=0.001)
self.assertAlmostEqual(dist1.shape.a, 0, delta=0.001)
self.assertAlmostEqual(dist1.shape.b, 0.308, delta=0.002)
self.assertAlmostEqual(dist1.shape.c, -0.250, delta=0.002)
self.assertAlmostEqual(dist1.scale.a, 1.47 , delta=0.02)
self.assertAlmostEqual(dist1.scale.b, 0.214, delta=0.002)
self.assertAlmostEqual(dist1.scale.c, 0.641, delta=0.002)
self.assertAlmostEqual(dist1.scale(0), 4.3 , delta=0.1)
self.assertAlmostEqual(dist1.scale(2), 6, delta=0.1)
self.assertAlmostEqual(dist1.scale(5), 8, delta=0.1)
def test_2d_exponentiated_wbl_fit(self):
"""
Tests if a 2D fit that includes an exp. Weibull distribution works.
"""
prng = np.random.RandomState(42)
# Draw 1000 samples from a Weibull distribution with shape=1.5 and scale=3,
# which represents significant wave height.
sample_hs = prng.weibull(1.5, 1000)*3
# Let the second sample, which represents zero-upcrossing period, increase
# with significant wave height and follow a Lognormal distribution with
# mean=2 and sigma=0.2.
sample_tz = [0.1 + 1.5 * np.exp(0.2 * point) +
prng.lognormal(2, 0.2) for point in sample_hs]
# Define the structure of the probabilistic model that will be fitted to the
# dataset.
dist_description_hs = {'name': 'Weibull_Exp',
'dependency': (None, None, None, None),
# Shape, Location, Scale, Shape2
'width_of_intervals': 0.5}
dist_description_tz = {'name': 'Lognormal_SigmaMu',
'dependency': (0, None, 0),
# Shape, Location, Scale
'functions': ('exp3', None, 'power3')
# Shape, Location, Scale
}
# Fit the model to the data, first test a 1D fit.
fit = Fit(sample_hs, dist_description_hs)
# Now perform the 2D fit.
fit = Fit((sample_hs, sample_tz),
(dist_description_hs, dist_description_tz))
dist0 = fit.mul_var_dist.distributions[0]
self.assertGreater(dist0.shape(0), 1) # Should be about 1.5.
self.assertLess(dist0.shape(0), 2)
self.assertIsNone(dist0.loc(0)) # Has no location parameter, should be None.
self.assertGreater(dist0.scale(0), 2) # Should be about 3.
self.assertLess(dist0.scale(0), 4)
self.assertGreater(dist0.shape2(0), 0.5) # Should be about 1.
self.assertLess(dist0.shape2(0), 2)
def test_fit_lnsquare2(self):
"""
        Tests a 2D fit that includes a logarithmic square dependence function.
"""
sample_hs, sample_tz, label_hs, label_tz = read_benchmark_dataset()
# Define the structure of the probabilistic model that will be fitted to the
# dataset.
dist_description_hs = {'name': 'Weibull_Exp',
'dependency': (None, None, None, None),
# Shape, Location, Scale, Shape2
'width_of_intervals': 0.5}
dist_description_tz = {'name': 'Lognormal_SigmaMu',
'dependency': (0, None, 0),
# Shape, Location, Scale
'functions': ('exp3', None, 'lnsquare2')
# Shape, Location, Scale
}
# Fit the model to the data.
fit = Fit((sample_hs, sample_tz),
(dist_description_hs, dist_description_tz))
# Check whether the logarithmic square fit worked correctly.
dist1 = fit.mul_var_dist.distributions[1]
self.assertGreater(dist1.scale.a, 1) # Should be about 1-5
self.assertLess(dist1.scale.a, 5) # Should be about 1-5
self.assertGreater(dist1.scale.b, 2) # Should be about 2-10
self.assertLess(dist1.scale.b, 10) # Should be about 2-10
self.assertGreater(dist1.scale(0), 0.1)
self.assertLess(dist1.scale(0), 10)
self.assertEqual(dist1.scale.func_name, 'lnsquare2')
def test_fit_powerdecrease3(self):
"""
        Tests a 2D fit that includes a powerdecrease3 dependence function.
"""
sample_hs, sample_tz, label_hs, label_tz = read_benchmark_dataset()
# Define the structure of the probabilistic model that will be fitted to the
# dataset.
dist_description_hs = {'name': 'Weibull_Exp',
'dependency': (None, None, None, None),
# Shape, Location, Scale, Shape2
'width_of_intervals': 0.5}
dist_description_tz = {'name': 'Lognormal_SigmaMu',
'dependency': (0, None, 0),
# Shape, Location, Scale
'functions': ('powerdecrease3', None, 'lnsquare2')
# Shape, Location, Scale
}
# Fit the model to the data.
fit = Fit((sample_hs, sample_tz),
(dist_description_hs, dist_description_tz))
        # Check whether the powerdecrease3 fit worked correctly.
dist1 = fit.mul_var_dist.distributions[1]
self.assertGreater(dist1.shape.a, -0.1) # Should be about 0
self.assertLess(dist1.shape.a, 0.1) # Should be about 0
self.assertGreater(dist1.shape.b, 1.5) # Should be about 2-5
self.assertLess(dist1.shape.b, 6) # Should be about 2-10
self.assertGreater(dist1.shape.c, 0.8) # Should be about 1.1
self.assertLess(dist1.shape.c, 2) # Should be about 1.1
self.assertGreater(dist1.shape(0), 0.25) # Should be about 0.35
self.assertLess(dist1.shape(0), 0.4) # Should be about 0.35
self.assertEqual(dist1.shape.func_name, 'powerdecrease3')
def test_fit_asymdecrease3(self):
"""
Tests a 2D fit that includes an asymdecrease3 dependence function.
"""
sample_hs, sample_tz, label_hs, label_tz = read_benchmark_dataset()
# Define the structure of the probabilistic model that will be fitted to the
# dataset.
dist_description_hs = {'name': 'Weibull_Exp',
'dependency': (None, None, None, None),
# Shape, Location, Scale, Shape2
'width_of_intervals': 0.5}
dist_description_tz = {'name': 'Lognormal_SigmaMu',
'dependency': (0, None, 0),
# Shape, Location, Scale
'functions': ('asymdecrease3', None, 'lnsquare2')
# Shape, Location, Scale
}
# Fit the model to the data.
fit = Fit((sample_hs, sample_tz),
(dist_description_hs, dist_description_tz))
        # Check whether the asymdecrease3 fit worked correctly.
dist1 = fit.mul_var_dist.distributions[1]
self.assertAlmostEqual(dist1.shape.a, 0, delta=0.1) # Should be about 0
self.assertAlmostEqual(dist1.shape.b, 0.35, delta=0.4) # Should be about 0.35
self.assertAlmostEqual(np.abs(dist1.shape.c), 0.45, delta=0.2) # Should be about 0.45
        self.assertAlmostEqual(dist1.shape(0), 0.35, delta=0.2) # Should be about 0.35
def test_min_number_datapoints_for_fit(self):
"""
Tests if the minimum number of datapoints required for a fit works.
"""
sample_hs, sample_tz, label_hs, label_tz = read_benchmark_dataset()
# Define the structure of the probabilistic model that will be fitted to the
# dataset.
dist_description_hs = {'name': 'Weibull_Exp',
'dependency': (None, None, None, None),
# Shape, Location, Scale, Shape2
'width_of_intervals': 0.5}
dist_description_tz = {'name': 'Lognormal_SigmaMu',
'dependency': (0, None, 0),
# Shape, Location, Scale
'functions': ('exp3', None, 'lnsquare2'),
# Shape, Location, Scale
'min_datapoints_for_fit': 10
}
# Fit the model to the data.
fit = Fit((sample_hs, sample_tz),
(dist_description_hs, dist_description_tz))
# Check whether the logarithmic square fit worked correctly.
dist1 = fit.mul_var_dist.distributions[1]
a_min_10 = dist1.scale.a
# Now require more datapoints for a fit.
dist_description_tz = {'name': 'Lognormal_SigmaMu',
'dependency': (0, None, 0),
# Shape, Location, Scale
'functions': ('exp3', None, 'lnsquare2'),
# Shape, Location, Scale
'min_datapoints_for_fit': 500
}
# Fit the model to the data.
fit = Fit((sample_hs, sample_tz),
(dist_description_hs, dist_description_tz))
# Check whether the logarithmic square fit worked correctly.
dist1 = fit.mul_var_dist.distributions[1]
a_min_500 = dist1.scale.a
        # Because fewer bins were used in the second case, we should get
        # different coefficients for the dependence function.
self.assertNotEqual(a_min_10, a_min_500)
    def test_multi_processing(self):
"""
2-d Fit with multiprocessing (specified by setting a value for timeout)
"""
# Define a sample and a fit.
prng = np.random.RandomState(42)
sample_1 = prng.weibull(1.5, 1000)*3
sample_2 = [0.1 + 1.5 * np.exp(0.2 * point) +
prng.lognormal(2, 0.2) for point in sample_1]
dist_description_0 = {'name': 'Weibull',
'dependency': (None, None, None),
'width_of_intervals': 2}
dist_description_1 = {'name': 'Lognormal',
'dependency': (None, None, 0),
'functions': (None, None, 'exp3')}
# Compute the fit.
my_fit = Fit((sample_1, sample_2),
(dist_description_0, dist_description_1),
timeout=10)
def test_wbl_fit_with_negative_location(self):
"""
Tests fitting a translated Weibull distribution which would result
in a negative location parameter.
"""
sample_hs, sample_tz, label_hs, label_tz = read_benchmark_dataset()
# Define the structure of the probabilistic model that will be fitted to the
# dataset.
dist_description_hs = {'name': 'Weibull_3p',
'dependency': (None, None, None)}
# Fit the model to the data.
fit = Fit((sample_hs, ),
(dist_description_hs, ))
# Correct values for 10 years of data can be found in
        # 10.1115/OMAE2019-96523. Here we used 1 year of data.
dist0 = fit.mul_var_dist.distributions[0]
self.assertAlmostEqual(dist0.shape(0) / 10, 1.48 / 10, places=1)
self.assertGreater(dist0.loc(0), 0.0) # Should be 0.0981
self.assertLess(dist0.loc(0), 0.3) # Should be 0.0981
self.assertAlmostEqual(dist0.scale(0), 0.944, places=1)
        # Shift the wave data by -2 m and fit again.
sample_hs = sample_hs - 2
# Negative location values will be set to zero instead and a
# warning will be raised.
with self.assertWarns(RuntimeWarning):
fit = Fit((sample_hs, ),
(dist_description_hs, ))
dist0 = fit.mul_var_dist.distributions[0]
self.assertAlmostEqual(dist0.shape(0) / 10, 1.48 / 10, places=1)
# Should be estimated to be 0.0981 - 2 and corrected to be 0.
self.assertEqual(dist0.loc(0), 0)
self.assertAlmostEqual(dist0.scale(0), 0.944, places=1)
def test_omae2020_wind_wave_model(self):
"""
Tests fitting the wind-wave model that was used in the publication
'Global hierarchical models for wind and wave contours' on dataset D.
"""
sample_v, sample_hs, label_v, label_hs = read_benchmark_dataset(path='tests/testfiles/1year_dataset_D.txt')
# Define the structure of the probabilistic model that will be fitted to the
# dataset.
dist_description_v = {'name': 'Weibull_Exp',
'dependency': (None, None, None, None),
'width_of_intervals': 2}
dist_description_hs = {'name': 'Weibull_Exp',
'fixed_parameters' : (None, None, None, 5), # shape, location, scale, shape2
'dependency': (0, None, 0, None), # shape, location, scale, shape2
'functions': ('logistics4', None, 'alpha3', None), # shape, location, scale, shape2
'min_datapoints_for_fit': 20}
# Fit the model to the data.
fit = Fit((sample_v, sample_hs),
(dist_description_v, dist_description_hs))
dist0 = fit.mul_var_dist.distributions[0]
self.assertAlmostEqual(dist0.shape(0), 2.42, delta=1)
self.assertAlmostEqual(dist0.scale(0), 10.0, delta=2)
self.assertAlmostEqual(dist0.shape2(0), 0.761, delta=0.5)
dist1 = fit.mul_var_dist.distributions[1]
self.assertEqual(dist1.shape2(0), 5)
inspection_data1 = fit.multiple_fit_inspection_data[1]
self.assertEqual(inspection_data1.shape2_value[0], 5)
self.assertAlmostEqual(inspection_data1.shape_value[0], 0.8, delta=0.5) # interval centered at 1
self.assertAlmostEqual(inspection_data1.shape_value[4], 1.5, delta=0.5) # interval centered at 9
self.assertAlmostEqual(inspection_data1.shape_value[9], 2.5, delta=1) # interval centered at 19
self.assertAlmostEqual(dist1.shape(0), 0.8, delta=0.3)
self.assertAlmostEqual(dist1.shape(10), 1.6, delta=0.5)
self.assertAlmostEqual(dist1.shape(20), 2.3, delta=0.7)
self.assertAlmostEqual(dist1.shape.a, 0.582, delta=0.5)
self.assertAlmostEqual(dist1.shape.b, 1.90, delta=1)
self.assertAlmostEqual(dist1.shape.c, 0.248, delta=0.5)
self.assertAlmostEqual(dist1.shape.d, 8.49, delta=5)
self.assertAlmostEqual(inspection_data1.scale_value[0], 0.15, delta=0.2) # interval centered at 1
self.assertAlmostEqual(inspection_data1.scale_value[4], 1, delta=0.5) # interval centered at 9
self.assertAlmostEqual(inspection_data1.scale_value[9], 4, delta=1) # interval centered at 19
self.assertAlmostEqual(dist1.scale(0), 0.15, delta=0.5)
self.assertAlmostEqual(dist1.scale(10), 1, delta=0.5)
self.assertAlmostEqual(dist1.scale(20), 4, delta=1)
self.assertAlmostEqual(dist1.scale.a, 0.394, delta=0.5)
self.assertAlmostEqual(dist1.scale.b, 0.0178, delta=0.1)
self.assertAlmostEqual(dist1.scale.c, 1.88, delta=0.8)
def test_wrong_model(self):
"""
        Tests whether errors are raised when incorrect fitting models are
specified.
"""
sample_v, sample_hs, label_v, label_hs = read_benchmark_dataset(path='tests/testfiles/1year_dataset_D.txt')
        # This structure is incorrect as there is no distribution called 'something'.
dist_description_v = {'name': 'something',
'dependency': (None, None, None, None),
'fixed_parameters': (None, None, None, None), # shape, location, scale, shape2
'width_of_intervals': 2}
with self.assertRaises(ValueError):
# Fit the model to the data.
fit = Fit((sample_v, ),
(dist_description_v, ))
        # This structure is incorrect as there is no dependence function called 'something'.
dist_description_v = {'name': 'Weibull_Exp',
'dependency': (None, None, None, None),
'width_of_intervals': 2}
dist_description_hs = {'name': 'Weibull_Exp',
'dependency': (0, None, 0, None), # shape, location, scale, shape2
'functions': ('something', None, 'alpha3', None), # shape, location, scale, shape2
'min_datapoints_for_fit': 20}
with self.assertRaises(ValueError):
# Fit the model to the data.
fit = Fit((sample_v, sample_hs),
(dist_description_v, dist_description_hs))
# This structure is incorrect as there will be only 1 or 2 intervals
# that fit 2000 datapoints.
dist_description_v = {'name': 'Weibull_Exp',
'dependency': (None, None, None, None),
'width_of_intervals': 2}
dist_description_hs = {'name': 'Weibull_Exp',
'dependency': (0, None, 0, None), # shape, location, scale, shape2
'functions': ('logistics4', None, 'alpha3', None), # shape, location, scale, shape2
'min_datapoints_for_fit': 2000}
with self.assertRaises(RuntimeError):
# Fit the model to the data.
fit = Fit((sample_v, sample_hs),
(dist_description_v, dist_description_hs))
        # This structure is incorrect as alpha3 is only compatible with
        # logistics4.
dist_description_v = {'name': 'Weibull_Exp',
'dependency': (None, None, None, None),
'width_of_intervals': 2}
dist_description_hs = {'name': 'Weibull_Exp',
'fixed_parameters' : (None, None, None, 5), # shape, location, scale, shape2
'dependency': (0, None, 0, None), # shape, location, scale, shape2
'functions': ('power3', None, 'alpha3', None), # shape, location, scale, shape2
'min_datapoints_for_fit': 20}
with self.assertRaises(TypeError):
# Fit the model to the data.
fit = Fit((sample_v, sample_hs),
(dist_description_v, dist_description_hs))
# This structure is incorrect as only shape2 of an exponentiated Weibull
# distribution can be fixed at the moment.
dist_description_v = {'name': 'Lognormal',
'dependency': (None, None, None, None),
'fixed_parameters': (None, None, 5, None), # shape, location, scale, shape2
'width_of_intervals': 2}
with self.assertRaises(NotImplementedError):
# Fit the model to the data.
fit = Fit((sample_v, ),
(dist_description_v, ))
# This structure is incorrect as only shape2 of an exponentiated Weibull
# distribution can be fixed at the moment.
dist_description_v = {'name': 'Weibull_Exp',
'dependency': (None, None, None, None),
'width_of_intervals': 2}
dist_description_hs = {'name': 'Weibull_Exp',
'fixed_parameters' : (None, None, 5, None), # shape, location, scale, shape2
'dependency': (0, None, 0, None), # shape, location, scale, shape2
'functions': ('logistics4', None, 'alpha3', None), # shape, location, scale, shape2
'min_datapoints_for_fit': 20}
with self.assertRaises(NotImplementedError):
# Fit the model to the data.
fit = Fit((sample_v, sample_hs),
(dist_description_v, dist_description_hs))
def test_weighting_of_dependence_function(self):
"""
Tests if using weights when the dependence function is fitted works
correctly.
"""
sample_v, sample_hs, label_v, label_hs = read_benchmark_dataset(path='tests/testfiles/1year_dataset_D.txt')
# Define the structure of the probabilistic model that will be fitted to the
# dataset.
dist_description_v = {'name': 'Weibull_Exp',
'dependency': (None, None, None, None),
'width_of_intervals': 2}
dist_description_hs = {'name': 'Weibull_Exp',
'fixed_parameters' : (None, None, None, 5), # shape, location, scale, shape2
'dependency': (0, None, 0, None), # shape, location, scale, shape2
'functions': ('logistics4', None, 'alpha3', None), # shape, location, scale, shape2
'min_datapoints_for_fit': 20,
'do_use_weights_for_dependence_function': False}
# Fit the model to the data.
fit = Fit((sample_v, sample_hs),
(dist_description_v, dist_description_hs))
dist1_no_weights = fit.mul_var_dist.distributions[1]
# Now perform a fit with weights.
dist_description_hs = {'name': 'Weibull_Exp',
'fixed_parameters' : (None, None, None, 5), # shape, location, scale, shape2
'dependency': (0, None, 0, None), # shape, location, scale, shape2
'functions': ('logistics4', None, 'alpha3', None), # shape, location, scale, shape2
'min_datapoints_for_fit': 20,
'do_use_weights_for_dependence_function': True}
# Fit the model to the data.
fit = Fit((sample_v, sample_hs),
(dist_description_v, dist_description_hs))
dist1_with_weights = fit.mul_var_dist.distributions[1]
        # Make sure the two fitted dependence functions are different.
d = np.abs(dist1_with_weights.scale(0) - dist1_no_weights.scale(0)) / \
np.abs(dist1_no_weights.scale(0))
self.assertGreater(d, 0.01)
# Make sure they are not too different.
d = np.abs(dist1_with_weights.scale(20) - dist1_no_weights.scale(20)) / \
np.abs(dist1_no_weights.scale(20))
self.assertLess(d, 0.5)
| 46.563847 | 121 | 0.561044 | 3,248 | 28,078 | 4.695197 | 0.103756 | 0.072787 | 0.046033 | 0.040918 | 0.763016 | 0.729639 | 0.683738 | 0.626623 | 0.566295 | 0.546951 | 0 | 0.052405 | 0.337382 | 28,078 | 602 | 122 | 46.641196 | 0.767267 | 0.239511 | 0 | 0.535088 | 0 | 0 | 0.104394 | 0.021705 | 0 | 0 | 0 | 0 | 0.251462 | 1 | 0.038012 | false | 0 | 0.011696 | 0 | 0.055556 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
81bafa0175de3af83830a52504e9b10d4a89639b | 10,439 | py | Python | pocketsmith/models/attachment.py | brett-comber/python-pocketsmith-api | a9c7f25abf65e4e022535431dc1d34d6a1bd97e8 | [
"MIT"
] | null | null | null | pocketsmith/models/attachment.py | brett-comber/python-pocketsmith-api | a9c7f25abf65e4e022535431dc1d34d6a1bd97e8 | [
"MIT"
] | null | null | null | pocketsmith/models/attachment.py | brett-comber/python-pocketsmith-api | a9c7f25abf65e4e022535431dc1d34d6a1bd97e8 | [
"MIT"
] | null | null | null | # coding: utf-8
"""
PocketSmith
The public PocketSmith API # noqa: E501
The version of the OpenAPI document: 2.0
Contact: api@pocketsmith.com
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from pocketsmith.configuration import Configuration
class Attachment(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'content_type': 'str',
'content_type_meta': 'AttachmentContentTypeMeta',
'created_at': 'datetime',
'file_name': 'str',
'id': 'int',
'original_url': 'str',
'title': 'str',
'type': 'str',
'updated_at': 'datetime',
'variants': 'AttachmentVariants'
}
attribute_map = {
'content_type': 'content_type',
'content_type_meta': 'content_type_meta',
'created_at': 'created_at',
'file_name': 'file_name',
'id': 'id',
'original_url': 'original_url',
'title': 'title',
'type': 'type',
'updated_at': 'updated_at',
'variants': 'variants'
}
def __init__(self, content_type=None, content_type_meta=None, created_at=None, file_name=None, id=None, original_url=None, title=None, type=None, updated_at=None, variants=None, local_vars_configuration=None): # noqa: E501
"""Attachment - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._content_type = None
self._content_type_meta = None
self._created_at = None
self._file_name = None
self._id = None
self._original_url = None
self._title = None
self._type = None
self._updated_at = None
self._variants = None
self.discriminator = None
if content_type is not None:
self.content_type = content_type
if content_type_meta is not None:
self.content_type_meta = content_type_meta
if created_at is not None:
self.created_at = created_at
if file_name is not None:
self.file_name = file_name
if id is not None:
self.id = id
if original_url is not None:
self.original_url = original_url
if title is not None:
self.title = title
if type is not None:
self.type = type
if updated_at is not None:
self.updated_at = updated_at
if variants is not None:
self.variants = variants
@property
def content_type(self):
"""Gets the content_type of this Attachment. # noqa: E501
The content type of the attachment. # noqa: E501
:return: The content_type of this Attachment. # noqa: E501
:rtype: str
"""
return self._content_type
@content_type.setter
def content_type(self, content_type):
"""Sets the content_type of this Attachment.
The content type of the attachment. # noqa: E501
:param content_type: The content_type of this Attachment. # noqa: E501
:type: str
"""
self._content_type = content_type
@property
def content_type_meta(self):
"""Gets the content_type_meta of this Attachment. # noqa: E501
:return: The content_type_meta of this Attachment. # noqa: E501
:rtype: AttachmentContentTypeMeta
"""
return self._content_type_meta
@content_type_meta.setter
def content_type_meta(self, content_type_meta):
"""Sets the content_type_meta of this Attachment.
:param content_type_meta: The content_type_meta of this Attachment. # noqa: E501
:type: AttachmentContentTypeMeta
"""
self._content_type_meta = content_type_meta
@property
def created_at(self):
"""Gets the created_at of this Attachment. # noqa: E501
When the attachment was created # noqa: E501
:return: The created_at of this Attachment. # noqa: E501
:rtype: datetime
"""
return self._created_at
@created_at.setter
def created_at(self, created_at):
"""Sets the created_at of this Attachment.
When the attachment was created # noqa: E501
:param created_at: The created_at of this Attachment. # noqa: E501
:type: datetime
"""
self._created_at = created_at
@property
def file_name(self):
"""Gets the file_name of this Attachment. # noqa: E501
The file name of the attachment # noqa: E501
:return: The file_name of this Attachment. # noqa: E501
:rtype: str
"""
return self._file_name
@file_name.setter
def file_name(self, file_name):
"""Sets the file_name of this Attachment.
The file name of the attachment # noqa: E501
:param file_name: The file_name of this Attachment. # noqa: E501
:type: str
"""
self._file_name = file_name
@property
def id(self):
"""Gets the id of this Attachment. # noqa: E501
The unique identifier of the attachment # noqa: E501
:return: The id of this Attachment. # noqa: E501
:rtype: int
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this Attachment.
The unique identifier of the attachment # noqa: E501
:param id: The id of this Attachment. # noqa: E501
:type: int
"""
self._id = id
@property
def original_url(self):
"""Gets the original_url of this Attachment. # noqa: E501
The url of the attachment # noqa: E501
:return: The original_url of this Attachment. # noqa: E501
:rtype: str
"""
return self._original_url
@original_url.setter
def original_url(self, original_url):
"""Sets the original_url of this Attachment.
The url of the attachment # noqa: E501
:param original_url: The original_url of this Attachment. # noqa: E501
:type: str
"""
self._original_url = original_url
@property
def title(self):
"""Gets the title of this Attachment. # noqa: E501
The title of the attachment. If blank or not provided, the title will be derived from the file name. # noqa: E501
:return: The title of this Attachment. # noqa: E501
:rtype: str
"""
return self._title
@title.setter
def title(self, title):
"""Sets the title of this Attachment.
The title of the attachment. If blank or not provided, the title will be derived from the file name. # noqa: E501
:param title: The title of this Attachment. # noqa: E501
:type: str
"""
self._title = title
@property
def type(self):
"""Gets the type of this Attachment. # noqa: E501
The type of attachment # noqa: E501
:return: The type of this Attachment. # noqa: E501
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this Attachment.
The type of attachment # noqa: E501
:param type: The type of this Attachment. # noqa: E501
:type: str
"""
self._type = type
@property
def updated_at(self):
"""Gets the updated_at of this Attachment. # noqa: E501
When the attachment was last updated # noqa: E501
:return: The updated_at of this Attachment. # noqa: E501
:rtype: datetime
"""
return self._updated_at
@updated_at.setter
def updated_at(self, updated_at):
"""Sets the updated_at of this Attachment.
When the attachment was last updated # noqa: E501
:param updated_at: The updated_at of this Attachment. # noqa: E501
:type: datetime
"""
self._updated_at = updated_at
@property
def variants(self):
"""Gets the variants of this Attachment. # noqa: E501
:return: The variants of this Attachment. # noqa: E501
:rtype: AttachmentVariants
"""
return self._variants
@variants.setter
def variants(self, variants):
"""Sets the variants of this Attachment.
:param variants: The variants of this Attachment. # noqa: E501
:type: AttachmentVariants
"""
self._variants = variants
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, Attachment):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, Attachment):
return True
return self.to_dict() != other.to_dict()
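# Illustrative usage (not part of the generated code; field names as defined above):
#     att = Attachment(id=1, file_name="receipt.pdf", content_type="application/pdf")
#     att.to_dict()["file_name"]  # -> "receipt.pdf"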
| 28.061828 | 227 | 0.590861 | 1,268 | 10,439 | 4.694006 | 0.104101 | 0.06586 | 0.107527 | 0.100806 | 0.531586 | 0.444892 | 0.399866 | 0.28629 | 0.158938 | 0.089382 | 0 | 0.022031 | 0.321678 | 10,439 | 371 | 228 | 28.137466 | 0.818528 | 0.379634 | 0 | 0.089744 | 1 | 0 | 0.067543 | 0.004626 | 0 | 0 | 0 | 0 | 0 | 1 | 0.166667 | false | 0 | 0.025641 | 0 | 0.320513 | 0.012821 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
81bce2f74bd4337a65e512dbd85c7e158418982f | 16,476 | py | Python | pynsq/nsq/NSQReader.py | ghorges/nsq-2.0 | b8dc67fa9467e9f39f976f923b798f574d12d8a9 | [
"MIT"
] | null | null | null | pynsq/nsq/NSQReader.py | ghorges/nsq-2.0 | b8dc67fa9467e9f39f976f923b798f574d12d8a9 | [
"MIT"
] | null | null | null | pynsq/nsq/NSQReader.py | ghorges/nsq-2.0 | b8dc67fa9467e9f39f976f923b798f574d12d8a9 | [
"MIT"
] | null | null | null | """
high-level NSQ reader class built on top of a Tornado IOLoop supporting both sync and
async modes of operation.
supports various hooks to modify behavior when heartbeats are received, temporarily
disable the reader, and pre-process/validate messages.
when supplied a list of nsqlookupd addresses, a reader instance will periodically poll
the specified topic in order to discover new producers and reconnect to existing ones.
sync ex.
import nsq
def task1(message):
print message
return True
def task2(message):
print message
return True
all_tasks = {"task1": task1, "task2": task2}
r = nsq.Reader(all_tasks, lookupd_http_addresses=['http://127.0.0.1:4161'],
topic="nsq_reader", channel="asdf", lookupd_poll_interval=15)
nsq.run()
async ex.
import nsq
buf = []
def process_message(message, finisher):
global buf
# cache both the message and the finisher callable for later processing
buf.append((message, finisher))
if len(buf) >= 3:
print '****'
for msg, finish_fxn in buf:
print msg
finish_fxn(True) # use finish_fxn to tell NSQ of success
print '****'
buf = []
else:
print 'deferring processing'
all_tasks = {"task1": process_message}
r = nsq.Reader(all_tasks, lookupd_http_addresses=['http://127.0.0.1:4161'],
topic="nsq_reader", channel="async", async=True)
nsq.run()
"""
import logging
try:
import simplejson as json
except ImportError:
import json
import time
import signal
import socket
import functools
import urllib
import random
import tornado.ioloop
import tornado.httpclient
import BackoffTimer
import nsq
import async
class RequeueWithoutBackoff(Exception):
"""exception for requeueing a message without incrementing backoff"""
pass
class Reader(object):
def __init__(self, all_tasks, topic, channel,
nsqd_tcp_addresses=None, lookupd_http_addresses=None, async=False,
max_tries=5, max_in_flight=1, requeue_delay=90, lookupd_poll_interval=120):
"""
Reader receives messages over the specified ``topic/channel`` and provides an async loop
that calls each task method provided by ``all_tasks`` up to ``max_tries``.
It will handle sending FIN or REQ commands based on feedback from the task methods. When
re-queueing, an increasing delay will be calculated automatically. Additionally, when
message processing fails, it will backoff for increasing multiples of ``requeue_delay``
between updating of RDY count.
        ``all_tasks`` defines a mapping of tasks and callables that will be executed for each
message received.
``topic`` specifies the desired NSQ topic
``channel`` specifies the desired NSQ channel
``nsqd_tcp_addresses`` a sequence of string addresses of the nsqd instances this reader
should connect to
``lookupd_http_addresses`` a sequence of string addresses of the nsqlookupd instances this
reader should query for producers of the specified topic
``async`` determines whether handlers will do asynchronous processing. If set to True,
handlers must accept a keyword argument called ``finisher`` that will be a callable used
to signal message completion, taking a boolean argument indicating success.
``max_tries`` the maximum number of attempts the reader will make to process a message after
which messages will be automatically discarded
``max_in_flight`` the maximum number of messages this reader will pipeline for processing.
this value will be divided evenly amongst the configured/discovered nsqd producers.
``requeue_delay`` the base multiple used when re-queueing (multiplied by # of attempts)
``lookupd_poll_interval`` the amount of time in between querying all of the supplied
        nsqlookupd instances. A random amount of time based on this value will be initially
        introduced in order to add jitter when multiple readers are running.
"""
assert isinstance(all_tasks, dict)
for key, method in all_tasks.items():
assert callable(method), "key %s must have a callable value" % key
assert isinstance(topic, (str, unicode)) and len(topic) > 0
assert isinstance(channel, (str, unicode)) and len(channel) > 0
assert isinstance(max_in_flight, int) and 0 < max_in_flight < 2500
if nsqd_tcp_addresses:
if not isinstance(nsqd_tcp_addresses, (list, set, tuple)):
assert isinstance(nsqd_tcp_addresses, (str, unicode))
nsqd_tcp_addresses = [nsqd_tcp_addresses]
else:
nsqd_tcp_addresses = []
if lookupd_http_addresses:
if not isinstance(lookupd_http_addresses, (list, set, tuple)):
assert isinstance(lookupd_http_addresses, (str, unicode))
lookupd_http_addresses = [lookupd_http_addresses]
else:
lookupd_http_addresses = []
assert nsqd_tcp_addresses or lookupd_http_addresses
self.topic = topic
self.channel = channel
self.nsqd_tcp_addresses = nsqd_tcp_addresses
self.lookupd_http_addresses = lookupd_http_addresses
self.requeue_delay = int(requeue_delay * 1000)
self.max_tries = max_tries
self.max_in_flight = max_in_flight
self.lookupd_poll_interval = lookupd_poll_interval
self.async = async
self.task_lookup = all_tasks
self.backoff_timer = dict((k, BackoffTimer.BackoffTimer(0, 120)) for k in self.task_lookup.keys())
self.hostname = socket.gethostname()
self.short_hostname = self.hostname.split('.')[0]
self.conns = {}
self.http_client = tornado.httpclient.AsyncHTTPClient()
self.last_recv_timestamps = {}
logging.info("starting reader for topic '%s'..." % self.topic)
for task in self.task_lookup:
for addr in self.nsqd_tcp_addresses:
address, port = addr.split(':')
self.connect_to_nsqd(address, int(port), task)
# trigger the first one manually
self.query_lookupd()
tornado.ioloop.PeriodicCallback(self.check_last_recv_timestamps, 60 * 1000).start()
periodic = tornado.ioloop.PeriodicCallback(self.query_lookupd, self.lookupd_poll_interval * 1000)
# randomize the time we start this poll loop so that all servers don't query at exactly the same time
# randomize based on 10% of the interval
delay = random.random() * self.lookupd_poll_interval * .1
tornado.ioloop.IOLoop.instance().add_timeout(time.time() + delay, periodic.start)
def _client_callback(self, success, message=None, task=None, conn=None):
'''
This is the method that an asynchronous nsqreader should call to indicate
async completion of a message. This will most likely be exposed as the finisher
callable created in `callback` above with some functools voodoo
'''
if success:
self.backoff_timer[task].success()
self.finish(conn, message.id)
else:
self.backoff_timer[task].failure()
self.requeue(conn, message)
def requeue(self, conn, message, delay=True):
if message.attempts > self.max_tries:
self.giving_up(message)
return self.finish(conn, message.id)
try:
# ms
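            # e.g. the default requeue_delay of 90s becomes 90000 ms here, so a
            # 3rd attempt is requeued with a 90000 * 3 = 270000 ms (4.5 minute) delay.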
requeue_delay = self.requeue_delay * message.attempts if delay else 0
conn.send(nsq.requeue(message.id, str(requeue_delay)))
except Exception:
conn.close()
logging.exception('[%s] failed to send requeue %s @ %d' % (conn, message.id, requeue_delay))
def finish(self, conn, message_id):
'''
This is an internal method for NSQReader
'''
try:
conn.send(nsq.finish(message_id))
except Exception:
conn.close()
logging.exception('[%s] failed to send finish %s' % (conn, message_id))
def connection_max_in_flight(self):
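        # e.g. max_in_flight=10 shared across 4 open connections allows
        # max(1, 10 / 4) == 2 messages in flight per connection (integer
        # division, never below 1).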
return max(1, self.max_in_flight / max(1, len(self.conns)))
def handle_message(self, conn, task, message):
conn.ready -= 1
# update ready count if necessary...
# if we're in a backoff state for this task
# set a timer to actually send the ready update
per_conn = self.connection_max_in_flight()
if not conn.is_sending_ready and (conn.ready <= 1 or conn.ready < int(per_conn * 0.25)):
backoff_interval = self.backoff_timer[task].get_interval()
if self.disabled():
backoff_interval = 15
if backoff_interval > 0:
conn.is_sending_ready = True
logging.info('[%s] backing off for %0.2f seconds' % (conn, backoff_interval))
send_ready_callback = functools.partial(self.send_ready, conn, per_conn)
tornado.ioloop.IOLoop.instance().add_timeout(time.time() + backoff_interval, send_ready_callback)
else:
self.send_ready(conn, per_conn)
try:
processed_message = self.preprocess_message(message)
if not self.validate_message(processed_message):
return self.finish(conn, message.id)
except Exception:
logging.exception('[%s] caught exception while preprocessing' % conn)
return self.requeue(conn, message)
method_callback = self.task_lookup[task]
try:
if self.async:
# this handler accepts the finisher callable as a keyword arg
finisher = functools.partial(self._client_callback, message=message, task=task, conn=conn)
return method_callback(processed_message, finisher=finisher)
else:
# this is an old-school sync handler, give it just the message
if method_callback(processed_message):
self.backoff_timer[task].success()
return self.finish(conn, message.id)
self.backoff_timer[task].failure()
except RequeueWithoutBackoff:
logging.info('RequeueWithoutBackoff')
except Exception:
logging.exception('[%s] caught exception while handling %s' % (conn, task))
self.backoff_timer[task].failure()
return self.requeue(conn, message)
def send_ready(self, conn, value):
if self.disabled():
logging.info('[%s] disabled, delaying ready state change', conn)
send_ready_callback = functools.partial(self.send_ready, conn, value)
tornado.ioloop.IOLoop.instance().add_timeout(time.time() + 15, send_ready_callback)
return
try:
conn.send(nsq.ready(value))
conn.ready = value
except Exception:
conn.close()
logging.exception('[%s] failed to send ready' % conn)
conn.is_sending_ready = False
def _data_callback(self, conn, raw_data, task):
self.last_recv_timestamps[get_conn_id(conn, task)] = time.time()
frame, data = nsq.unpack_response(raw_data)
if frame == nsq.FRAME_TYPE_MESSAGE:
message = nsq.decode_message(data)
try:
self.handle_message(conn, task, message)
except Exception:
logging.exception('[%s] failed to handle_message() %r' % (conn, message))
elif frame == nsq.FRAME_TYPE_RESPONSE and data == "_heartbeat_":
self.heartbeat(conn)
conn.send(nsq.nop())
def connect_to_nsqd(self, address, port, task):
assert isinstance(address, (str, unicode))
assert isinstance(port, int)
conn_id = address + ':' + str(port) + ':' + task
if conn_id in self.conns:
return
logging.info("[%s] connecting to nsqd for '%s'", address + ':' + str(port), task)
connect_callback = functools.partial(self._connect_callback, task=task)
data_callback = functools.partial(self._data_callback, task=task)
close_callback = functools.partial(self._close_callback, task=task)
conn = async.AsyncConn(address, port, connect_callback, data_callback, close_callback)
conn.connect()
self.conns[conn_id] = conn
def _connect_callback(self, conn, task):
if len(self.task_lookup) > 1:
channel = self.channel + '.' + task
else:
channel = self.channel
initial_ready = self.connection_max_in_flight()
try:
conn.send(nsq.subscribe(self.topic, channel, self.short_hostname, self.hostname))
conn.send(nsq.ready(initial_ready))
conn.ready = initial_ready
conn.is_sending_ready = False
except Exception:
conn.close()
logging.exception('[%s] failed to bootstrap connection' % conn)
def _close_callback(self, conn, task):
conn_id = get_conn_id(conn, task)
if conn_id in self.conns:
del self.conns[conn_id]
logging.warning("[%s] connection closed... %d left open", conn, len(self.conns))
if len(self.conns) == 0 and len(self.lookupd_http_addresses) == 0:
logging.warning("all connections closed and no lookupds... exiting")
tornado.ioloop.IOLoop.instance().stop()
def query_lookupd(self):
for endpoint in self.lookupd_http_addresses:
lookupd_url = endpoint + "/lookup?topic=" + urllib.quote(self.topic)
req = tornado.httpclient.HTTPRequest(lookupd_url, method="GET",
connect_timeout=1, request_timeout=2)
callback = functools.partial(self._finish_query_lookupd, endpoint=endpoint)
self.http_client.fetch(req, callback=callback)
def _finish_query_lookupd(self, response, endpoint):
if response.error:
logging.warning("[%s] lookupd error %s", endpoint, response.error)
return
try:
lookup_data = json.loads(response.body)
except json.JSONDecodeError:
logging.warning("[%s] failed to parse JSON from lookupd: %r", endpoint, response.body)
return
if lookup_data['status_code'] != 200:
logging.warning("[%s] lookupd responded with %d", endpoint, lookup_data['status_code'])
return
for task in self.task_lookup:
for producer in lookup_data['data']['producers']:
self.connect_to_nsqd(producer['address'], producer['tcp_port'], task)
def check_last_recv_timestamps(self):
now = time.time()
for conn_id, conn in dict(self.conns).iteritems():
timestamp = self.last_recv_timestamps.get(conn_id, 0)
if (now - timestamp) > 60:
                # this connection hasn't received data beyond
# the normal heartbeat interval, close it
logging.warning("[%s] connection is stale, closing", conn)
conn = self.conns[conn_id]
conn.close()
#
# subclass overwriteable
#
def giving_up(self, message):
logging.warning("giving up on message '%s' after max tries %d", message.id, self.max_tries)
def disabled(self):
return False
def heartbeat(self, conn):
pass
def validate_message(self, message):
return True
def preprocess_message(self, message):
return message
def get_conn_id(conn, task):
return str(conn) + ':' + task
def _handle_term_signal(sig_num, frame):
logging.info('TERM Signal handler called with signal %r' % sig_num)
tornado.ioloop.IOLoop.instance().stop()
def run():
signal.signal(signal.SIGTERM, _handle_term_signal)
tornado.ioloop.IOLoop.instance().start()
| 40.581281 | 113 | 0.624059 | 1,968 | 16,476 | 5.075711 | 0.197154 | 0.016518 | 0.030033 | 0.016218 | 0.204125 | 0.135249 | 0.100611 | 0.077385 | 0.04545 | 0.030534 | 0 | 0.007857 | 0.289269 | 16,476 | 405 | 114 | 40.681481 | 0.845175 | 0.031682 | 0 | 0.227848 | 0 | 0 | 0.06889 | 0.001773 | 0 | 0 | 0 | 0 | 0.042194 | 0 | null | null | 0.008439 | 0.063291 | null | null | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
81bf6ad4a1d9f400fda048a534023120e5946c0a | 4,098 | py | Python | packages/utils/propagate_license.py | justi/m2g | 09e8b889889ee8d8fb08b9b6fcd726fb3d901644 | [
"Apache-2.0"
] | 12 | 2015-03-11T22:07:17.000Z | 2016-01-29T21:24:29.000Z | packages/utils/propagate_license.py | youngmook/m2g | 09e8b889889ee8d8fb08b9b6fcd726fb3d901644 | [
"Apache-2.0"
] | 213 | 2015-01-30T16:02:57.000Z | 2016-01-29T21:45:02.000Z | packages/utils/propagate_license.py | youngmook/m2g | 09e8b889889ee8d8fb08b9b6fcd726fb3d901644 | [
"Apache-2.0"
] | 5 | 2015-02-04T13:58:12.000Z | 2016-01-29T21:24:46.000Z | #!/usr/bin/env python
# Copyright 2014 Open Connectome Project (http://openconnecto.me)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# propagate_license.py
# Created by Disa Mhembere on 2014-05-16.
# Email: disa@jhu.edu
__license_header__ = """
{} Copyright 2014 Open Connectome Project (http://openconnecto.me)
{}
{} Licensed under the Apache License, Version 2.0 (the "License");
{} you may not use this file except in compliance with the License.
{} You may obtain a copy of the License at
{}
{} http://www.apache.org/licenses/LICENSE-2.0
{}
{} Unless required by applicable law or agreed to in writing, software
{} distributed under the License is distributed on an "AS IS" BASIS,
{} WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
{} See the License for the specific language governing permissions and
{} limitations under the License.
{}
"""
COMM_COUNT = 14
comm = {".py":"#", ".pyx":"#", "": "#", ".html":"", ".sh":"#", ".r":"#", ".m":"%", ".c":"//",
".c++":"//", ".java":"//", ".js":"//"}
import argparse
import os
def add(files):
global __license_header__
for full_fn in files:
license_header = __license_header__
print "Processing file: %s ..." % full_fn
script = open(full_fn, "rb")
lines = script.read().splitlines()
script.close()
# Exception for html
comment_style = comm[os.path.splitext(full_fn)[1].lower()]
if lines[0].startswith("#!/usr/bin"):
if lines[5].startswith("# Copyright"): # get rid of copyright year
del lines[5], lines[1]
lines.insert(1, license_header.format(*([comment_style]*COMM_COUNT)))
else:
#license_header += "{} Created by Disa Mhembere\n{} Email: disa@jhu.edu".format(*([comment_style]*2))
if os.path.splitext(full_fn)[1].lower().strip() == ".html":
license_header = "<!-- " + license_header + " -->"
lines.insert(0, license_header.format(*([comment_style]*COMM_COUNT)))
script = open(full_fn, "wb")
script.write("\n".join(lines))
def hidden(path):
breakdown = path.split("/")
for item in breakdown:
if item.startswith("."):
return True
return False
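# Illustrative behaviour: hidden("a/.git/config") -> True;
# hidden("a/b/script.py") -> False.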
def rm(dirname):
    # Removal (-r/--remove) is declared in the CLI but not implemented yet.
    pass
def main():
parser = argparse.ArgumentParser(description="Add or Update license headers to code")
parser.add_argument("-r", "--remove", action="store_true", help="Remove the license")
parser.add_argument("-d", "--dirname", action="store", default=".", help="Directory where to start walk")
parser.add_argument("-f", "--files", action="store", nargs="*", help="Files you want license added to")
parser.add_argument("-e", "--file_exts", nargs="*", action="store", \
default=[".py", ".pyx", ".html", ".sh", ".R", ".m", ""], \
help="File extensions to add to the files altered")
parser.add_argument("-i", "--ignore", nargs="*", action="store", \
default=["README", "__init__.py", "TODO", __file__], \
help="Files to ignore")
result = parser.parse_args()
if result.files:
print "Licensing individual files ..."
add(result.files)
exit(1)
else:
print "Licensing a directory of files ..."
files = []
for root, dirnames, filenames in os.walk(os.path.abspath(result.dirname)):
for filename in filenames:
full_fn = os.path.join(root, filename)
if os.path.isfile(full_fn) and not hidden(full_fn) \
and not os.path.basename(full_fn) in result.ignore \
and ( os.path.splitext(full_fn)[-1].lower().strip() in result.file_exts ):
files.append(full_fn)
add(files)
if __name__ == "__main__":
main()
| 35.327586 | 107 | 0.656418 | 562 | 4,098 | 4.669039 | 0.33452 | 0.049543 | 0.032393 | 0.02439 | 0.43064 | 0.43064 | 0.420732 | 0.380335 | 0.356707 | 0.356707 | 0 | 0.011035 | 0.181796 | 4,098 | 115 | 108 | 35.634783 | 0.771548 | 0.203026 | 0 | 0.077922 | 0 | 0 | 0.343297 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0.012987 | 0.025974 | null | null | 0.038961 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
81c08bcad1b73822669737a9c7a8c3b7773030bc | 430 | py | Python | videoclip_sources/e004.py | ChrisScarred/misty2py-skills | 30557d246b91fb525866fe8b92e280d2609ca26b | [
"MIT"
] | null | null | null | videoclip_sources/e004.py | ChrisScarred/misty2py-skills | 30557d246b91fb525866fe8b92e280d2609ca26b | [
"MIT"
] | null | null | null | videoclip_sources/e004.py | ChrisScarred/misty2py-skills | 30557d246b91fb525866fe8b92e280d2609ca26b | [
"MIT"
] | null | null | null | import time
from misty2py.robot import Misty
from misty2py.utils.env_loader import EnvLoader
from misty2py_skills.utils.utils import get_abs_path
env_loader = EnvLoader(get_abs_path(".env"))
m = Misty(env_loader.get_ip())
d = m.event("subscribe", type="BatteryCharge")
e_name = d.get("event_name")
time.sleep(1)
d = m.event("get_data", name=e_name)
# do something with the data here
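# for example (illustrative only), just print it:
print(d)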
d = m.event("unsubscribe", name=e_name)
| 21.5 | 52 | 0.755814 | 72 | 430 | 4.319444 | 0.444444 | 0.115756 | 0.067524 | 0.083601 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.010526 | 0.116279 | 430 | 19 | 53 | 22.631579 | 0.807895 | 0.072093 | 0 | 0 | 0 | 0 | 0.138539 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.363636 | 0 | 0.363636 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 |
81c1b8a6fb449ff2c4c107dcaec453b46983daed | 2,302 | py | Python | p2/Python Files/audit_street.py | priyankaswadi/Udacity-Data-Analyst-Nanodegree | 52989f7e447e69c6fb08119f4e39a4500dcdf571 | [
"Apache-2.0"
] | null | null | null | p2/Python Files/audit_street.py | priyankaswadi/Udacity-Data-Analyst-Nanodegree | 52989f7e447e69c6fb08119f4e39a4500dcdf571 | [
"Apache-2.0"
] | null | null | null | p2/Python Files/audit_street.py | priyankaswadi/Udacity-Data-Analyst-Nanodegree | 52989f7e447e69c6fb08119f4e39a4500dcdf571 | [
"Apache-2.0"
] | null | null | null | #Map incorrect and abbreviated street names with correct/better ones
import xml.etree.cElementTree as ET
from collections import defaultdict
import re
import pprint
OSMFILE = "albany.osm"
street_type_re = re.compile(r'\b\S+\.?$', re.IGNORECASE)
# UPDATE THIS VARIABLE
mapping = {"rd": "Road",
"Rd": "Road",
"road": "Road",
"Ave": "Avenue",
"Ave.": "Avenue",
"AVE": "Avenue",
"way" : "Way",
"street": "Street",
"way":"Way",
"Dr.":"Drive",
"Blvd":"Boulevard",
"rt":"Route",
"Ext": "Extension",
"Jay":"Jay Street",
"Nott St E":"Nott Street East",
"Troy-Schenetady-Road":"Troy Schenectady Road",
"Troy-Schenetady Rd" :"Troy Schenectady Road",
"Delatour":"Delatour Road",
"Deltour": "Delatour Road",
"Sparrowbush": "Sparrowbush Road"
}
def audit_street_type(street_types, street_name):
m = street_type_re.search(street_name)
if m:
street_type = m.group()
if street_type not in expected:
street_types[street_type].add(street_name)
def is_street_name(elem):
return (elem.attrib['k'] == "addr:street")
def audit(osmfile):
osm_file = open(osmfile, "r")
street_types = defaultdict(set)
for event, elem in ET.iterparse(osm_file, events=("start",)):
if elem.tag == "node" or elem.tag == "way":
for tag in elem.iter("tag"):
if is_street_name(tag):
audit_street_type(street_types, tag.attrib['v'])
osm_file.close()
return street_types
def update_name(name, mapping):
n = street_type_re.search(name)
if n:
n = n.group()
for m in mapping:
if n == m:
name = name[:-len(n)] + mapping[m]
return name
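# Illustrative result using the mapping above:
#   update_name("Western Ave", mapping)  # -> "Western Avenue"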
def test():
st_types = audit(OSMFILE)
pprint.pprint(dict(st_types))
for st_type, ways in st_types.iteritems():
for name in ways:
better_name = update_name(name, mapping)
if (name == better_name):
continue
print name + " --> " + better_name
if __name__ == '__main__':
    test()
| 27.73494 | 68 | 0.541703 | 270 | 2,302 | 4.451852 | 0.366667 | 0.066556 | 0.02995 | 0.02995 | 0.043261 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.324935 | 2,302 | 83 | 69 | 27.73494 | 0.773488 | 0.038228 | 0 | 0.03125 | 0 | 0 | 0.162223 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.0625 | null | null | 0.046875 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
81c238300e9927729e01076aa4674e5af0b62cf8 | 3,078 | py | Python | lista08_pesquisa/questao02.py | mayararysia/ESTD | 65aa8816aa8773066201cb410b02c1cb72ad5611 | [
"MIT"
] | null | null | null | lista08_pesquisa/questao02.py | mayararysia/ESTD | 65aa8816aa8773066201cb410b02c1cb72ad5611 | [
"MIT"
] | null | null | null | lista08_pesquisa/questao02.py | mayararysia/ESTD | 65aa8816aa8773066201cb410b02c1cb72ad5611 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#Exercise List 08 (Searching) - Question 02
#Mayara Rysia
from time import time
from time import sleep
from random import randint
"""
2. Use the two binary search functions presented (iterative and recursive).
Generate a list of random numbers, sort them, and check how the two perform.
What are the results?
"""
#Binary search - recursive version
def busca_binaria(uma_lista, item_procurado):
if len(uma_lista) == 0:
return False
meio = len(uma_lista)//2
if uma_lista[meio] == item_procurado:
return True
if item_procurado < uma_lista[meio]:
return busca_binaria(uma_lista[:meio], item_procurado)
else:
return busca_binaria(uma_lista[meio+1:], item_procurado)
#Binary search - iterative version
def busca_binaria_it(uma_lista, item_pesquisado):
inicio = 0
fim = len(uma_lista)-1
encontrou = False
while inicio<=fim and not encontrou:
meio = (inicio + fim)//2
if uma_lista[meio] == item_pesquisado:
encontrou = True
else:
if item_pesquisado < uma_lista[meio]:
fim = meio-1
else:
inicio = meio+1
return encontrou
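# Illustrative checks (binary search requires a sorted list):
#   busca_binaria([1, 3, 5, 7], 5)     # -> True
#   busca_binaria_it([1, 3, 5, 7], 2)  # -> False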
#sorts the list
def ordena(lista):
quant = tam = len(lista)
continua = True
while quant>=1 and continua:
continua = False
for i in range(tam):
j=i+1
if j != tam and lista[i] > lista[j]:
continua = True
ant = lista[i]
lista[i] = lista[j]
lista[j] = ant
i=j
quant-=1
return lista
#creates the list
def criaLista():
lista = []
for i in range(9):
num = randint(0, 42)
lista.append(num)
return lista
def Teste(lista, num):
    print('Searching for ', num, 'in list', lista)
inicio = time()
result = busca_binaria(lista, num)
fim = time()
tempo_gasto = fim-inicio
    print('result', result)
return tempo_gasto
def Teste_it(lista, num):
    print('Searching for ', num, 'in list', lista)
inicio = time()
result = busca_binaria_it(lista, num)
fim = time()
tempo_gasto = fim-inicio
    print('result', result)
return tempo_gasto
if __name__ == '__main__':
l = criaLista()
lista = ordena(l)
qtd_br = qtd_bi = 0
    #Tests
for i in range(5):
num = randint(0, 42)
print("<< Busca Recursiva >> \n")
tempo_gasto_br = Teste(lista, num)
print('\ttempo gasto: ', tempo_gasto_br)
print('\n\n')
sleep(2)
print("<< Busca Iterativa >> \n")
tempo_gasto_bi = Teste_it(lista, num)
print('\ttempo gasto: ', tempo_gasto_bi)
print('\n\n')
if tempo_gasto_br < tempo_gasto_bi:
qtd_br +=1
print('\n-> Busca Recursiva levou o menor tempo\n')
else:
qtd_bi +=1
print('\n-> Busca Iterativa levou o menor tempo\n')
print("------- ------- ------- ------- -------")
print("\nCONCLUSÃO\n\n ")
if qtd_br > qtd_bi:
print("Busca Binária Recursiva teve o melhor desempenho!")
else:
print("Busca Binária Iterativa teve o melhor desempenho!")
print("Quantidade Binária Recursiva: ", qtd_br)
print("Quantidade Binária Iterativa: ", qtd_bi)
| 20.938776 | 82 | 0.635153 | 430 | 3,078 | 4.404651 | 0.246512 | 0.050686 | 0.038015 | 0.031679 | 0.268215 | 0.231257 | 0.179514 | 0.143611 | 0.143611 | 0.143611 | 0 | 0.012361 | 0.237817 | 3,078 | 146 | 83 | 21.082192 | 0.79497 | 0.056855 | 0 | 0.268817 | 0 | 0 | 0.165066 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.032258 | null | null | 0.193548 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
81ca35091868d035a8a09d9c9753adadf774b179 | 6,088 | py | Python | api-server.py | proatria/sftpplus-api-example | 1fc3af66beef06d66ad46a0cf74bb0905793cf7f | [
"MIT"
] | null | null | null | api-server.py | proatria/sftpplus-api-example | 1fc3af66beef06d66ad46a0cf74bb0905793cf7f | [
"MIT"
] | null | null | null | api-server.py | proatria/sftpplus-api-example | 1fc3af66beef06d66ad46a0cf74bb0905793cf7f | [
"MIT"
] | null | null | null | """
Run a simple HTTP server which provides API endpoint for SFTPPlus.
Usage:
server.py [options]
-h --help Show this help.
-p --port=8000 Listen to a specific port. [default: 8080]
-a --address=127.0.0.1 Listen on specific address. [default: 0.0.0.0]
-c --certificate=PATH Enable HTTPS by defining the path
to a file containing server key, certificate, and CA chain
all PEM format and stored in a single file.
-f --flaky Introduce random errors to test SFTPPlus API retry functionality.
The following API endpoints are provided:
* /auth-api - For the authentication API
* /event-api - For the event handler API
"""
from __future__ import absolute_import, unicode_literals
import base64
import json
import ssl
from random import randint
from aiohttp import web
from docopt import docopt
# Command line handling part.
arguments = docopt(__doc__)
# Convert arguments to usable types.
port = int(arguments["--port"])
# Need to escape the address for ipv6.
address = arguments["--address"].replace(":", r"\:")
is_flaky = arguments["--flaky"]
certificate = arguments["--certificate"]
# With ``randint(0, _FLAKY_DEGREE) == 0`` the emulated failure probability is
# 1 / (_FLAKY_DEGREE + 1); set it to lower values to make failures more likely.
_FLAKY_DEGREE = 3
# DB with accepted accounts.
# Each key is the name of an user.
# Each value contains the accepted password and/or SSH-key.
ACCOUNTS = {
# An account with some custom configuration.
# Configuration that is not explicitly defined here is extracted based on
# the SFTPPlus group.
"test-user": {
"password": "test-pass",
# Just the public key value, in OpenSSH format.
        # Without the key type or comments.
"ssh-public-key": "AAAAB3NzaC1yc2EAAAADAQABAAAAgQC4fV6tSakDSB6ZovygLsf1iC9P3tJHePTKAPkPAWzlu5BRHcmAu0uTjn7GhrpxbjjWMwDVN0Oxzw7teI0OEIVkpnlcyM6L5mGk+X6Lc4+lAfp1YxCR9o9+FXMWSJP32jRwI+4LhWYxnYUldvAO5LDz9QeR0yKimwcjRToF6/jpLw==",
"configuration": {
"home_folder_path": "/tmp",
# EXTRA_DATA is not yet supported.
# 'extra_data': {
# 'file_api_token': 'fav1_some_value',
# },
},
},
# An account with default configuration extracted from
# the default SFTPPlus group.
# SSH-Key authentication is disabled for this user.
"default-user": {
"password": "default-pass",
"ssh-public-key": "",
"configuration": {},
},
}
async def handle_root(request):
return web.Response(text="Demo SFTPPlus API endpoints.")
async def handle_auth(request):
"""
This is triggered for authentication API calls.
"""
request_json = await get_json(request)
print("\n\n")
print("-" * 80)
print("New authentication request received")
print(json.dumps(request_json, indent=2))
if is_flaky and randint(0, _FLAKY_DEGREE) == 0:
print("TRIGGERING AN EMULATED FAILURE")
return web.Response(status=500, text="Failed to process the request")
credentials = request_json["credentials"]
account = ACCOUNTS.get(credentials["username"], None)
if account is None:
# This is not an account handled by this authentication API.
        # Inform SFTPPlus that it can try to authenticate the user via another
        # method (LDAP, or another HTTP authentication server).
print("UNKNOWN USER")
return web.Response(
status=401, text="User not handled by our API. Try other method."
)
response = {"account": account.get("configuration", {})}
if credentials["type"] in ["password", "password-basic-auth"]:
# We have password based authentication.
if credentials["content"] != account["password"]:
print("INVALID PASSWORD")
return web.Response(status=403, text="Password rejected.")
# Valid password.
print("VALID PASSWORD")
return web.json_response(response)
if credentials["type"] == "ssh-key":
# We have SSH-key based authentication.
# The keys are encoded as BASE64, but we compare them as bytes.
if base64.b64decode(credentials["content"]) != base64.b64decode(
account["ssh-public-key"]
):
print("INVALID SSH-KEY")
return web.Response(status=403, text="SSH-Key rejected.")
# Valid SSH key authentication.
print("VALID SSH-KEY")
return web.json_response(response)
return web.Response(status=403, text="Credentials type not supported.")
async def handle_event(request):
"""
This is triggered by the event handler API calls.
"""
print("\n\n")
print("-" * 80)
print("New event handler call")
print("-" * 80)
print("Headers:")
for key, value in request.headers.items():
print(f" {key}: {value}")
print("-" * 80)
print("Payload:")
await get_json(request)
if is_flaky and randint(0, _FLAKY_DEGREE) == 0:
print("TRIGGERING AN EMULATED FAILURE")
return web.Response(status=500, text="Failed to process the request")
# An empty response body can be used to confirm that the event
# was received successfully by the API server.
    # This instructs SFTPPlus not to retry.
return web.Response(status=204, text="")
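# The event endpoint can be smoke-tested the same way; this payload is
# illustrative only, real SFTPPlus events are richer:
#
#   curl -X POST http://127.0.0.1:8080/event-api \
#        -H 'Content-Type: application/json' -d '{"event": "demo"}'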
async def get_json(request):
"""
    Return the JSON dict parsed from `request`.
    It also logs the received payload, falling back to printing the raw
    body when it is not valid JSON.
"""
result = {}
try:
result = await request.json()
except json.JSONDecodeError:
print("INVALID JSON RECEIVED")
text = await request.text()
print(text)
result = {}
else:
print(json.dumps(result, indent=2))
return result
app = web.Application()
app.add_routes(
[
web.get("/", handle_root),
web.post("/auth-api", handle_auth),
web.post("/event-api", handle_event),
]
)
ssl_context = None
if certificate:
    # Purpose.CLIENT_AUTH builds a context for server-side sockets, where
    # this server presents its certificate to connecting clients.
    ssl_context = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
ssl_context.load_cert_chain(certificate, certificate)
if __name__ == "__main__":
web.run_app(app, host=address, port=port, ssl_context=ssl_context)
| 31.220513 | 233 | 0.655388 | 750 | 6,088 | 5.238667 | 0.330667 | 0.022907 | 0.034614 | 0.040977 | 0.109443 | 0.094681 | 0.071774 | 0.060575 | 0.060575 | 0.060575 | 0 | 0.020869 | 0.236531 | 6,088 | 194 | 234 | 31.381443 | 0.824441 | 0.311597 | 0 | 0.156863 | 0 | 0 | 0.245565 | 0.051698 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0.068627 | 0.068627 | 0 | 0.176471 | 0.215686 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
81ceeac6fb9c99499e11e6ba24211d641629642f | 4,355 | py | Python | src/houdini_package_runner/items/base.py | captainhammy/houdini_package_runner | 40f8b60ebe32c64fd9b37328a9a5eefacd1c6ebd | [
"MIT"
] | 3 | 2022-02-06T23:31:17.000Z | 2022-02-07T11:10:03.000Z | src/houdini_package_runner/items/base.py | captainhammy/houdini_package_runner | 40f8b60ebe32c64fd9b37328a9a5eefacd1c6ebd | [
"MIT"
] | null | null | null | src/houdini_package_runner/items/base.py | captainhammy/houdini_package_runner | 40f8b60ebe32c64fd9b37328a9a5eefacd1c6ebd | [
"MIT"
] | null | null | null | """This module contains a base runnable item."""
# =============================================================================
# IMPORTS
# =============================================================================
# Future
from __future__ import annotations
# Standard Library
from abc import ABC, abstractmethod
from typing import TYPE_CHECKING, List
# Imports for type checking.
if TYPE_CHECKING:
import pathlib
import houdini_package_runner.runners.base
# =============================================================================
# CLASSES
# =============================================================================
class BaseItem(ABC):
"""Base class for a runnable item.
:param write_back: Whether the item should write itself back to disk.
"""
def __init__(self, write_back: bool = False) -> None:
self._contents_changed = False
self._ignored_builtins: List[str] = []
self._is_single_line = False
self._is_test_item = False
self._write_back = write_back
def __repr__(self):
return f"<{self.__class__.__name__}>"
# -------------------------------------------------------------------------
# PROPERTIES
# -------------------------------------------------------------------------
@property
def contents_changed(self) -> bool:
"""Whether the contents of the item have changed."""
return self._contents_changed
@contents_changed.setter
def contents_changed(self, contents_changed: bool):
self._contents_changed = contents_changed
# -------------------------------------------------------------------------
@property
def ignored_builtins(self) -> List[str]:
"""A list of known builtins to ignore for checks which look for imports."""
return self._ignored_builtins
# -------------------------------------------------------------------------
@property
def is_single_line(self) -> bool:
"""Whether the item code on a single line."""
return self._is_single_line
# -------------------------------------------------------------------------
@property
def is_test_item(self) -> bool:
"""Whether the item is a test related item."""
return self._is_test_item
@is_test_item.setter
def is_test_item(self, is_test_item: bool):
self._is_test_item = is_test_item
# -------------------------------------------------------------------------
@property
def write_back(self) -> bool:
"""Whether the item should write changes back."""
return self._write_back
@write_back.setter
    def write_back(self, write_back: bool):
self._write_back = write_back
# -------------------------------------------------------------------------
# METHODS
# -------------------------------------------------------------------------
@abstractmethod
def process(
self, runner: houdini_package_runner.runners.base.HoudiniPackageRunner
) -> int:
"""Process an item.
:param runner: The package runner processing the item.
:return: The process return code.
"""
class BaseFileItem(BaseItem):
"""Base class for a runnable item.
:param path: The path for the item.
:param write_back: Whether the item should write itself back to disk.
"""
def __init__(self, path: pathlib.Path, write_back: bool = False) -> None:
super().__init__(write_back=write_back)
self._path = path
def __repr__(self):
return f"<{self.__class__.__name__} {self.path}>"
# -------------------------------------------------------------------------
# PROPERTIES
# -------------------------------------------------------------------------
@property
def path(self) -> pathlib.Path:
"""The path on disk."""
return self._path
# -------------------------------------------------------------------------
# METHODS
# -------------------------------------------------------------------------
@abstractmethod
def process(
self, runner: houdini_package_runner.runners.base.HoudiniPackageRunner
) -> int:
"""Process an item.
:param runner: The package runner processing the item.
:return: The process return code.
"""
| 29.828767 | 83 | 0.461538 | 382 | 4,355 | 4.971204 | 0.212042 | 0.07109 | 0.042127 | 0.029489 | 0.52396 | 0.345972 | 0.345972 | 0.293839 | 0.26119 | 0.26119 | 0 | 0 | 0.198852 | 4,355 | 145 | 84 | 30.034483 | 0.544282 | 0.473938 | 0 | 0.321429 | 0 | 0 | 0.030913 | 0.024824 | 0 | 0 | 0 | 0 | 0 | 1 | 0.267857 | false | 0 | 0.089286 | 0.035714 | 0.535714 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
81cfb18746180392d2ab217e02dc844bfc9a910e | 4,485 | py | Python | djangoplicity/blog/migrations/0001_initial.py | djangoplicity/blog | 2465b34228d794db9f746e314fa04657cbf18d38 | [
"BSD-3-Clause"
] | null | null | null | djangoplicity/blog/migrations/0001_initial.py | djangoplicity/blog | 2465b34228d794db9f746e314fa04657cbf18d38 | [
"BSD-3-Clause"
] | 1 | 2021-10-20T00:11:16.000Z | 2021-10-20T00:17:51.000Z | djangoplicity/blog/migrations/0001_initial.py | djangoplicity/djangoplicity-blog | 2465b34228d794db9f746e314fa04657cbf18d38 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.9.13 on 2017-08-15 16:23
from __future__ import unicode_literals
import django.contrib.postgres.fields.jsonb
from django.db import migrations, models
import django.db.models.deletion
import djangoplicity.archives.base
import djangoplicity.archives.fields
class Migration(migrations.Migration):
initial = True
dependencies = [
('media', '0021_auto_20170207_1749'),
]
operations = [
migrations.CreateModel(
name='Author',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
('biography', models.TextField(blank=True)),
('photo', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='media.Image')),
],
),
migrations.CreateModel(
name='AuthorDescription',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('description', models.CharField(blank=True, help_text='Optional description, e.g.: "Author: ", or "Interview with"', max_length=100)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='blog.Author')),
],
),
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
('footer', models.TextField(blank=True, help_text='Optional footer added to the bottom of posts')),
],
),
migrations.CreateModel(
name='Post',
fields=[
('slug', models.SlugField(help_text='Used for the URL', primary_key=True, serialize=False)),
('title', models.CharField(max_length=255)),
('subtitle', models.CharField(blank=True, help_text='Optional subtitle', max_length=255)),
('lede', models.TextField()),
('body', models.TextField()),
('discover_box', models.TextField(blank=True)),
('numbers_box', models.TextField(blank=True)),
('links', models.TextField(blank=True)),
('release_date', djangoplicity.archives.fields.ReleaseDateTimeField(blank=True, db_index=True, null=True)),
('embargo_date', djangoplicity.archives.fields.ReleaseDateTimeField(blank=True, db_index=True, null=True)),
('published', models.BooleanField(db_index=True, default=False, verbose_name='Published')),
('last_modified', models.DateTimeField(auto_now=True, verbose_name='Last modified')),
('created', models.DateTimeField(auto_now_add=True, verbose_name='Created')),
('release_task_id', models.CharField(blank=True, max_length=64, null=True)),
('embargo_task_id', models.CharField(blank=True, max_length=64, null=True)),
('checksums', django.contrib.postgres.fields.jsonb.JSONField(blank=True, null=True)),
('authors', models.ManyToManyField(through='blog.AuthorDescription', to='blog.Author')),
('banner', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='media.Image', verbose_name='Banner Image')),
('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='blog.Category')),
],
options={
'ordering': ('-release_date',),
},
bases=(djangoplicity.archives.base.ArchiveModel, models.Model),
),
migrations.CreateModel(
name='Tag',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
],
),
migrations.AddField(
model_name='post',
name='tags',
field=models.ManyToManyField(to='blog.Tag'),
),
migrations.AddField(
model_name='authordescription',
name='post',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='blog.Post'),
),
]
| 48.75 | 151 | 0.599331 | 456 | 4,485 | 5.763158 | 0.287281 | 0.044521 | 0.031963 | 0.050228 | 0.458143 | 0.393075 | 0.393075 | 0.362633 | 0.362633 | 0.362633 | 0 | 0.016216 | 0.257525 | 4,485 | 91 | 152 | 49.285714 | 0.772973 | 0.015162 | 0 | 0.361446 | 1 | 0 | 0.140462 | 0.010195 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.072289 | 0 | 0.120482 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
81d742485fceccd1810f61f429cd089c6e0b112d | 1,126 | py | Python | test.py | IldusTim/QAStudy | f2f5e9c673259e7e1c8d0ab2887f28326300abe3 | [
"Apache-2.0"
] | null | null | null | test.py | IldusTim/QAStudy | f2f5e9c673259e7e1c8d0ab2887f28326300abe3 | [
"Apache-2.0"
] | null | null | null | test.py | IldusTim/QAStudy | f2f5e9c673259e7e1c8d0ab2887f28326300abe3 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
import math
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
link = "http://suninjuly.github.io/explicit_wait2.html"
opt = webdriver.ChromeOptions()
opt.add_experimental_option('w3c', False)
# 'chrome_options' was deprecated in favor of 'options'.
browser = webdriver.Chrome(options=opt)
# implicitly_wait() takes a single timeout in seconds (no poll argument).
browser.implicitly_wait(5)
browser.get(link)
button = browser.find_element_by_id("book")
price = WebDriverWait(browser, 12).until(
    EC.text_to_be_present_in_element((By.ID, "price"), "10000 RUR"))
button.click()
def calc(x):
    # The page asks for log(|12 * sin(x)|) computed from the displayed integer.
    return str(math.log(abs(12 * math.sin(int(x)))))
browser.find_element_by_class_name("btn-primary").click()
# new_window = browser.window_handles[1]
# browser.switch_to.window(new_window)
x_element = browser.find_element_by_id("input_value")
x = x_element.text
y = calc(x)
browser.find_element_by_id("answer").click()
browser.find_element_by_id("answer").send_keys(y)
browser.find_element_by_id("solve").click() | 31.277778 | 104 | 0.785968 | 174 | 1,126 | 4.867816 | 0.482759 | 0.07438 | 0.127509 | 0.141677 | 0.255018 | 0.151122 | 0 | 0 | 0 | 0 | 0 | 0.015444 | 0.079929 | 1,126 | 36 | 105 | 31.277778 | 0.802124 | 0.086146 | 0 | 0 | 0 | 0 | 0.103314 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.038462 | false | 0 | 0.307692 | 0.038462 | 0.384615 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 |
81d761dcf0b173ad97a22e411c04701a33909ebc | 1,224 | py | Python | django_backend/product/migrations/0002_product.py | itsmahadi007/E-Commerce-VueJS-Django | 4fc298f2181fd22c6aeb74439edef78a397d5447 | [
"MIT"
] | null | null | null | django_backend/product/migrations/0002_product.py | itsmahadi007/E-Commerce-VueJS-Django | 4fc298f2181fd22c6aeb74439edef78a397d5447 | [
"MIT"
] | 4 | 2022-01-13T03:56:36.000Z | 2022-03-12T01:01:24.000Z | django_backend/product/migrations/0002_product.py | itsmahadi007/E-Commerce-VueJS-Django | 4fc298f2181fd22c6aeb74439edef78a397d5447 | [
"MIT"
] | null | null | null | # Generated by Django 3.2.7 on 2021-09-01 17:46
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('product', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Product',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200)),
('slug', models.SlugField()),
('description', models.TextField(blank=True, null=True)),
('price', models.DecimalField(decimal_places=2, max_digits=10)),
('image', models.ImageField(blank=True, null=True, upload_to='uploads/')),
('thumbnail', models.ImageField(blank=True, null=True, upload_to='uploads/')),
('data_added', models.DateTimeField(auto_now_add=True)),
('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='product', to='product.category')),
],
options={
'ordering': ('-data_added',),
},
),
]
| 38.25 | 140 | 0.580882 | 124 | 1,224 | 5.612903 | 0.580645 | 0.034483 | 0.056034 | 0.073276 | 0.137931 | 0.137931 | 0.137931 | 0.137931 | 0.137931 | 0 | 0 | 0.027964 | 0.269608 | 1,224 | 31 | 141 | 39.483871 | 0.750559 | 0.036765 | 0 | 0 | 1 | 0 | 0.122345 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.08 | 0 | 0.2 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
81dbffa128ea7c27541a642445edf3ebd5fd3197 | 8,918 | py | Python | os_migrate/plugins/modules/import_workload_create_instance.py | jbadiapa/os-migrate | 19b591a672bc9e4af72e62dbd96be94a238a6dc2 | [
"Apache-2.0"
] | 35 | 2020-01-22T18:38:27.000Z | 2022-03-22T16:19:56.000Z | os_migrate/plugins/modules/import_workload_create_instance.py | jbadiapa/os-migrate | 19b591a672bc9e4af72e62dbd96be94a238a6dc2 | [
"Apache-2.0"
] | 292 | 2019-12-09T11:15:26.000Z | 2022-03-31T14:37:52.000Z | os_migrate/plugins/modules/import_workload_create_instance.py | jbadiapa/os-migrate | 19b591a672bc9e4af72e62dbd96be94a238a6dc2 | [
"Apache-2.0"
] | 32 | 2019-12-09T11:09:44.000Z | 2022-03-24T01:13:31.000Z | #!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: import_workload_create_instance
short_description: Create the destination instance for a migrated workload
extends_documentation_fragment: openstack
version_added: "2.9.0"
author: "OpenStack tenant migration tools (@os-migrate)"
description:
- "Take an instance from an OS-Migrate YAML structure, and export its volumes over NBD."
options:
auth:
description:
- Dictionary with parameters for chosen auth type on the destination cloud.
required: true
type: dict
auth_type:
description:
- Auth type plugin for destination OpenStack cloud. Can be omitted if using password authentication.
required: false
type: str
region_name:
description:
- Destination OpenStack region name. Can be omitted if using default region.
required: false
type: str
availability_zone:
description:
- Availability zone.
required: false
type: str
cloud:
description:
- Ignored. Present for backwards compatibility.
required: false
type: raw
validate_certs:
description:
- Validate HTTPS certificates when logging in to OpenStack.
required: false
type: bool
data:
description:
- Data structure with server parameters as loaded from OS-Migrate workloads YAML file.
required: true
type: dict
block_device_mapping:
description:
- A block_device_mapping_v2 structure from the transfer_volumes module.
- Used to attach destination volumes to the new instance in the right order.
required: true
type: list
elements: dict
'''
EXAMPLES = '''
main.yml:
- name: validate loaded resources
os_migrate.os_migrate.validate_resource_files:
paths:
- "{{ os_migrate_data_dir }}/workloads.yml"
register: workloads_file_validation
when: import_workloads_validate_file
- name: read workloads resource file
os_migrate.os_migrate.read_resources:
path: "{{ os_migrate_data_dir }}/workloads.yml"
register: read_workloads
- name: get source conversion host address
os_migrate.os_migrate.os_conversion_host_info:
auth:
auth_url: https://src-osp:13000/v3
username: migrate
password: migrate
project_domain_id: default
project_name: migration-source
user_domain_id: default
server_id: ce4dda96-5d8e-4b67-aee2-9845cdc943fe
register: os_src_conversion_host_info
- name: get destination conversion host address
os_migrate.os_migrate.os_conversion_host_info:
auth:
auth_url: https://dest-osp:13000/v3
username: migrate
password: migrate
project_domain_id: default
project_name: migration-destination
user_domain_id: default
server_id: 2d2afe57-ace5-4187-8fca-5f10f9059ba1
register: os_dst_conversion_host_info
- name: import workloads
include_tasks: workload.yml
loop: "{{ read_workloads.resources }}"
workload.yml:
- block:
- name: preliminary setup for workload import
os_migrate.os_migrate.import_workload_prelim:
auth:
auth_url: https://dest-osp:13000/v3
username: migrate
password: migrate
project_domain_id: default
project_name: migration-destination
user_domain_id: default
validate_certs: False
src_conversion_host: "{{ os_src_conversion_host_info.openstack_conversion_host }}"
src_auth:
auth_url: https://src-osp:13000/v3
username: migrate
password: migrate
project_domain_id: default
project_name: migration-source
user_domain_id: default
src_validate_certs: False
data: "{{ item }}"
data_dir: "{{ os_migrate_data_dir }}"
register: prelim
- debug:
msg:
- "{{ prelim.server_name }} log file: {{ prelim.log_file }}"
- "{{ prelim.server_name }} progress file: {{ prelim.state_file }}"
when: prelim.changed
- name: expose source volumes
os_migrate.os_migrate.import_workload_export_volumes:
auth: "{{ os_migrate_src_auth }}"
auth_type: "{{ os_migrate_src_auth_type|default(omit) }}"
region_name: "{{ os_migrate_src_region_name|default(omit) }}"
validate_certs: "{{ os_migrate_src_validate_certs|default(omit) }}"
ca_cert: "{{ os_migrate_src_ca_cert|default(omit) }}"
client_cert: "{{ os_migrate_src_client_cert|default(omit) }}"
client_key: "{{ os_migrate_src_client_key|default(omit) }}"
conversion_host:
"{{ os_src_conversion_host_info.openstack_conversion_host }}"
data: "{{ item }}"
log_file: "{{ os_migrate_data_dir }}/{{ prelim.server_name }}.log"
state_file: "{{ os_migrate_data_dir }}/{{ prelim.server_name }}.state"
ssh_key_path: "{{ os_migrate_conversion_keypair_private_path }}"
register: exports
when: prelim.changed
- name: transfer volumes to destination
os_migrate.os_migrate.import_workload_transfer_volumes:
auth: "{{ os_migrate_dst_auth }}"
auth_type: "{{ os_migrate_dst_auth_type|default(omit) }}"
region_name: "{{ os_migrate_dst_region_name|default(omit) }}"
validate_certs: "{{ os_migrate_dst_validate_certs|default(omit) }}"
ca_cert: "{{ os_migrate_dst_ca_cert|default(omit) }}"
client_cert: "{{ os_migrate_dst_client_cert|default(omit) }}"
client_key: "{{ os_migrate_dst_client_key|default(omit) }}"
data: "{{ item }}"
conversion_host:
"{{ os_dst_conversion_host_info.openstack_conversion_host }}"
ssh_key_path: "{{ os_migrate_conversion_keypair_private_path }}"
transfer_uuid: "{{ exports.transfer_uuid }}"
src_conversion_host_address:
"{{ os_src_conversion_host_info.openstack_conversion_host.address }}"
volume_map: "{{ exports.volume_map }}"
state_file: "{{ os_migrate_data_dir }}/{{ prelim.server_name }}.state"
log_file: "{{ os_migrate_data_dir }}/{{ prelim.server_name }}.log"
register: transfer
when: prelim.changed
- name: create destination instance
os_migrate.os_migrate.import_workload_create_instance:
auth: "{{ os_migrate_dst_auth }}"
auth_type: "{{ os_migrate_dst_auth_type|default(omit) }}"
region_name: "{{ os_migrate_dst_region_name|default(omit) }}"
validate_certs: "{{ os_migrate_dst_validate_certs|default(omit) }}"
ca_cert: "{{ os_migrate_dst_ca_cert|default(omit) }}"
client_cert: "{{ os_migrate_dst_client_cert|default(omit) }}"
client_key: "{{ os_migrate_dst_client_key|default(omit) }}"
data: "{{ item }}"
block_device_mapping: "{{ transfer.block_device_mapping }}"
register: os_migrate_destination_instance
when: prelim.changed
rescue:
- fail:
msg: "Failed to import {{ item.params.name }}!"
'''
RETURN = '''
server_id:
description: The ID of the newly created server.
returned: On successful creation of migrated server on destination cloud.
type: str
sample: 059635b7-451f-4a64-978a-7c2e9e4c15ff
'''
from ansible.module_utils.basic import AnsibleModule
# Import openstack module utils from ansible_collections.openstack.cloud.plugins as per ansible 3+
try:
from ansible_collections.openstack.cloud.plugins.module_utils.openstack \
import openstack_full_argument_spec, openstack_cloud_from_module
except ImportError:
# If this fails fall back to ansible < 3 imports
from ansible.module_utils.openstack \
import openstack_full_argument_spec, openstack_cloud_from_module
from ansible_collections.os_migrate.os_migrate.plugins.module_utils import server
def run_module():
argument_spec = openstack_full_argument_spec(
auth=dict(type='dict', no_log=True, required=True),
data=dict(type='dict', required=True),
block_device_mapping=dict(type='list', required=True, elements='dict'),
)
result = dict(
changed=False,
)
module = AnsibleModule(
argument_spec=argument_spec,
)
sdk, conn = openstack_cloud_from_module(module)
block_device_mapping = module.params['block_device_mapping']
ser_server = server.Server.from_data(module.params['data'])
sdk_server = ser_server.create(conn, block_device_mapping)
# Some info (e.g. flavor ID) will only become available after the
    # server is in ACTIVE state, so we need to wait for it.
sdk_server = conn.compute.wait_for_server(sdk_server, failures=['ERROR'], wait=600)
dst_ser_server = server.Server.from_sdk(conn, sdk_server)
if sdk_server:
result['changed'] = True
result['server'] = dst_ser_server.data
result['server_id'] = sdk_server.id
module.exit_json(**result)
def main():
run_module()
if __name__ == '__main__':
main()
| 33.152416 | 106 | 0.703185 | 1,107 | 8,918 | 5.331527 | 0.219512 | 0.079295 | 0.028465 | 0.027448 | 0.41918 | 0.39207 | 0.346154 | 0.333955 | 0.293121 | 0.277194 | 0 | 0.012731 | 0.198475 | 8,918 | 268 | 107 | 33.276119 | 0.812955 | 0.030837 | 0 | 0.394619 | 0 | 0 | 0.813846 | 0.221463 | 0 | 0 | 0 | 0 | 0 | 1 | 0.008969 | false | 0.022422 | 0.067265 | 0 | 0.076233 | 0.004484 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
81e620b1dfd869927a5135342a7294ba02276c08 | 1,183 | py | Python | src/config.py | BRAVO68WEB/architus | 21b9f94a64b142ee6e9b5efd79bd872a13ce8f6a | [
"MIT"
] | null | null | null | src/config.py | BRAVO68WEB/architus | 21b9f94a64b142ee6e9b5efd79bd872a13ce8f6a | [
"MIT"
] | null | null | null | src/config.py | BRAVO68WEB/architus | 21b9f94a64b142ee6e9b5efd79bd872a13ce8f6a | [
"MIT"
] | null | null | null | from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
# from src.commands import *
# import src.commands as command_modules
secret_token = None
db_user = None
db_pass = None
sessions = {}
try:
lines = [line.rstrip('\n') for line in open('.secret_token')]
secret_token = lines[0]
db_user = lines[1]
db_pass = lines[2]
client_id = lines[3]
client_secret = lines[4]
twitter_consumer_key = lines[5]
twitter_consumer_secret = lines[6]
twitter_access_token_key = lines[7]
twitter_access_token_secret = lines[8]
scraper_token = lines[9]
except Exception as e:
print(e)
    print('error reading .secret_token; create it with one secret per line, in the order unpacked above')
def get_session(pid=None):
if pid in sessions:
return sessions[pid]
print("creating postgres session")
try:
engine = create_engine("postgresql://{}:{}@localhost/autbot".format(db_user, db_pass))
Session = sessionmaker(bind=engine)
session = Session()
sessions[pid] = session
except Exception as e:
session = None
print('failed to connect to database')
print(e)
return session
session = get_session()
| 25.170213 | 94 | 0.674556 | 158 | 1,183 | 4.873418 | 0.449367 | 0.057143 | 0.046753 | 0.046753 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.010917 | 0.225697 | 1,183 | 46 | 95 | 25.717391 | 0.829694 | 0.054945 | 0 | 0.166667 | 0 | 0 | 0.132735 | 0.03139 | 0 | 0 | 0 | 0 | 0 | 1 | 0.027778 | false | 0.083333 | 0.055556 | 0 | 0.138889 | 0.138889 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
c48c8a45a8bc31ea98b3b0eb49ac12298185c634 | 2,426 | py | Python | kenlm_training/cc_net/tokenizer.py | ruinunca/data_tooling | 297e1f8c2898d00b523ccafb7bdd19c6d6aac9ff | [
"Apache-2.0"
] | 435 | 2019-11-04T22:35:50.000Z | 2022-03-29T20:15:07.000Z | kenlm_training/cc_net/tokenizer.py | ruinunca/data_tooling | 297e1f8c2898d00b523ccafb7bdd19c6d6aac9ff | [
"Apache-2.0"
] | 331 | 2021-11-02T00:30:56.000Z | 2022-03-08T16:48:13.000Z | kenlm_training/cc_net/tokenizer.py | ruinunca/data_tooling | 297e1f8c2898d00b523ccafb7bdd19c6d6aac9ff | [
"Apache-2.0"
] | 66 | 2019-11-06T01:28:12.000Z | 2022-03-01T09:18:32.000Z | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import time
from typing import Dict, Optional
import sacremoses # type: ignore
from cc_net import jsonql, text_normalizer
class RobustTokenizer(jsonql.Transformer):
"""Moses tokenizer with the expected preprocessing."""
LANG_WITHOUT_ACCENT = {"en", "my"}
def __init__(self, lang: str):
super().__init__()
self.lang = lang
self.moses = sacremoses.MosesTokenizer(lang)
self.rm_accent = lang in self.LANG_WITHOUT_ACCENT
self.ready = True
def do(self, text: str):
text = text_normalizer.normalize(
text, accent=self.rm_accent, case=False, numbers=False, punct=True
)
text = text_normalizer.normalize_spacing_for_tok(text, language=self.lang)
return self.moses.tokenize(text, return_str=True, escape=False)
class DocTokenizer(jsonql.Transformer):
"""Tokenize the text found in `output_field and store the result in `output_field`."""
def __init__(
self,
field: str,
output_field: str = "tokenized",
language_field: str = "language",
):
super().__init__()
self.field = field
self.output_field = output_field
self.language_field = language_field
self.n_docs = 0
self.tokenizers: Dict[str, RobustTokenizer] = {}
def get_tokenizer(self, lang: str) -> Optional[RobustTokenizer]:
cache = self.tokenizers
if lang in cache:
return cache[lang]
if lang in ("th", "zh", "ja"):
# TODO find a tokenizer for those languages
return None
cache[lang] = RobustTokenizer(lang)
return cache[lang]
def do(self, document):
lang = document[self.language_field]
tok = self.get_tokenizer(lang)
if not tok:
return document
self.n_docs += 1
lines = document[self.field].split("\n")
tokenized = "\n".join(tok(l) for l in lines)
document[self.output_field] = tokenized
return document
def summary(self):
delay = (time.time() - self.start_time) / 3600
speed = self.n_docs / delay
return [
f"Tokenized {self.n_docs:_} documents in {delay:.2}h ({speed:.1} doc/s)."
]
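# Example usage (illustrative; the field values below are assumptions, not
# part of the original module):
#
#     tok = DocTokenizer(field="raw_content")
#     doc = {"raw_content": "Hello world.", "language": "en"}
#     doc = tok.do(doc)  # adds doc["tokenized"] via the Moses tokenizer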
| 30.325 | 90 | 0.626958 | 300 | 2,426 | 4.91 | 0.373333 | 0.044807 | 0.02444 | 0.03666 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.004525 | 0.271228 | 2,426 | 79 | 91 | 30.708861 | 0.82862 | 0.145919 | 0 | 0.109091 | 0 | 0.018182 | 0.049148 | 0 | 0 | 0 | 0 | 0.012658 | 0 | 1 | 0.109091 | false | 0 | 0.072727 | 0 | 0.363636 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
c48caf2d700cbc3c512434c652a6ac5a08e2206b | 346 | py | Python | scripts/exercicios/ex063.py | RuanBarretodosSantos/python | 4142ccd71c4ffb4bb6a10d61c85f612758f5bb41 | [
"MIT"
] | null | null | null | scripts/exercicios/ex063.py | RuanBarretodosSantos/python | 4142ccd71c4ffb4bb6a10d61c85f612758f5bb41 | [
"MIT"
] | null | null | null | scripts/exercicios/ex063.py | RuanBarretodosSantos/python | 4142ccd71c4ffb4bb6a10d61c85f612758f5bb41 | [
"MIT"
] | null | null | null | cont = 3
t1 = 0
t2 = 1
print('-----' * 12)
print('Fibonacci Sequence')
print('-----' * 12)
valor = int(input('How many terms do you want to show? '))
print('~~~~~' * 12)
print(f'{t1} ➙ {t2} ' , end='➙ ')
while cont <= valor:
t3 = t1 + t2
print(f' {t3}', end=' ➙ ')
    t1 = t2
    t2 = t3
    cont += 1
print(' E N D')
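# Example run (illustrative): asking for 5 terms prints
#   0 ➙ 1 ➙  1 ➙  2 ➙  3 ➙  E N D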
| 19.222222 | 57 | 0.482659 | 54 | 346 | 3.148148 | 0.481481 | 0.123529 | 0.141176 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.095618 | 0.274566 | 346 | 17 | 58 | 20.352941 | 0.569721 | 0 | 0 | 0.117647 | 0 | 0 | 0.291908 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.411765 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 |
c4930d25761ee9d797224e253c155e8643ca0fdb | 14,588 | py | Python | geometry_utils/tests/test_bound_box.py | NOAA-ORR-ERD/geometry_utils | 0417a8c459fb17f101945f53d048191dc22e97c0 | [
"BSD-3-Clause"
] | null | null | null | geometry_utils/tests/test_bound_box.py | NOAA-ORR-ERD/geometry_utils | 0417a8c459fb17f101945f53d048191dc22e97c0 | [
"BSD-3-Clause"
] | null | null | null | geometry_utils/tests/test_bound_box.py | NOAA-ORR-ERD/geometry_utils | 0417a8c459fb17f101945f53d048191dc22e97c0 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
"""
Test code for the BBox Object
"""
import numpy as np
import pytest
from geometry_utils.bound_box import (BBox,
asBBox,
NullBBox,
InfBBox,
fromBBArray,
from_points,
)
class TestConstructors():
def test_creates(self):
B = BBox(((0, 0), (5, 5)))
assert isinstance(B, BBox)
def test_type(self):
B = np.array(((0, 0), (5, 5)))
assert not isinstance(B, BBox)
def testDataType(self):
B = BBox(((0, 0), (5, 5)))
        assert B.dtype == np.float64  # np.float was removed from NumPy
def testShape(self):
B = BBox((0, 0, 5, 5))
assert B.shape == (2, 2)
def testShape2(self):
with pytest.raises(ValueError):
BBox((0, 0, 5))
def testShape3(self):
with pytest.raises(ValueError):
BBox((0, 0, 5, 6, 7))
def testArrayConstruction(self):
A = np.array(((4, 5), (10, 12)), np.float_)
B = BBox(A)
assert isinstance(B, BBox)
def testMinMax(self):
with pytest.raises(ValueError):
BBox((0, 0, -1, 6))
def testMinMax2(self):
with pytest.raises(ValueError):
BBox((0, 0, 1, -6))
def testMinMax3(self):
# OK to have a zero-sized BB
B = BBox(((0, 0), (0, 5)))
assert isinstance(B, BBox)
def testMinMax4(self):
# OK to have a zero-sized BB
B = BBox(((10., -34), (10., -34.0)))
assert isinstance(B, BBox)
def testMinMax5(self):
# OK to have a tiny BB
B = BBox(((0, 0), (1e-20, 5)))
assert isinstance(B, BBox)
def testMinMax6(self):
# Should catch tiny difference
with pytest.raises(ValueError):
BBox(((0, 0), (-1e-20, 5)))
class TestAsBBox():
def testPassThrough(self):
B = BBox(((0, 0), (5, 5)))
C = asBBox(B)
assert B is C
def testPassThrough2(self):
B = ((0, 0), (5, 5))
C = asBBox(B)
assert B is not C
def testPassArray(self):
# Different data type
A = np.array(((0, 0), (5, 5)))
C = asBBox(A)
assert A is not C
def testPassArray2(self):
# same data type -- should be a view
A = np.array(((0, 0), (5, 5)), np.float_)
C = asBBox(A)
A[0, 0] = -10
assert C[0, 0] == A[0, 0]
class TestIntersect():
def testSame(self):
B = BBox(((-23.5, 456), (56, 532.0)))
C = BBox(((-23.5, 456), (56, 532.0)))
assert B.Overlaps(C)
def testUpperLeft(self):
B = BBox(((5, 10), (15, 25)))
C = BBox(((0, 12), (10, 32.0)))
assert B.Overlaps(C)
def testUpperRight(self):
B = BBox(((5, 10), (15, 25)))
C = BBox(((12, 12), (25, 32.0)))
assert B.Overlaps(C)
def testLowerRight(self):
B = BBox(((5, 10), (15, 25)))
C = BBox(((12, 5), (25, 15)))
assert B.Overlaps(C)
def testLowerLeft(self):
B = BBox(((5, 10), (15, 25)))
C = BBox(((-10, 5), (8.5, 15)))
assert B.Overlaps(C)
def testBelow(self):
B = BBox(((5, 10), (15, 25)))
C = BBox(((-10, 5), (8.5, 9.2)))
assert not B.Overlaps(C)
def testAbove(self):
B = BBox(((5, 10), (15, 25)))
C = BBox(((-10, 25.001), (8.5, 32)))
assert not B.Overlaps(C)
def testLeft(self):
B = BBox(((5, 10), (15, 25)))
C = BBox(((4, 8), (4.95, 32)))
assert not B.Overlaps(C)
def testRight(self):
B = BBox(((5, 10), (15, 25)))
C = BBox(((17.1, 8), (17.95, 32)))
assert not B.Overlaps(C)
def testInside(self):
B = BBox(((-15, -25), (-5, -10)))
C = BBox(((-12, -22), (-6, -8)))
assert B.Overlaps(C)
def testOutside(self):
B = BBox(((-15, -25), (-5, -10)))
C = BBox(((-17, -26), (3, 0)))
assert B.Overlaps(C)
def testTouch(self):
B = BBox(((5, 10), (15, 25)))
C = BBox(((15, 8), (17.95, 32)))
assert B.Overlaps(C)
def testCorner(self):
B = BBox(((5, 10), (15, 25)))
C = BBox(((15, 25), (17.95, 32)))
assert B.Overlaps(C)
def testZeroSize(self):
B = BBox(((5, 10), (15, 25)))
C = BBox(((15, 25), (15, 25)))
assert B.Overlaps(C)
def testZeroSize2(self):
B = BBox(((5, 10), (5, 10)))
C = BBox(((15, 25), (15, 25)))
assert not B.Overlaps(C)
def testZeroSize3(self):
B = BBox(((5, 10), (5, 10)))
C = BBox(((0, 8), (10, 12)))
assert B.Overlaps(C)
def testZeroSize4(self):
B = BBox(((5, 1), (10, 25)))
C = BBox(((8, 8), (8, 8)))
assert B.Overlaps(C)
class TestEquality():
def testSame(self):
B = BBox(((1.0, 2.0), (5., 10.)))
C = BBox(((1.0, 2.0), (5., 10.)))
assert B == C
def testIdentical(self):
B = BBox(((1.0, 2.0), (5., 10.)))
assert B == B
def testNotSame(self):
B = BBox(((1.0, 2.0), (5., 10.)))
C = BBox(((1.0, 2.0), (5., 10.1)))
assert not B == C
def testWithArray(self):
B = BBox(((1.0, 2.0), (5., 10.)))
C = np.array(((1.0, 2.0), (5., 10.)))
assert B == C
def testWithArray2(self):
B = BBox(((1.0, 2.0), (5., 10.)))
C = np.array(((1.0, 2.0), (5., 10.)))
assert C == B
def testWithArray3(self):
B = BBox(((1.0, 2.0), (5., 10.)))
C = np.array(((1.01, 2.0), (5., 10.)))
assert not C == B
class TestInside():
def testSame(self):
B = BBox(((1.0, 2.0), (5., 10.)))
C = BBox(((1.0, 2.0), (5., 10.)))
assert B.Inside(C)
def testPoint(self):
B = BBox(((1.0, 2.0), (5., 10.)))
C = BBox(((3.0, 4.0), (3.0, 4.0)))
assert B.Inside(C)
def testPointOutside(self):
B = BBox(((1.0, 2.0), (5., 10.)))
C = BBox(((-3.0, 4.0), (0.10, 4.0)))
assert not B.Inside(C)
def testUpperLeft(self):
B = BBox(((5, 10), (15, 25)))
C = BBox(((0, 12), (10, 32.0)))
assert not B.Inside(C)
def testUpperRight(self):
B = BBox(((5, 10), (15, 25)))
C = BBox(((12, 12), (25, 32.0)))
assert not B.Inside(C)
def testLowerRight(self):
B = BBox(((5, 10), (15, 25)))
C = BBox(((12, 5), (25, 15)))
assert not B.Inside(C)
def testLowerLeft(self):
B = BBox(((5, 10), (15, 25)))
C = BBox(((-10, 5), (8.5, 15)))
assert not (B.Inside(C))
def testBelow(self):
B = BBox(((5, 10), (15, 25)))
C = BBox(((-10, 5), (8.5, 9.2)))
assert not (B.Inside(C))
def testAbove(self):
B = BBox(((5, 10), (15, 25)))
C = BBox(((-10, 25.001), (8.5, 32)))
assert not (B.Inside(C))
def testLeft(self):
B = BBox(((5, 10), (15, 25)))
C = BBox(((4, 8), (4.95, 32)))
assert not (B.Inside(C))
def testRight(self):
B = BBox(((5, 10), (15, 25)))
C = BBox(((17.1, 8), (17.95, 32)))
assert not (B.Inside(C))
class TestPointInside():
def testPointIn(self):
B = BBox(((1.0, 2.0), (5., 10.)))
P = (3.0, 4.0)
assert (B.PointInside(P))
def testUpperLeft(self):
B = BBox(((5, 10), (15, 25)))
P = (4, 30)
assert not (B.PointInside(P))
def testUpperRight(self):
B = BBox(((5, 10), (15, 25)))
P = (16, 30)
assert not (B.PointInside(P))
def testLowerRight(self):
B = BBox(((5, 10), (15, 25)))
P = (16, 4)
assert not (B.PointInside(P))
def testLowerLeft(self):
B = BBox(((5, 10), (15, 25)))
P = (-10, 5)
assert not (B.PointInside(P))
def testBelow(self):
B = BBox(((5, 10), (15, 25)))
P = (10, 5)
assert not (B.PointInside(P))
def testAbove(self):
B = BBox(((5, 10), (15, 25)))
P = (10, 25.001)
assert not (B.PointInside(P))
def testLeft(self):
B = BBox(((5, 10), (15, 25)))
P = (4, 12)
assert not (B.PointInside(P))
def testRight(self):
B = BBox(((5, 10), (15, 25)))
P = (17.1, 12.3)
assert not (B.PointInside(P))
def testPointOnTopLine(self):
B = BBox(((1.0, 2.0), (5., 10.)))
P = (3.0, 10.)
assert (B.PointInside(P))
def testPointLeftTopLine(self):
B = BBox(((1.0, 2.0), (5., 10.)))
P = (-3.0, 10.)
assert not (B.PointInside(P))
def testPointOnBottomLine(self):
B = BBox(((1.0, 2.0), (5., 10.)))
P = (3.0, 5.)
assert (B.PointInside(P))
def testPointOnLeft(self):
B = BBox(((-10., -10.), (-1.0, -1.0)))
P = (-10, -5.)
assert (B.PointInside(P))
def testPointOnRight(self):
B = BBox(((-10., -10.), (-1.0, -1.0)))
P = (-1, -5.)
assert (B.PointInside(P))
def testPointOnBottomRight(self):
B = BBox(((-10., -10.), (-1.0, -1.0)))
P = (-1, -10.)
assert (B.PointInside(P))
class Test_from_points():
def testCreate(self):
Pts = np.array(((5, 2), (3, 4), (1, 6)), np.float64)
B = from_points(Pts)
assert (B[0, 0] == 1.0 and
B[0, 1] == 2.0 and
B[1, 0] == 5.0 and
B[1, 1] == 6.0)
def testCreateInts(self):
Pts = np.array(((5, 2), (3, 4), (1, 6)))
B = from_points(Pts)
assert (B[0, 0] == 1.0 and
B[0, 1] == 2.0 and
B[1, 0] == 5.0 and
B[1, 1] == 6.0)
def testSinglePoint(self):
Pts = np.array((5, 2), np.float_)
B = from_points(Pts)
assert (B[0, 0] == 5. and
B[0, 1] == 2.0 and
B[1, 0] == 5. and
B[1, 1] == 2.0)
def testListTuples(self):
Pts = [(3, 6.5), (13, 43.2), (-4.32, -4), (65, -23), (-0.0001,
23.432)]
B = from_points(Pts)
assert (B[0, 0] == -4.32 and
B[0, 1] == -23.0 and
B[1, 0] == 65.0 and
B[1, 1] == 43.2)
class TestMerge():
A = BBox(((-23.5, 456), (56, 532.0)))
B = BBox(((-20.3, 460), (54, 465))) # B should be completely inside A
    C = BBox(((-23.5, 456), (58, 540.)))  # up and to the right of A
D = BBox(((-26.5, 12), (56, 532.0)))
def testInside(self):
C = self.A.copy()
C.Merge(self.B)
assert (C == self.A)
def testFullOutside(self):
C = self.B.copy()
C.Merge(self.A)
assert (C == self.A)
def testUpRight(self):
A = self.A.copy()
A.Merge(self.C)
assert (A[0] == self.A[0] and A[1] == self.C[1])
def testDownLeft(self):
A = self.A.copy()
A.Merge(self.D)
assert (A[0] == self.D[0] and A[1] == self.A[1])
class TestWidthHeight():
B = BBox(((1.0, 2.0), (5., 10.)))
def testWidth(self):
assert (self.B.Width == 4.0)
    def testHeight(self):
        assert (self.B.Height == 8.0)
def testSetW(self):
with pytest.raises(AttributeError):
self.B.Height = 6
def testSetH(self):
with pytest.raises(AttributeError):
self.B.Width = 6
class TestCenter():
B = BBox(((1.0, 2.0), (5., 10.)))
def testCenter(self):
assert ((self.B.Center == (3.0, 6.0)).all())
def testSetCenter(self):
with pytest.raises(AttributeError):
self.B.Center = (6, 5)
class TestBBarray():
BBarray = np.array((((-23.5, 456), (56, 532.0)), ((-20.3, 460),
(54, 465)), ((-23.5, 456), (58, 540.)), ((-26.5,
                       12), (56, 532.0))), dtype=np.float64)
BB = asBBox(((-26.5, 12.), (58., 540.)))
def testJoin(self):
BB = fromBBArray(self.BBarray)
assert BB == self.BB
class TestNullBBox():
B1 = NullBBox()
B2 = NullBBox()
B3 = BBox(((1.0, 2.0), (5., 10.)))
def testValues(self):
assert (np.alltrue(np.isnan(self.B1)))
def testIsNull(self):
assert (self.B1.IsNull)
def testEquals(self):
assert ((self.B1 == self.B2) is True)
def testNotEquals(self):
assert not self.B1 == self.B3
def testNotEquals2(self):
assert not self.B3 == self.B1
def testMerge(self):
C = self.B1.copy()
C.Merge(self.B3)
assert C == self.B3, 'merge failed, got: %s' % C
def testOverlaps(self):
assert self.B1.Overlaps(self.B3) is False
def testOverlaps2(self):
assert self.B3.Overlaps(self.B1) is False
class TestInfBBox():
B1 = InfBBox()
B2 = InfBBox()
B3 = BBox(((1.0, 2.0), (5., 10.)))
NB = NullBBox()
def testValues(self):
assert (np.alltrue(np.isinf(self.B1)))
# def testIsNull(self):
# assert ( self.B1.IsNull )
def testEquals(self):
assert self.B1 == self.B2
def testNotEquals(self):
assert not self.B1 == self.B3
def testNotEquals2(self):
assert self.B1 != self.B3
def testNotEquals3(self):
assert not self.B3 == self.B1
def testMerge(self):
C = self.B1.copy()
C.Merge(self.B3)
assert C == self.B2, 'merge failed, got: %s' % C
def testMerge2(self):
C = self.B3.copy()
C.Merge(self.B1)
assert C == self.B1, 'merge failed, got: %s' % C
def testOverlaps(self):
assert (self.B1.Overlaps(self.B2) is True)
def testOverlaps2(self):
assert (self.B3.Overlaps(self.B1) is True)
def testOverlaps3(self):
assert (self.B1.Overlaps(self.B3) is True)
def testOverlaps4(self):
assert (self.B1.Overlaps(self.NB) is True)
def testOverlaps5(self):
assert (self.NB.Overlaps(self.B1) is True)
class TestSides():
B = BBox(((1.0, 2.0), (5., 10.)))
def testLeft(self):
assert self.B.Left == 1.0
def testRight(self):
assert self.B.Right == 5.0
def testBottom(self):
assert self.B.Bottom == 2.0
def testTop(self):
assert self.B.Top == 10.0
class TestAsPoly():
B = BBox(((5, 0), (10, 20)))
corners = np.array([(5., 0.), (5., 20.), (10., 20.), (10., 0.)],
dtype=np.float64)
def testCorners(self):
print(self.B.AsPoly())
assert np.array_equal(self.B.AsPoly(), self.corners)
| 25.151724 | 75 | 0.466822 | 2,088 | 14,588 | 3.25431 | 0.108238 | 0.050773 | 0.070199 | 0.04415 | 0.661221 | 0.601619 | 0.530979 | 0.464901 | 0.425313 | 0.354673 | 0 | 0.115067 | 0.335755 | 14,588 | 579 | 76 | 25.195164 | 0.586171 | 0.02221 | 0 | 0.501199 | 0 | 0 | 0.004422 | 0 | 0 | 0 | 0 | 0 | 0.235012 | 1 | 0.254197 | false | 0.009592 | 0.007194 | 0 | 0.340528 | 0.002398 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
c49d9514c95f15c6be6ba6695dcb54d27f071828 | 347 | py | Python | CodeChef/Contest/June Long/pricecon.py | GSri30/Competetive_programming | 0dc1681500a80b6f0979d0dc9f749357ee07bcb8 | [
"MIT"
] | 22 | 2020-01-03T17:32:00.000Z | 2021-11-07T09:31:44.000Z | CodeChef/Contest/June Long/pricecon.py | GSri30/Competetive_programming | 0dc1681500a80b6f0979d0dc9f749357ee07bcb8 | [
"MIT"
] | 10 | 2020-09-30T09:41:18.000Z | 2020-10-11T11:25:09.000Z | CodeChef/Contest/June Long/pricecon.py | GSri30/Competetive_programming | 0dc1681500a80b6f0979d0dc9f749357ee07bcb8 | [
"MIT"
] | 25 | 2019-10-14T19:25:01.000Z | 2021-05-26T08:12:20.000Z | test = int(input())
while test > 0 :
n,k = map(int,input().split())
p = list(map(int,input().split()))
original = 0
later = 0
for i in p :
if i > k :
later += k
original += i
else :
later += i
original += i
print(original-later)
test -= 1 | 23.133333 | 39 | 0.414986 | 43 | 347 | 3.348837 | 0.465116 | 0.166667 | 0.152778 | 0.222222 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.020942 | 0.449568 | 347 | 15 | 40 | 23.133333 | 0.732984 | 0 | 0 | 0.133333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.066667 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
c49e67e8dbe87dd913b66006fd7f5daf6198c333 | 2,948 | py | Python | src/utils/Shell.py | vlab-cs-ucsb/quacky | c031577883550820e2586ce530e59eb30aeccc37 | [
"BSD-2-Clause"
] | 1 | 2022-02-28T18:10:29.000Z | 2022-02-28T18:10:29.000Z | src/utils/Shell.py | vlab-cs-ucsb/quacky | c031577883550820e2586ce530e59eb30aeccc37 | [
"BSD-2-Clause"
] | null | null | null | src/utils/Shell.py | vlab-cs-ucsb/quacky | c031577883550820e2586ce530e59eb30aeccc37 | [
"BSD-2-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Mon Aug 18 22:20:01 2014
@author: baki
"""
import shlex
from subprocess import Popen, PIPE
from .Log import Log
class Shell:
def __init__(self, TAG=""):
self.log = Log(TAG=TAG)
self.current_process = None
self.process_output = None
def setTag(self, tag):
self.log.setTag(tag)
def runcmd(self, cmd, cwd=None, shell=False):
# self.log.v("cmd: {}\n with params: cwd={}, shell={}".format(cmd, cwd, shell))
args = shlex.split(cmd)
p = Popen(args, stdout=PIPE, stderr=PIPE, cwd=cwd, shell=shell)
out, err = p.communicate()
if out:
out = out.decode("ascii")
# self.log.v("cmd output: {}\n".format(out))
if err:
err = err.decode("ascii")
# self.log.v("cmd error: {}\n".format(err))
return out, err
def runcmdBgrnd(self, cmd, out=PIPE, cwd=None, shell=False):
assert self.current_process == None, "currently, one shell object supports only one background process"
self.log.v("cmd: {}\n with params: out={}, cwd={}, shell={}".format(cmd, out, cwd, shell))
redirect_to = out
if out is not PIPE:
assert self.process_output == None, "currently, one shell object supports only one background process"
redirect_to = open(out, "w")
args = shlex.split(cmd)
p = Popen(args, stdout=redirect_to, stderr=redirect_to, cwd=cwd, shell=shell)
self.current_process = p
self.process_output = redirect_to
return p
    def kill(self, process=None):
        if process is None:
            process = self.current_process
        process and process.kill()
        self.process_output and self.process_output.close()
        if process is self.current_process:
            # Reset so a new background process can be started later.
            self.current_process = None
            self.process_output = None

    def terminate(self, process=None):
        if process is None:
            process = self.current_process
        process and process.terminate()
        self.process_output and self.process_output.close()
        if process is self.current_process:
            # Reset so a new background process can be started later.
            self.current_process = None
            self.process_output = None
def runGrep(self, search, subject, options):
cmd = "grep {} \"{}\" {}".format(options, search, subject)
return self.runcmd(cmd)
def rm(self, name):
cmd = "rm {}".format(name)
return self.runcmd(cmd)
def rmdir(self, name):
cmd = "rmdir {}".format(name)
return self.runcmd(cmd)
def rmrdir(self, name):
cmd = "rm -r {}".format(name)
return self.runcmd(cmd)
def mv(self, src, dst):
cmd = "mv {} {}".format(src, dst)
return self.runcmd(cmd)
def cp(self, src, dst):
cmd = "cp -r {} {}".format(src, dst)
return self.runcmd(cmd)
def mkdir(self, name):
cmd = "mkdir {} -p".format(name)
return self.runcmd(cmd)
def clean(self, name):
self.rmrdir(name)
self.mkdir(name)
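# Example usage (illustrative only; kept as a comment because the relative
# import of Log above means this module runs inside its package):
#
#     sh = Shell(TAG="demo")
#     out, err = sh.runcmd("echo hello")        # blocking call, output captured
#     sh.mkdir("demo_dir")                      # wraps "mkdir demo_dir -p"
#     sh.runcmdBgrnd("sleep 5", out="demo_dir/bg.log")
#     sh.terminate()                            # stop the background process
#     sh.clean("demo_dir")                      # rm -r, then a fresh mkdir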
| 32.043478 | 119 | 0.557327 | 372 | 2,948 | 4.360215 | 0.236559 | 0.061036 | 0.073366 | 0.081998 | 0.446363 | 0.432799 | 0.405672 | 0.29963 | 0.217016 | 0.161529 | 0 | 0.006394 | 0.31038 | 2,948 | 91 | 120 | 32.395604 | 0.791441 | 0.080393 | 0 | 0.230769 | 0 | 0 | 0.093041 | 0 | 0 | 0 | 0 | 0 | 0.030769 | 1 | 0.215385 | false | 0 | 0.046154 | 0 | 0.415385 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
c4a64cd498868ef1b6019445d7127a1f346b9fe4 | 13,670 | py | Python | envi/registers.py | ConfusedMoonbear/vivisect | 8d6048037f85f745cd11923c6a8d662c150fe330 | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2019-12-11T19:13:59.000Z | 2019-12-11T19:13:59.000Z | envi/registers.py | ConfusedMoonbear/vivisect | 8d6048037f85f745cd11923c6a8d662c150fe330 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | envi/registers.py | ConfusedMoonbear/vivisect | 8d6048037f85f745cd11923c6a8d662c150fe330 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | """
Similar to the memory subsystem, this is a unified way to
access information about objects which contain registers
"""
import envi.bits as e_bits
from envi.const import *
class InvalidRegisterName(Exception):
pass
class RegisterContext:
def __init__(self, regdef=(), metas=(), pcindex=None, spindex=None, srindex=None):
"""
Hand in a register definition which consists of
a list of (<name>, <width>) tuples.
"""
self.loadRegDef(regdef)
self.loadRegMetas(metas)
self.setRegisterIndexes(pcindex, spindex, srindex=srindex)
self._rctx_dirty = False
def getRegisterSnap(self):
"""
Use this to bulk save off the register state.
"""
return list(self._rctx_vals)
def setRegisterSnap(self, snap):
"""
Use this to bulk restore the register state.
NOTE: This may only be used under the assumption that the
RegisterContext has been initialized the same way
(like context switches in tracers, or emulaction snaps)
"""
self._rctx_vals = list(snap)
def isDirty(self):
"""
Returns true if registers in this context have been modififed
since their import.
"""
return self._rctx_dirty
    def setIsDirty(self, dirty):
        self._rctx_dirty = dirty
def setRegisterIndexes(self, pcindex, spindex, srindex=None):
self._rctx_pcindex = pcindex
self._rctx_spindex = spindex
self._rctx_srindex = srindex
def loadRegDef(self, regdef, defval=0):
"""
Load a register definition. A register definition consists
of a list of tuples with the following format:
(regname, regwidth)
NOTE: All widths in envi RegisterContexts are in bits.
"""
self._rctx_regdef = regdef # Save this for snaps etc..
self._rctx_names = {}
self._rctx_ids = {}
self._rctx_widths = []
self._rctx_vals = []
self._rctx_masks = []
for i, (name, width) in enumerate(regdef):
self._rctx_names[name] = i
self._rctx_ids[i] = name
self._rctx_widths.append(width)
self._rctx_masks.append((2**width)-1)
self._rctx_vals.append(defval)
def getRegDef(self):
return self._rctx_regdef
def loadRegMetas(self, metas, statmetas=None):
"""
Load a set of defined "meta" registers for this architecture. Meta
registers are defined as registers who exist as a subset of the bits
in some other "real" register. The argument metas is a list of tuples
with the following format:
(regname, regidx, reg_shift_offset, reg_width)
The given example is for the AX register in the i386 subsystem
regname: "ax"
reg_shift_offset: 0
reg_width: 16
Optionally a set of status meta registers can be loaded as well.
The argument is a list of tuples with the following format:
(regname, regidx, reg_shift_offset, reg_width, description)
"""
self._rctx_regmetas = metas
for name, idx, offset, width in metas:
self.addMetaRegister(name, idx, offset, width)
self._rctx_statmetas = statmetas
def addMetaRegister(self, name, idx, offset, width):
"""
Meta registers are registers which are really just directly
addressable parts of already existing registers (eax -> al).
To add a meta register, you give the name, the idx of the *real*
register, the width of the meta reg, and it's left shifted (in bits)
offset into the real register value. The RegisterContext will take
care of accesses after that.
"""
newidx = (offset << 24) + (width << 16) + idx
self._rctx_names[name] = newidx
self._rctx_ids[newidx] = name
def isMetaRegister(self, index):
return (index & 0xffff) != index
def _rctx_Import(self, sobj):
"""
Given an object with attributes with the same names as
registers in our context, populate our values from it.
NOTE: This also clears the dirty flag
"""
# On import from a structure, we are clean again.
self._rctx_dirty = False
for name,idx in self._rctx_names.items():
# Skip meta registers
if (idx & 0xffff) != idx:
continue
x = getattr(sobj, name, None)
if x != None:
self._rctx_vals[idx] = x
def _rctx_Export(self, sobj):
"""
Given an object with attributes with the same names as
registers in our context, set the ones he has to match
our values.
"""
for name,idx in self._rctx_names.items():
# Skip meta registers
if (idx & 0xffff) != idx:
continue
if hasattr(sobj, name):
setattr(sobj, name, self._rctx_vals[idx])
def getRegisterInfo(self, meta=False):
"""
Return an object which can be stored off, and restored
to re-initialize a register context. (much like snapshot
but it takes the definitions with it)
"""
regdef = self._rctx_regdef
regmeta = self._rctx_regmetas
pcindex = self._rctx_pcindex
spindex = self._rctx_spindex
snap = self.getRegisterSnap()
return (regdef, regmeta, pcindex, spindex, snap)
def setRegisterInfo(self, info):
regdef, regmeta, pcindex, spindex, snap = info
self.loadRegDef(regdef)
self.loadRegMetas(regmeta)
self.setRegisterIndexes(pcindex, spindex)
self.setRegisterSnap(snap)
def getRegisterName(self, index):
return self._rctx_ids.get(index,"REG%.8x" % index)
def getProgramCounter(self):
"""
Get the value of the program counter for this register context.
"""
return self.getRegister(self._rctx_pcindex)
def setProgramCounter(self, value):
"""
Set the value of the program counter for this register context.
"""
self.setRegister(self._rctx_pcindex, value)
def getStackCounter(self):
return self.getRegister(self._rctx_spindex)
def setStackCounter(self, value):
self.setRegister(self._rctx_spindex, value)
def hasStatusRegister(self):
'''
Returns True if this context is aware of a status register.
'''
if self._rctx_srindex == None:
return False
return True
def getStatusRegNameDesc(self):
'''
Return a list of status register names and descriptions.
'''
return [(name, desc) for name, idx, offset, width, desc in self._rctx_statmetas]
def getStatusRegister(self):
'''
Gets the status register for this register context.
'''
return self.getRegister(self._rctx_srindex)
def setStatusRegister(self, value):
'''
Sets the status register for this register context.
'''
self.setRegister(self._rctx_srindex, value)
def getStatusFlags(self):
'''
Return a dictionary of reg name and reg value for the meta registers
that are part of the status register.
'''
ret = {}
for name, idx, offset, width, desc in self._rctx_statmetas:
ret[name] = self.getRegisterByName(name)
return ret
def getRegisterByName(self, name):
idx = self._rctx_names.get(name)
if idx == None:
raise InvalidRegisterName("Unknown Register: %s" % name)
return self.getRegister(idx)
def setRegisterByName(self, name, value):
idx = self._rctx_names.get(name)
if idx == None:
raise InvalidRegisterName("Unknown Register: %s" % name)
self.setRegister(idx, value)
def getRegisterNames(self):
'''
Returns a list of the 'real' (non meta) registers.
'''
regs = [rname for rname, ridx in self._rctx_names.items()
if not self.isMetaRegister(ridx)]
return regs
def getRegisterNameIndexes(self):
'''
Return a list of all the 'real' (non meta) registers and their indexes.
Example: for regname, regidx in x.getRegisterNameIndexes():
'''
regs = [(rname, ridx) for rname, ridx in self._rctx_names.items()
if not self.isMetaRegister(ridx)]
return regs
def getRegisters(self):
"""
Get all the *real* registers from this context as a dictionary of name
value pairs.
"""
ret = {}
for name,idx in self._rctx_names.items():
if (idx & 0xffff) != idx:
continue
ret[name] = self.getRegister(idx)
return ret
def setRegisters(self, regdict):
"""
For any name value pairs in the specified dictionary, set the current
register values in this context.
"""
for name,value in regdict.items():
self.setRegisterByName(name, value)
def getRegisterIndex(self, name):
"""
Get a register index by name.
(faster to use the index multiple times)
"""
return self._rctx_names.get(name)
def getRegisterWidth(self, index):
"""
Return the width of the register which lives at the specified
index (width is always in bits).
"""
ridx = index & 0xffff
if ridx == index:
return self._rctx_widths[index]
width = (index >> 16) & 0xff
return width
def getRegister(self, index):
"""
Return the current value of the specified register index.
"""
ridx = index & 0xffff
value = self._rctx_vals[ridx]
if ridx != index:
value = self._xlateToMetaReg(index, value)
return value
def getMetaRegInfo(self, index):
'''
Return the appropriate realreg, shift, mask info
for the specified metareg idx (or None if it's not
meta).
Example:
real_reg, lshift, mask = r.getMetaRegInfo(x)
'''
ridx = index & 0xffff
if ridx == index:
return None
offset = (index >> 24) & 0xff
width = (index >> 16) & 0xff
mask = (2**width)-1
return ridx, offset, mask
def _xlateToMetaReg(self, index, value):
'''
Translate a register value to the meta register value
(used when getting a meta register)
'''
ridx = index & 0xffff
offset = (index >> 24) & 0xff
width = (index >> 16) & 0xff
mask = (2**width)-1
if offset != 0:
value >>= offset
return value & mask
def _xlateToNativeReg(self, index, value):
'''
Translate a register value to the native register value
(used when setting a meta register)
'''
ridx = index & 0xffff
width = (index >> 16) & 0xff
offset = (index >> 24) & 0xff
# FIXME is it faster to generate or look these up?
mask = (2 ** width) - 1
mask = mask << offset
# NOTE: basewidth is in *bits*
basewidth = self._rctx_widths[ridx]
basemask = (2 ** basewidth) - 1
        # cut a hole in basemask at the size/offset of mask
finalmask = basemask ^ mask
curval = self._rctx_vals[ridx]
if offset:
value <<= offset
return value | (curval & finalmask)
def setRegister(self, index, value):
"""
Set a register value by index.
"""
self._rctx_dirty = True
ridx = index & 0xffff
# If it's a meta register index, lets mask it into
# the real thing...
if ridx != index:
value = self._xlateToNativeReg(index, value)
self._rctx_vals[ridx] = (value & self._rctx_masks[ridx])
def getRealRegisterNameByIdx(self, regidx):
"""
Returns the Name of the Containing register (in the case
of meta-registers) or the name of the register.
(by Index)
"""
        return self.getRegisterName(regidx & RMETA_NMASK)
def getRealRegisterName(self, regname):
"""
Returns the Name of the Containing register (in the case
of meta-registers) or the name of the register.
"""
ridx = self.getRegisterIndex(regname)
        if ridx is not None:
return self.getRegisterName(ridx & RMETA_NMASK)
return regname
def addLocalEnums(l, regdef):
"""
Update a dictionary (or module locals) with REG_FOO index
values for all the base registers defined in regdef.
"""
for i,(rname,width) in enumerate(regdef):
l["REG_%s" % rname.upper()] = i
def addLocalStatusMetas(l, metas, statmetas, regname):
'''
Dynamically create data based on the status register meta register
definition.
Adds new meta registers and bitmask constants.
'''
for metaname, idx, offset, width, desc in statmetas:
# create meta registers
metas.append( (metaname, idx, offset, width) )
# create local bitmask constants (EFLAGS_%)
l['%s_%s' % (regname, metaname)] = 1 << offset # TODO: fix for arbitrary width
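# Illustrative note on the TODO above (added, not part of the original module):
# `1 << offset` only covers single-bit flags. A width-aware mask would be
# ((1 << width) - 1) << offset; e.g. the 2-bit IOPL field at offset 12 of
# EFLAGS masks to 0x3000 rather than 0x1000.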
def addLocalMetas(l, metas):
"""
Update a dictionary (or module locals) with REG_FOO index
values for all meta registers defined in metas.
"""
for name, idx, offset, width in metas:
l["REG_%s" % name.upper()] = (offset << 24) | (width << 16) | idx
| 31.643519 | 88 | 0.59744 | 1,620 | 13,670 | 4.953086 | 0.201235 | 0.055833 | 0.017822 | 0.01346 | 0.2834 | 0.225324 | 0.218096 | 0.205882 | 0.185444 | 0.159771 | 0 | 0.006314 | 0.316459 | 13,670 | 431 | 89 | 31.716937 | 0.852419 | 0.327944 | 0 | 0.252577 | 0 | 0 | 0.007943 | 0 | 0 | 0 | 0.010922 | 0.00464 | 0 | 1 | 0.221649 | false | 0.005155 | 0.015464 | 0.020619 | 0.391753 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
c4a6ac024777e5d5757393235c2f8a34ef55a681 | 531 | py | Python | services/nris-api/backend/app/extensions.py | parc-jason/mds | 8f181a429442208a061ed72065b71e6c2bd0f76f | [
"Apache-2.0"
] | null | null | null | services/nris-api/backend/app/extensions.py | parc-jason/mds | 8f181a429442208a061ed72065b71e6c2bd0f76f | [
"Apache-2.0"
] | null | null | null | services/nris-api/backend/app/extensions.py | parc-jason/mds | 8f181a429442208a061ed72065b71e6c2bd0f76f | [
"Apache-2.0"
] | null | null | null |
from flask_caching import Cache
from flask_jwt_oidc import JwtManager
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate, MigrateCommand
from flask import current_app
from elasticapm.contrib.flask import ElasticAPM
from .config import Config
from .helper import Api
apm = ElasticAPM()
db = SQLAlchemy()
migrate = Migrate()
jwt = JwtManager()
cache = Cache()
api = Api(
prefix=f'{Config.BASE_PATH}',
doc=f'{Config.BASE_PATH}/',
default='nris_api',
default_label='NRIS related operations')
| 23.086957 | 49 | 0.770245 | 71 | 531 | 5.619718 | 0.422535 | 0.112782 | 0.055138 | 0.075188 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.145009 | 531 | 22 | 50 | 24.136364 | 0.878855 | 0 | 0 | 0 | 0 | 0 | 0.128302 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.444444 | 0 | 0.444444 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 |
c4ad9991f367ca79cfc5f643798ad08df02746df | 905 | py | Python | pylbm_ui/widgets/message.py | pylbm/pylbm_ui | 0a7202ee6ee5424486ce6ade1d3b18d8139d4ffb | [
"BSD-3-Clause"
] | 3 | 2021-05-17T20:38:32.000Z | 2021-11-16T17:54:26.000Z | pylbm_ui/widgets/message.py | pylbm/pylbm_ui | 0a7202ee6ee5424486ce6ade1d3b18d8139d4ffb | [
"BSD-3-Clause"
] | 32 | 2021-04-29T13:27:13.000Z | 2021-07-01T07:22:58.000Z | pylbm_ui/widgets/message.py | pylbm/pylbm_ui | 0a7202ee6ee5424486ce6ade1d3b18d8139d4ffb | [
"BSD-3-Clause"
] | 1 | 2021-04-30T06:40:21.000Z | 2021-04-30T06:40:21.000Z | import ipyvuetify as v
class Message(v.Container):
def __init__(self, message):
self.message = v.Alert(
children=[f'{message}...'],
class_='primary--text'
)
super().__init__(
children=[
v.Row(
children=[
v.ProgressCircular(
indeterminate=True,
color='primary',
size=70,
width=4
)
],
justify='center'
),
v.Row(
children=[
self.message,
],
justify='center'
)
]
)
def update(self, new_message):
self.message.children = [f'{new_message}...'] | 26.617647 | 53 | 0.340331 | 58 | 905 | 5.12069 | 0.5 | 0.148148 | 0.121212 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.007576 | 0.562431 | 905 | 34 | 53 | 26.617647 | 0.742424 | 0 | 0 | 0.3 | 0 | 0 | 0.066225 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.066667 | false | 0 | 0.033333 | 0 | 0.133333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
c4aef0df820c8e4498c5c1703e7a91b20097e686 | 621 | py | Python | busker/migrations/0013_auto_20200906_1933.py | tinpan-io/django-busker | 52df06b82e15572d0cd9c9d13ba2d5136585bc2d | [
"MIT"
] | 2 | 2020-09-01T12:06:07.000Z | 2021-09-24T09:54:57.000Z | busker/migrations/0013_auto_20200906_1933.py | tinpan-io/django-busker | 52df06b82e15572d0cd9c9d13ba2d5136585bc2d | [
"MIT"
] | null | null | null | busker/migrations/0013_auto_20200906_1933.py | tinpan-io/django-busker | 52df06b82e15572d0cd9c9d13ba2d5136585bc2d | [
"MIT"
] | null | null | null | # Generated by Django 3.1.1 on 2020-09-06 19:33
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('busker', '0012_auto_20200905_2042'),
]
operations = [
migrations.AlterModelOptions(
name='downloadcode',
options={'ordering': ['id']},
),
migrations.AlterField(
model_name='file',
name='work',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='files', to='busker.downloadablework'),
),
]
| 25.875 | 133 | 0.615137 | 64 | 621 | 5.875 | 0.6875 | 0.06383 | 0.074468 | 0.117021 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.067245 | 0.257649 | 621 | 23 | 134 | 27 | 0.748373 | 0.072464 | 0 | 0.117647 | 1 | 0 | 0.151568 | 0.080139 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.117647 | 0 | 0.294118 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
c4b186ebba7523cfef5343184718edecec88a7e6 | 10,731 | py | Python | kronos/utils.py | jtaghiyar/kronos | 6cc3665f43b5868ad98def762c533eb74dd501e1 | [
"MIT"
] | 17 | 2016-01-10T23:54:06.000Z | 2021-01-30T09:36:19.000Z | kronos/utils.py | jtaghiyar/kronos | 6cc3665f43b5868ad98def762c533eb74dd501e1 | [
"MIT"
] | 3 | 2016-10-11T02:38:01.000Z | 2017-03-14T03:27:34.000Z | kronos/utils.py | jtaghiyar/kronos | 6cc3665f43b5868ad98def762c533eb74dd501e1 | [
"MIT"
] | 6 | 2015-12-10T21:52:31.000Z | 2019-10-07T18:57:57.000Z | '''
Created on Apr 16, 2014
@author: jtaghiyar
'''
import os
import subprocess as sub
from plumber import Plumber
from job_manager import LocalJobManager
from workflow_manager import WorkFlow
from helpers import trim, make_dir, export_to_environ
class ComponentAbstract(object):
"""
component template.
"""
def __init__(self, component_name, component_parent_dir=None, seed_dir_name=None):
'''
initialize general attributes that each component must have.
'''
## export component parent directory to the PYTHONPATH env var
if component_parent_dir is not None:
export_to_environ(component_parent_dir, 'PYTHONPATH')
## import modules of the component, i.e. component_reqs and component_params.
## if component_parent_dir==None, then components directory must have been exported to
## the PYTHONPATH env var beforehand.
        list_of_modules = ['component_' + x for x in ['reqs', 'params']]
m = __import__(component_name, globals(), locals(), list_of_modules, -1)
if component_parent_dir is None:
component_parent_dir = os.path.dirname(os.path.dirname(os.path.abspath(m.__file__)))
if seed_dir_name is None:
seed_dir_name = 'component_seed'
## The component_ui is NOT imported, since all the input arguments should be passed to
## the component_main from config file via updating self.args attribute that happens in
## the corresponding task of the component. Therefore, an empty namespace is initialized
## here.
import argparse
parser = argparse.ArgumentParser()
args, _ = parser.parse_known_args()
# args.__dict__['return_value'] = None
## general attribute
self.component_name = component_name
self.component_dir = component_parent_dir
self.seed_dir = os.path.join(self.component_dir, component_name, seed_dir_name)
## modules and args
self.args = args
self._modules = m
self.component_reqs = self._modules.component_reqs
self.component_params = self._modules.component_params
## from the component_reqs
self.env_vars = self.component_reqs.env_vars
self.memory = self.component_reqs.memory
self.parallel = self.component_reqs.parallel
self.requirements = self.component_reqs.requirements.copy()
self.seed_version = self.component_reqs.seed_version
self.version = self.component_reqs.version
def run(self):
"""run component via system command line locally."""
cmd, cmd_args = self.make_cmd()
ljm = LocalJobManager()
ljm.run_job(cmd, cmd_args, self.component_name)
def focus(self, cmd, cmd_args, chunk):
"update the cmd and cmd_args for each chunk."
raise NotImplementedError("focus method called before implementation")
return cmd, cmd_args
def make_cmd(self, chunk=None):
"""make a command."""
cmd = None
cmd_args = None
raise NotImplementedError("make_cmd method called before implementation")
return cmd, cmd_args
def test(self):
"""run unittest of the component."""
raise NotImplementedError("test method called before implementation")
class Task(object):
"""
Wrap one component for the following purposes:
1. to update the args passed to the component via command line.
2. to update the requirements of the component given in the config file.
3. to give access to the 'input_files', 'output_files',
'input_params', 'return_values' and 'input_arguments' of the component.
"""
def __init__(self, task_name, component):
self.task_name = task_name
self.component = component
def update_comp_args(self, **kwargs):
"""Update self.component.args, i.e. overwrite argument specified vi command line.
This can help pass the previous task's results to the parameters
of the current task.
"""
## change the Namespace object to dictionary
args_dict = vars(self.component.args)
if kwargs is not None:
kwargs = trim(kwargs, '__pipeline__')
args_dict.update(kwargs)
def update_comp_reqs(self, reqs_dict):
"""Update self.component.requirements dictionary if there are new
values given in the config file, or keep the default otherwise.
"""
## do not update the default value of a requirement
## if it is not changed in the config file
## or it is not one of the requirements of the components
d = {k:v for k,v in reqs_dict.iteritems()
if v is not None and k in self.component.requirements.keys()}
self.component.requirements.update(d)
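    ## Illustration (added, not original code): given component requirements
    ## {'python': '/usr/bin/python'} and a config dict
    ## {'python': '/opt/python', 'java': None, 'bogus': 'x'}, only 'python'
    ## is updated -- None values keep their defaults, unknown keys are ignored.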
def update_comp_env_vars(self, env_vars):
"""update the environment variables with values from the config file."""
if not self.component.env_vars:
self.component.env_vars = env_vars
else:
self.component.env_vars.update(env_vars)
def update_comp_output_filenames(self, prefix, working_dir=None, no_prefix=False):
"""update the output file names by prepending the prefix to their names."""
output_file_params = self.component.component_params.output_files.keys()
## change the Namespace object to dictionary
args_dict = vars(self.component.args)
wd = os.getcwd()
if working_dir:
os.chdir(working_dir)
for param in output_file_params:
value = args_dict.get(param)
if value is not None:
dirname = os.path.dirname(value)
self._make_dirs(dirname)
## prepend filenames with the given prefix
old_filename = os.path.basename(value)
if old_filename:
if no_prefix:
new_filename = old_filename
else:
new_filename = '_'.join([prefix, old_filename])
args_dict[param] = os.path.join(dirname, new_filename)
else:
args_dict[param] = dirname
os.chdir(wd)
def _make_dirs(self, path):
"""make dirs using os.makedirs"""
if not path:
return
try:
os.makedirs(path)
except OSError as e:
if e.strerror == 'File exists':
pass
else:
raise
class Pipeline(object):
'''
a pipeline could be composed of one or more ruffus task
that can be run as an independent entity provided that proper input/output
arguments are passed to it.
'''
def __init__(self, pipeline_name, config_file, script_dir=os.getcwd(), sample_id=None):
self.pipeline_name = pipeline_name
self.config_file = config_file
self.script_dir = script_dir
self.sample_id = sample_id
make_dir(self.script_dir)
## path to where the resultant pipeline script is written
self.pipeline_script = os.path.join(self.script_dir, self.pipeline_name+'.py')
## use the WorkFlow to parse/make the config file
self.wf = WorkFlow(config_file)
## holds the starting point of the sub pipeline, key:tag value:task_object
self.start_task = {}
## holds the end point of the sub pipeline, key:tag value:task_object
self.stop_task = {}
        ## holds all the tasks of the pipeline, key:task_name value:task_object
        ## (required by add_task/set_start_task/set_stop_task below)
        self.tasks = {}
        ## list of all the inputs to the pipeline, i.e. set of the inputs of
        ## all the root tasks. A dict with k:input_params and v:input_arguments
        self.inputs = {}
def make_script(self, sample_id):
"""run the plumber and make a python script for the pipeline."""
with open(self.pipeline_script, 'w') as ps:
plumber = Plumber(ps, self.wf)
plumber.make_script(sample_id)
def run(self):
try:
##TODO: this part is incomplete
## Technically, a pipeline is a script, and we run the
## script here using a LocalJobManager
cmd = 'python {}'.format(self.pipeline_script)
proc = sub.Popen(cmd, shell=True)
cmdout, cmderr = proc.communicate()
print cmdout, cmderr
# ljm = LocalJobManager(logs_dir, results_dir)
# ljm.run_job(cmd=cmd)
except KeyboardInterrupt:
            print 'KeyboardInterrupt in main'
self.kill()
raise
def kill(self):
"""kill all the jobs."""
pass
def add_component(self, component_name, component_parent_dir):
pass
def add_task(self, task_name, component):
"""add task object to the list of tasks."""
task = Task(task_name, component)
self.tasks[task_name] = task
def get_inputs(self):
"""get the list of all input file parameters of all the root
components in the pipeline.
"""
return self.tasks['root'].input_files
def update_pipeline_script_args(self, args_namespace):
"""update args namespace of the pipeline script."""
## change the Namespace object to dictionary
args_dict = vars(args_namespace)
##TODO: make proper dictionary from the values that
## needs to be passed to the pipeline script
kwargs = None
args_dict.update(kwargs)
def update_components_args(self):
"""update all the arguments of all the components in the pipeline.
It is equivalent to running __TASK___task.update_comp_args()
method over each of the components in the pipeline.
"""
pass
def update_components_reqs(self):
"""update all the requirements of all the components in the pipeline.
It is equivalent to running __TASK___task.update_comp_reqs()
method over each of the components in the pipeline.
"""
pass
def import_python_modules(self):
"""import required python modules for the pipeline to run."""
pass
def import_factory_modules(self):
"""import required factory modules for the pipeline to run."""
pass
def set_start_task(self, task_name):
self.start_task = self.tasks[task_name]
def set_stop_task(self, task_name):
self.stop_task = self.tasks[task_name]
| 36.131313 | 96 | 0.620911 | 1,335 | 10,731 | 4.806742 | 0.201498 | 0.050647 | 0.02244 | 0.017921 | 0.170952 | 0.125916 | 0.105969 | 0.105969 | 0.095683 | 0.072308 | 0 | 0.001338 | 0.30342 | 10,731 | 296 | 97 | 36.253378 | 0.857124 | 0.150405 | 0 | 0.156463 | 0 | 0 | 0.04079 | 0 | 0 | 0 | 0 | 0.006757 | 0 | 0 | null | null | 0.047619 | 0.068027 | null | null | 0.013605 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
c4b380ac5b2bec0b07861a3d99e7430566f32546 | 2,724 | py | Python | odoo-13.0/venv/lib/python3.8/site-packages/stdnum/imo.py | VaibhavBhujade/Blockchain-ERP-interoperability | b5190a037fb6615386f7cbad024d51b0abd4ba03 | [
"MIT"
] | null | null | null | odoo-13.0/venv/lib/python3.8/site-packages/stdnum/imo.py | VaibhavBhujade/Blockchain-ERP-interoperability | b5190a037fb6615386f7cbad024d51b0abd4ba03 | [
"MIT"
] | 2 | 2021-06-22T01:34:18.000Z | 2021-06-22T01:40:28.000Z | odoo-13.0/venv/lib/python3.8/site-packages/stdnum/imo.py | VaibhavBhujade/Blockchain-ERP-interoperability | b5190a037fb6615386f7cbad024d51b0abd4ba03 | [
"MIT"
] | null | null | null | # imo.py - functions for handling IMO numbers
# coding: utf-8
#
# Copyright (C) 2015 Arthur de Jong
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA
"""IMO number (International Maritime Organization number).
A number used to uniquely identify ships (the hull) for purposes of
registering owners and management companies. The ship identification number
consists of a six-digit sequentially assigned number and a check digit. The
number is usually prefixed with "IMO".
Note that there seem to be a large number of ships with an IMO that does not
have a valid check digit or even have a different length.
>>> validate('IMO 9319466')
'9319466'
>>> validate('IMO 8814275')
'8814275'
>>> validate('8814274')
Traceback (most recent call last):
...
InvalidChecksum: ...
>>> format('8814275')
'IMO 8814275'
"""
from stdnum.exceptions import *
from stdnum.util import clean, isdigits
def compact(number):
"""Convert the number to the minimal representation. This strips the
number of any valid separators and removes surrounding whitespace."""
number = clean(number, ' ').upper().strip()
if number.startswith('IMO'):
number = number[3:]
return number
def calc_check_digit(number):
"""Calculate the check digits for the number."""
return str(sum(int(n) * (7 - i) for i, n in enumerate(number[:6])) % 10)
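# Worked example (illustrative, not part of the original module): for
# '931946' the weighted sum is 9*7 + 3*6 + 1*5 + 9*4 + 4*3 + 6*2 = 146,
# and 146 % 10 = 6 -- matching the valid number 9319466 in the doctests.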
def validate(number):
"""Check if the number provided is valid. This checks the length and
check digit."""
number = compact(number)
if not isdigits(number):
raise InvalidFormat()
if len(number) != 7:
raise InvalidLength()
if calc_check_digit(number[:-1]) != number[-1]:
raise InvalidChecksum()
return number
def is_valid(number):
"""Check if the number provided is valid. This checks the length and
check digit."""
try:
return bool(validate(number))
except ValidationError:
return False
def format(number):
"""Reformat the number to the standard presentation format."""
return 'IMO ' + compact(number)
| 31.674419 | 76 | 0.714391 | 388 | 2,724 | 5.002577 | 0.469072 | 0.032458 | 0.018547 | 0.029366 | 0.122617 | 0.122617 | 0.106131 | 0.071097 | 0.071097 | 0.071097 | 0 | 0.034247 | 0.196035 | 2,724 | 85 | 77 | 32.047059 | 0.852055 | 0.674376 | 0 | 0.08 | 0 | 0 | 0.009697 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.2 | false | 0 | 0.08 | 0 | 0.52 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
c4b3b6d76efc3c8c72713052f1e8b243b1695f31 | 265 | py | Python | yodl/__init__.py | brunolange/yodl | d9e957cacf1391fce3dfe9ac24e4fb434d14d8b0 | [
"MIT"
] | null | null | null | yodl/__init__.py | brunolange/yodl | d9e957cacf1391fce3dfe9ac24e4fb434d14d8b0 | [
"MIT"
] | null | null | null | yodl/__init__.py | brunolange/yodl | d9e957cacf1391fce3dfe9ac24e4fb434d14d8b0 | [
"MIT"
] | null | null | null | """yodl!
yodl provides a class decorator to build django models
from YAML configuration files
"""
from .decorators import yodl
from .io import yodlify
__author__ = "Bruno Lange"
__email__ = "blangeram@gmail.com"
__license__ = "MIT"
__all__ = ["yodl", "yodlify"]
| 18.928571 | 54 | 0.743396 | 34 | 265 | 5.323529 | 0.794118 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.150943 | 265 | 13 | 55 | 20.384615 | 0.804444 | 0.339623 | 0 | 0 | 0 | 0 | 0.261905 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.333333 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 |
c4b45d589da887df80357b5a791263b44c35a390 | 6,010 | py | Python | main.py | g-w1/hermes | 4c7388c0d75187b79c0c27e4322aa9e79a44666c | [
"MIT"
] | null | null | null | main.py | g-w1/hermes | 4c7388c0d75187b79c0c27e4322aa9e79a44666c | [
"MIT"
] | null | null | null | main.py | g-w1/hermes | 4c7388c0d75187b79c0c27e4322aa9e79a44666c | [
"MIT"
] | null | null | null | """
Usage: hermes install [-dsvV] <pkg>...
hermes -h | --help
hermes --version
Options:
-d, --depends Require dependency installation
-h, --help Display usage and options
-s, --check-sigs Verify package GPG signatures
-v, --verify Verify package checksums
-V, --verbose Display debugging messages
--version Display version
"""
from configure import valid_hermes_config
from configure import valid_pkg_config
from docopt import docopt # MIT License
import os # Standard Library
import requests # Apache License v2.0
import sh # MIT License
import tarfile # Standard Library
def dl_url(url, dest_path):
    dl = requests.get(url, stream=True)
    if dl.status_code != 200:
        return False
    # stream the archive to dest_path in 1 KiB chunks
    with open(dest_path, 'wb') as archive:
        for chunk in dl.iter_content(1024):
            archive.write(chunk)
    return True
def get_pkg(pkg_id):
    source_url = pkg_configs[pkg_id]['source_url']
    archive_path = os.path.join(hermes_dir, 'archives', pkg_id)
    if not dl_url(source_url, archive_path):
        return False
    if not os.path.isfile(archive_path):
return False
if not valid_archive(pkg_id):
return False
# if runtime_config[verify_pkg]:
# if not verified:
# return False
# if runtime_config[check_sigs]:
# if not verified:
# return False
return True
def get_pkg_config(pkg_id):
# This is a placeholder for repository-enabled functionality
return True
def install_pkg(pkg_id):
if runtime_config['install_dependencies']:
for dependency in pkg_configs[pkg_id]['dependencies']:
if not pkg_installed(dependency):
install_pkg(dependency)
# actual install code here
def main_installer(pkg_list):
for pkg_id in pkg_list:
if pkg_installed(pkg_id):
            print(pkg_id + ' is already installed.')
elif pkg_prepared(pkg_id):
install_pkg(pkg_id)
else:
# Error message
return False
def pkg_avail(pkg_id):
    if valid_archive(pkg_id):  # archive already present in hermes/archives
        return True
if get_pkg(pkg_id):
return True
# Error message
return False
def pkg_config_avail(pkg_id):
pkg_config_path = os.path.join(hermes_dir, 'configs', (pkg_id + '.hermes'))
if pkg_id in pkg_configs:
return True
elif os.path.isfile(pkg_config_path):
pkg_config = valid_pkg_config(pkg_config_path)
if pkg_config:
# populate pkg_configs[pkg_id] with contents of pkg_config
return True
else:
# Error message
return False
elif get_pkg_config(pkg_id):
return False # temporary short-circuit (get_pkg_config() is a dummy)
pkg_config = valid_pkg_config(pkg_config_path)
if pkg_config:
# populate pkg_configs[pkg_id] with contents of pkg_config
return True
else:
# Error message
return False
def pkg_installed(pkg_id):
# if symlink in target_dir points at package in hermes/pkg
# return True
# if symlink in target_dir points elsewhere
# deal with conflict
# if binary already exists in target_dir
# deal with conflict
# Error message
return False
def pkg_prepared(pkg_id):
if pkg_installed(pkg_id):
return True
if not pkg_config_avail(pkg_id):
# Error message
return False
if not pkg_avail(pkg_id):
# Error message
return False
    if runtime_config['install_dependencies']:
        for dependency in pkg_configs[pkg_id]['dependencies']:
if not pkg_prepared(dependency):
# Error message
return False
return True
def populate_runtime_config():
hermes_config = dict()
system_config_path = os.path.join(hermes_dir, '.hermes.conf')
user_config_path = os.path.expanduser(os.path.join('~', '.hermes.conf'))
if os.path.isfile(user_config_path):
hermes_config = valid_hermes_config(user_config_path)
if not hermes_config and os.path.isfile(system_config_path):
hermes_config = valid_hermes_config(system_config_path)
if not hermes_config:
hermes_config['check_sigs'] = True
hermes_config['install_dependencies'] = False
hermes_config['target_dir'] = '/usr/local'
hermes_config['verify_pkgs'] = True
    # CLI flags override both config files (runtime_config does not exist yet
    # inside this function, so update hermes_config before returning it)
    if cli_args['--depends']:
        hermes_config['install_dependencies'] = True
    if cli_args['--check-sigs']:
        hermes_config['check_sigs'] = True
    if cli_args['--verify']:
        hermes_config['verify_pkgs'] = True
return hermes_config
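# Illustration (added, assuming no ~/.hermes.conf or hermes/.hermes.conf
# exists): `hermes install -d foo` makes populate_runtime_config() return
#     {'check_sigs': True, 'install_dependencies': True,
#      'target_dir': '/usr/local', 'verify_pkgs': True}
# i.e. the hard-coded defaults, with -d flipping install_dependencies.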
def valid_archive(pkg_id):
tarball_name = pkg_id + pkg_configs[pkg_id]['tarball_ext']
    tarball_path = os.path.join(hermes_dir, 'archives', tarball_name)
if not os.path.isfile(tarball_path):
return False
if not tarfile.is_tarfile(tarball_path):
return False
return True
def valid_pkg(pkg_id):
# if not valid_archive(pkg_id):
# Error message
# return False
# if cli_args[--verify'] and checksum is bad:
# Error message
# return False
# if cli_args['--check-sigs'] and sig is bad:
# Error message
# return False
return True
if __name__ == '__main__':
cli_args = docopt(__doc__, version='hermes v0.0.1')
    print(cli_args)
# hermes_dir = os.path.dirname(sh.which('hermes'))
hermes_dir = 'hermes'
runtime_config = populate_runtime_config()
    print(runtime_config)
pkg_configs = dict()
if cli_args['install']:
        print('Installing ' + str(cli_args['<pkg>']))
main_installer(cli_args['<pkg>'])
| 30.820513 | 79 | 0.632612 | 779 | 6,010 | 4.634146 | 0.198973 | 0.047091 | 0.054848 | 0.070083 | 0.396953 | 0.282271 | 0.204986 | 0.137396 | 0.11856 | 0.11856 | 0 | 0.002791 | 0.284526 | 6,010 | 194 | 80 | 30.979381 | 0.836744 | 0.198336 | 0 | 0.310345 | 0 | 0 | 0.07077 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.060345 | null | null | 0.034483 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
c4b535911ba95193b86d162ae29dd779c08ef75c | 26,047 | py | Python | userbot/plugins/quotes.py | aksr-aashish/FIREXUSERBOT | dff0b7bf028cb27779626ce523402346cc990402 | [
"MIT"
] | null | null | null | userbot/plugins/quotes.py | aksr-aashish/FIREXUSERBOT | dff0b7bf028cb27779626ce523402346cc990402 | [
"MIT"
] | 1 | 2022-01-09T11:35:06.000Z | 2022-01-09T11:35:06.000Z | userbot/plugins/quotes.py | aksr-aashish/FIREXUSERBOT | dff0b7bf028cb27779626ce523402346cc990402 | [
"MIT"
] | null | null | null | import random
import requests
from FIREX.utils import admin_cmd, edit_or_reply, sudo_cmd
from userbot.cmdhelp import CmdHelp
LOVESTR = [
"The best and most beautiful things in this world cannot be seen or even heard, but must be felt with the heart.",
"You know you're in love when you can't fall asleep because reality is finally better than your dreams.",
"Love recognizes no barriers. It jumps hurdles, leaps fences, penetrates walls to arrive at its destination full of hope.",
"Being deeply loved by someone gives you strength, while loving someone deeply gives you courage.",
"The real lover is the man who can thrill you by kissing your forehead or smiling into your eyes or just staring into space.",
"I swear I couldn't love you more than I do right now, and yet I know I will tomorrow.",
"When I saw you I fell in love, and you smiled because you knew it.",
"In all the world, there is no heart for me like yours. / In all the world, there is no love for you like mine.",
"To love or have loved, that is enough. Ask nothing further. There is no other pearl to be found in the dark folds of life.",
"If you live to be a hundred, I want to live to be a hundred minus one day, so I never have to live without you.",
"Some love stories aren't epic novels. Some are short stories. But that doesn't make them any less filled with love.",
"As he read, I fell in love the way you fall asleep: slowly, and then all at once.",
"I've never had a moment's doubt. I love you. I believe in you completely. You are my dearest one. My reason for life.",
"Do I love you? My god, if your love were a grain of sand, mine would be a universe of beaches.",
"I am who I am because of you.",
"I just want you to know that you're very special... and the only reason I'm telling you is that I don't know if anyone else ever has.",
"Remember, we're madly in love, so it's all right to kiss me any time you feel like it.",
"I love you. I knew it the minute I met you.",
"I loved her against reason, against promise, against peace, against hope, against happiness, against all discouragement that could be.",
"I love you not because of who you are, but because of who I am when I am with you.",
]
DHOKA = [
"Humne Unse Wafa Ki, Aur Dil Bhi Gya Toot, Wo Bhi Chinaal Nikli, Uski Maa ki Chut.",
"Dabbe Me Dabba, Dabbe Me Cake ..Tu Chutiya Hai Zara Seesha To Dekh.",
"Kaam Se Kaam Rakhoge Toh Naam Hoga, Randi Log Ke Chakkkar Me Padoge to Naam Badnaam Hoga.",
"Usne Kaha- Mah Lyf maH Rule, Maine Kaha Bhag BSDK , Tujhy Paida Karna hi Teri Baap ki Sabse Badi Vul.",
"Humse Ulajhna Mat, BSDK Teri Hasi Mita Dunga, Muh Me Land Daal Ke..Sari Hosiyaari Gand Se Nikal Dunga.",
"Aur Sunau Bhosdiwalo ..Kya Haal Hai?..Tumhare Sakal Se Zayda Toh Tumhare Gand Laal Hai!!",
"Pata Nhi Kya Kashish Hai Tumhare Mohabbat Me,Jab Bhi Tumhe Yaad Karta Hu Mera Land Khada Ho Jata Hai.",
"Konsa Mohabbat Kounsi Story, Gand Faad Dunga Agr Bolne Aayi Sorry!",
"Naam Banta Hai Risk Se, Chutiya Banta Hai IshQ Se.",
"Sun Be, Ab Tujhy Mere Zindegi Me Ane ka Koi Haq Nhi,,Aur Tu 1 Number Ki Randi Hai Isme KOi Saq Nhi.",
"Beta Tu Chugli Karna Chor De , Hum Ungli Karna Chor Dengy.",
]
METOOSTR = [
"Me too thanks",
"Haha yes, me too",
"Same lol",
"Me irl",
"Same here",
"Haha yes",
"Me rn",
]
GDNOON = [
"`My wishes will always be with you, Morning wish to make you feel fresh, Afternoon wish to accompany you, Evening wish to refresh you, Night wish to comfort you with sleep, Good Afternoon Dear!`",
"`With a deep blue sky over my head and a relaxing wind around me, the only thing I am missing right now is the company of you. I wish you a refreshing afternoon!`",
"`The day has come a halt realizing that I am yet to wish you a great afternoon. My dear, if you thought you were forgotten, you’re so wrong. Good afternoon!`",
"`Good afternoon! May the sweet peace be part of your heart today and always and there is life shining through your sigh. May you have much light and peace.`",
"`With you, every part of a day is beautiful. I live every day to love you more than yesterday. Wishing you an enjoyable afternoon my love!`",
"`This bright afternoon sun always reminds me of how you brighten my life with all the happiness. I miss you a lot this afternoon. Have a good time`!",
"`Nature looks quieter and more beautiful at this time of the day! You really don’t want to miss the beauty of this time! Wishing you a happy afternoon!`",
"`What a wonderful afternoon to finish you day with! I hope you’re having a great time sitting on your balcony, enjoying this afternoon beauty!`",
"`I wish I were with you this time of the day. We hardly have a beautiful afternoon like this nowadays. Wishing you a peaceful afternoon!`",
"`As you prepare yourself to wave goodbye to another wonderful day, I want you to know that, I am thinking of you all the time. Good afternoon!`",
"`This afternoon is here to calm your dog-tired mind after a hectic day. Enjoy the blessings it offers you and be thankful always. Good afternoon!`",
"`The gentle afternoon wind feels like a sweet hug from you. You are in my every thought in this wonderful afternoon. Hope you are enjoying the time!`",
"`Wishing an amazingly good afternoon to the most beautiful soul I have ever met. I hope you are having a good time relaxing and enjoying the beauty of this time!`",
"`Afternoon has come to indicate you, Half of your day’s work is over, Just another half a day to go, Be brisk and keep enjoying your works, Have a happy noon!`",
"`Mornings are for starting a new work, Afternoons are for remembering, Evenings are for refreshing, Nights are for relaxing, So remember people, who are remembering you, Have a happy noon!`",
"`If you feel tired and sleepy you could use a nap, you will see that it will help you recover your energy and feel much better to finish the day. Have a beautiful afternoon!`",
"`Time to remember sweet persons in your life, I know I will be first on the list, Thanks for that, Good afternoon my dear!`",
"`May this afternoon bring a lot of pleasant surprises for you and fills you heart with infinite joy. Wishing you a very warm and love filled afternoon!`",
"`Good, better, best. Never let it rest. Til your good is better and your better is best. “Good Afternoon`”",
"`May this beautiful afternoon fill your heart boundless happiness and gives you new hopes to start yours with. May you have lot of fun! Good afternoon dear!`",
"`As the blazing sun slowly starts making its way to the west, I want you to know that this beautiful afternoon is here to bless your life with success and peace. Good afternoon!`",
"`The deep blue sky of this bright afternoon reminds me of the deepness of your heart and the brightness of your soul. May you have a memorable afternoon!`",
"`Your presence could make this afternoon much more pleasurable for me. Your company is what I cherish all the time. Good afternoon!`",
"`A relaxing afternoon wind and the sweet pleasure of your company can make my day complete. Missing you so badly during this time of the day! Good afternoon!`",
"`Wishing you an afternoon experience so sweet and pleasant that feel thankful to be alive today. May you have the best afternoon of your life today!`",
"`My wishes will always be with you, Morning wish to make you feel fresh, Afternoon wish to accompany you, Evening wish to refresh you, Night wish to comfort you with sleep, Good afternoon dear!`",
"`Noon time – it’s time to have a little break, Take time to breathe the warmth of the sun, Who is shining up in between the clouds, Good afternoon!`",
"`You are the cure that I need to take three times a day, in the morning, at the night and in the afternoon. I am missing you a lot right now. Good afternoon!`",
"`I want you when I wake up in the morning, I want you when I go to sleep at night and I want you when I relax under the sun in the afternoon!`",
"`I pray to god that he keeps me close to you so we can enjoy these beautiful afternoons together forever! Wishing you a good time this afternoon!`",
"`You are every bit of special to me just like a relaxing afternoon is special after a toiling noon. Thinking of my special one in this special time of the day!`",
"`May your Good afternoon be light, blessed, enlightened, productive and happy.`",
"`Thinking of you is my most favorite hobby every afternoon. Your love is all I desire in life. Wishing my beloved an amazing afternoon!`",
"`I have tasted things that are so sweet, heard words that are soothing to the soul, but comparing the joy that they both bring, I’ll rather choose to see a smile from your cheeks. You are sweet. I love you.`",
"`How I wish the sun could obey me for a second, to stop its scorching ride on my angel. So sorry it will be hot there. Don’t worry, the evening will soon come. I love you.`",
"`I want you when I wake up in the morning, I want you when I go to sleep at night and I want you when I relax under the sun in the afternoon!`",
"`With you every day is my lucky day. So lucky being your love and don’t know what else to say. Morning night and noon, you make my day.`",
"`Your love is sweeter than what I read in romantic novels and fulfilling more than I see in epic films. I couldn’t have been me, without you. Good afternoon honey, I love you!`",
"`No matter what time of the day it is, No matter what I am doing, No matter what is right and what is wrong, I still remember you like this time, Good Afternoon!`",
"`Things are changing. I see everything turning around for my favor. And the last time I checked, it’s courtesy of your love. 1000 kisses from me to you. I love you dearly and wishing you a very happy noon.`",
"`You are sometimes my greatest weakness, you are sometimes my biggest strength. I do not have a lot of words to say but let you make sure, you make my day, Good Afternoon!`",
"`Every afternoon is to remember the one whom my heart beats for. The one I live and sure can die for. Hope you doing good there my love. Missing your face.`",
"`My love, I hope you are doing well at work and that you remember that I will be waiting for you at home with my arms open to pamper you and give you all my love. I wish you a good afternoon!`",
"`Afternoons like this makes me think about you more. I desire so deeply to be with you in one of these afternoons just to tell you how much I love you. Good afternoon my love!`",
"`My heart craves for your company all the time. A beautiful afternoon like this can be made more enjoyable if you just decide to spend it with me. Good afternoon!`",
]
CHASE_STR = [
"Where do you think you're going?",
"Huh? what? did they get away?",
"ZZzzZZzz... Huh? what? oh, just them again, nevermind.",
"`Get back here!`",
"`Not so fast...`",
"Look out for the wall!",
"Don't leave me alone with them!!",
"You run, you die.",
"`Jokes on you, I'm everywhere`",
"You're gonna regret that...",
"You could also try /kickme, I hear that's fun.",
"`Go bother someone else, no-one here cares.`",
"You can run, but you can't hide.",
"Is that all you've got?",
"I'm behind you...",
"You've got company!",
"We can do this the easy way, or the hard way.",
"You just don't get it, do you?",
"Yeah, you better run!",
"Please, remind me how much I care?",
"I'd run faster if I were you.",
"That's definitely the droid we're looking for.",
"May the odds be ever in your favour.",
"Famous last words.",
"And they disappeared forever, never to be seen again.",
'"Oh, look at me! I\'m so cool, I can run from a bot!" - this person',
"Yeah yeah, just tap /kickme already.",
"Here, take this ring and head to Mordor while you're at it.",
"eviral has it, they're still running...",
"Unlike Harry Potter, your parents can't protect you from me.",
"Fear leads to anger. Anger leads to hate. Hate leads to suffering. If you keep running in fear, you might "
"be the next Vader.",
"Multiple calculations later, I have decided my interest in your shenanigans is exactly 0.",
"eviral has it, they're still running.",
"Keep it up, not sure we want you here anyway.",
"You're a wiza- Oh. Wait. You're not Harry, keep moving.",
"NO RUNNING IN THE HALLWAYS!",
"Hasta la vista, baby.",
"Who let the dogs out?",
"It's funny, because no one cares.",
"Ah, what a waste. I liked that one.",
"Frankly, my dear, I don't give a damn.",
"My milkshake brings all the boys to yard... So run faster!",
"You can't HANDLE the truth!",
"A long time ago, in a galaxy far far away... Someone would've cared about that. Not anymore though.",
"Hey, look at them! They're running from the inevitable banhammer... Cute.",
"Han shot first. So will I.",
"What are you running after, a white rabbit?",
"As The Doctor would say... RUN!",
]
eviralOSTR = [
"Hi !",
"‘Ello, gov'nor!",
"What’s crackin’?",
"Howdy, howdy ,howdy!",
"hello, who's there, I'm talking.",
"You know who this is.",
"Yo!",
"Whaddup.",
"Greetings and salutations!",
"hello, sunshine!",
"`Hey, howdy, hi!`",
"What’s kickin’, little chicken?",
"Peek-a-boo!",
"Howdy-doody!",
"`Hey there, freshman!`",
"`I come in peace!`",
"`I come for peace!`",
"Ahoy, matey!",
"`Hi !`",
]
CONGRATULATION = [
"`Congratulations and BRAVO!`",
"`You did it! So proud of you!`",
"`This calls for celebrating! Congratulations!`",
"`I knew it was only a matter of time. Well done!`",
"`Congratulations on your well-deserved success.`",
"`Heartfelt congratulations to you.`",
"`Warmest congratulations on your achievement.`",
"`Congratulations and best wishes for your next adventure!”`",
"`So pleased to see you accomplishing great things.`",
"`Feeling so much joy for you today. What an impressive achievement!`",
]
BYESTR = [
"`Nice talking with you`",
"`I've gotta go!`",
"`I've gotta run!`",
"`I've gotta split`",
"`I'm off!`",
"`Great to see you,bye`",
"`See you soon`",
"`Farewell!`",
]
GDNIGHT = [
"`Good night keep your dreams alive`",
"`Night, night, to a dear friend! May you sleep well!`",
"`May the night fill with stars for you. May counting every one, give you contentment!`",
"`Wishing you comfort, happiness, and a good night’s sleep!`",
"`Now relax. The day is over. You did your best. And tomorrow you’ll do better. Good Night!`",
"`Good night to a friend who is the best! Get your forty winks!`",
"`May your pillow be soft, and your rest be long! Good night, friend!`",
"`Let there be no troubles, dear friend! Have a Good Night!`",
"`Rest soundly tonight, friend!`",
"`Have the best night’s sleep, friend! Sleep well!`",
"`Have a very, good night, friend! You are wonderful!`",
"`Relaxation is in order for you! Good night, friend!`",
"`Good night. May you have sweet dreams tonight.`",
"`Sleep well, dear friend and have sweet dreams.`",
"`As we wait for a brand new day, good night and have beautiful dreams.`",
"`Dear friend, I wish you a night of peace and bliss. Good night.`",
"`Darkness cannot last forever. Keep the hope alive. Good night.`",
"`By hook or crook you shall have sweet dreams tonight. Have a good night, buddy!`",
"`Good night, my friend. I pray that the good Lord watches over you as you sleep. Sweet dreams.`",
"`Good night, friend! May you be filled with tranquility!`",
"`Wishing you a calm night, friend! I hope it is good!`",
"`Wishing you a night where you can recharge for tomorrow!`",
"`Slumber tonight, good friend, and feel well rested, tomorrow!`",
"`Wishing my good friend relief from a hard day’s work! Good Night!`",
"`Good night, friend! May you have silence for sleep!`",
"`Sleep tonight, friend and be well! Know that you have done your very best today, and that you will do your very best, tomorrow!`",
"`Friend, you do not hesitate to get things done! Take tonight to relax and do more, tomorrow!`",
"`Friend, I want to remind you that your strong mind has brought you peace, before. May it do that again, tonight! May you hold acknowledgment of this with you!`",
"`Wishing you a calm, night, friend! Hoping everything winds down to your liking and that the following day meets your standards!`",
"`May the darkness of the night cloak you in a sleep that is sound and good! Dear friend, may this feeling carry you through the next day!`",
"`Friend, may the quietude you experience tonight move you to have many more nights like it! May you find your peace and hold on to it!`",
"`May there be no activity for you tonight, friend! May the rest that you have coming to you arrive swiftly! May the activity that you do tomorrow match your pace and be all of your own making!`",
"`When the day is done, friend, may you know that you have done well! When you sleep tonight, friend, may you view all the you hope for, tomorrow!`",
"`When everything is brought to a standstill, friend, I hope that your thoughts are good, as you drift to sleep! May those thoughts remain with you, during all of your days!`",
"`Every day, you encourage me to do new things, friend! May tonight’s rest bring a new day that overflows with courage and exciting events!`",
]
GDMORNING = [
"`Life is full of uncertainties. But there will always be a sunrise after every sunset. Good morning!`",
"`It doesn’t matter how bad was your yesterday. Today, you are going to make it a good one. Wishing you a good morning!`",
"`If you want to gain health and beauty, you should wake up early. Good morning!`",
"`May this morning offer you new hope for life! May you be happy and enjoy every moment of it. Good morning!`",
"`May the sun shower you with blessings and prosperity in the days ahead. Good morning!`",
"`Every sunrise marks the rise of life over death, hope over despair and happiness over suffering. Wishing you a very enjoyable morning today!`",
"`Wake up and make yourself a part of this beautiful morning. A beautiful world is waiting outside your door. Have an enjoyable time!`",
"`Welcome this beautiful morning with a smile on your face. I hope you’ll have a great day today. Wishing you a very good morning!`",
"`You have been blessed with yet another day. What a wonderful way of welcoming the blessing with such a beautiful morning! Good morning to you!`",
"`Waking up in such a beautiful morning is a guaranty for a day that’s beyond amazing. I hope you’ll make the best of it. Good morning!`",
"`Nothing is more refreshing than a beautiful morning that calms your mind and gives you reasons to smile. Good morning! Wishing you a great day.`",
"`Another day has just started. Welcome the blessings of this beautiful morning. Rise and shine like you always do. Wishing you a wonderful morning!`",
"`Wake up like the sun every morning and light up the world your awesomeness. You have so many great things to achieve today. Good morning!`",
"`A new day has come with so many new opportunities for you. Grab them all and make the best out of your day. Here’s me wishing you a good morning!`",
"`The darkness of night has ended. A new sun is up there to guide you towards a life so bright and blissful. Good morning dear!`",
"`Wake up, have your cup of morning tea and let the morning wind freshen you up like a happiness pill. Wishing you a good morning and a good day ahead!`",
"`Sunrises are the best; enjoy a cup of coffee or tea with yourself because this day is yours, good morning! Have a wonderful day ahead.`",
"`A bad day will always have a good morning, hope all your worries are gone and everything you wish could find a place. Good morning!`",
"`A great end may not be decided but a good creative beginning can be planned and achieved. Good morning, have a productive day!`",
"`Having a sweet morning, a cup of coffee, a day with your loved ones is what sets your “Good Morning” have a nice day!`",
"`Anything can go wrong in the day but the morning has to be beautiful, so I am making sure your morning starts beautiful. Good morning!`",
"`Open your eyes with a smile, pray and thank god that you are waking up to a new beginning. Good morning!`",
"`Morning is not only sunrise but A Beautiful Miracle of God that defeats the darkness and spread light. Good Morning.`",
"`Life never gives you a second chance. So, enjoy every bit of it. Why not start with this beautiful morning. Good Morning!`",
"`If you want to gain health and beauty, you should wake up early. Good Morning!`",
"`Birds are singing sweet melodies and a gentle breeze is blowing through the trees, what a perfect morning to wake you up. Good morning!`",
"`This morning is so relaxing and beautiful that I really don’t want you to miss it in any way. So, wake up dear friend. A hearty good morning to you!`",
"`Mornings come with a blank canvas. Paint it as you like and call it a day. Wake up now and start creating your perfect day. Good morning!`",
"`Every morning brings you new hopes and new opportunities. Don’t miss any one of them while you’re sleeping. Good morning!`",
"`Start your day with solid determination and great attitude. You’re going to have a good day today. Good morning my friend!`",
"`Friendship is what makes life worth living. I want to thank you for being such a special friend of mine. Good morning to you!`",
"`A friend like you is pretty hard to come by in life. I must consider myself lucky enough to have you. Good morning. Wish you an amazing day ahead!`",
"`The more you count yourself as blessed, the more blessed you will be. Thank God for this beautiful morning and let friendship and love prevail this morning.`",
"`Wake up and sip a cup of loving friendship. Eat your heart out from a plate of hope. To top it up, a fork full of kindness and love. Enough for a happy good morning!`",
"`It is easy to imagine the world coming to an end. But it is difficult to imagine spending a day without my friends. Good morning.`",
]
@bot.on(admin_cmd(pattern=f"love$", outgoing=True))
@bot.on(sudo_cmd(pattern='love$', allow_sudo=True))
async def love(e):
txt = random.choice(LOVESTR)
await edit_or_reply(e, txt)
@bot.on(admin_cmd(pattern=f"dhoka$", outgoing=True))
@bot.on(sudo_cmd(pattern='dhoka$', allow_sudo=True))
async def katgya(e):
txt = random.choice(DHOKA)
await edit_or_reply(e, txt)
@bot.on(admin_cmd(pattern=f"metoo$", outgoing=True))
@bot.on(sudo_cmd(pattern='metoo$', allow_sudo=True))
async def metoo(e):
txt = random.choice(METOOSTR)
await edit_or_reply(e, txt)
@bot.on(admin_cmd(pattern=f"gdnoon$", outgoing=True))
@bot.on(sudo_cmd(pattern='gdnoon$', allow_sudo=True))
async def noon(e):
txt = random.choice(GDNOON)
await edit_or_reply(e, txt)
@bot.on(admin_cmd(pattern=f"chase$", outgoing=True))
@bot.on(sudo_cmd(pattern='chase$', allow_sudo=True))
async def police(e):
txt = random.choice(CHASE_STR)
await edit_or_reply(e, txt)
@bot.on(admin_cmd(pattern=f"congo$", outgoing=True))
@bot.on(sudo_cmd(pattern='congo$', allow_sudo=True))
async def Sahih(e):
txt = random.choice(CONGRATULATION)
await edit_or_reply(e, txt)
@bot.on(admin_cmd(pattern=f"qhi$", outgoing=True))
@bot.on(sudo_cmd(pattern='qhi$', allow_sudo=True))
async def hoi(e):
txt = random.choice(eviralOSTR)
await edit_or_reply(e, txt)
@bot.on(admin_cmd(pattern=f"gdbye$", outgoing=True))
@bot.on(sudo_cmd(pattern='gdbye$', allow_sudo=True))
async def bhago(e):
txt = random.choice(BYESTR)
await edit_or_reply(e, txt)
@bot.on(admin_cmd(pattern=f"gdnyt$", outgoing=True))
@bot.on(sudo_cmd(pattern='gdnyt$', allow_sudo=True))
async def night(e):
txt = random.choice(GDNIGHT)
await edit_or_reply(e, txt)
@bot.on(admin_cmd(pattern=f"gdmng$", outgoing=True))
@bot.on(sudo_cmd(pattern='gdmng$', allow_sudo=True))
async def morning(e):
txt = random.choice(GDMORNING)
await edit_or_reply(e, txt)
@bot.on(admin_cmd(pattern="quote ?(.*)", outgoing=True))
@bot.on(sudo_cmd(pattern="quote ?(.*)", allow_sudo=True))
async def quote_search(event):
if event.fwd_from:
return
catevent = await edit_or_reply(event, "`Processing...`")
input_str = event.pattern_match.group(1)
if not input_str:
api_url = "https://quotes.cwprojects.live/random"
try:
response = requests.get(api_url).json()
        except Exception:
response = None
else:
api_url = f"https://quotes.cwprojects.live/search/query={input_str}"
try:
response = random.choice(requests.get(api_url).json())
        except Exception:
response = None
if response is not None:
await catevent.edit(f"`{response['text']}`")
else:
await edit_or_reply(catevent, "`Sorry Zero results found`", 5)
CmdHelp("quotes").add_command(
"quote", None, "Sends a random mind-blowing quote"
).add_command("gdmng", None, "Sends a random Good Morning Quote").add_command(
"gdnyt", None, "Sends a random Good Night Quote"
).add_command(
"gdbye", None, "Sends a random Good Byee Quote"
).add_command(
"qhi", None, "Sends a random hello msg"
).add_command(
"congo", None, "Sends a random congratulations quote"
).add_command(
"chase", None, "Sends a random Chase quote"
).add_command(
"gdnoon", None, "Sends a random Good Afternoon quote"
).add_command(
"metoo", None, 'Sends a text saying "Mee too"'
).add_command(
"dhoka", None, "Sends a random Dhoka quote(katt gya bc)"
).add_command(
"love", None, "Sends a random love quote🥰. (A stage before .dhoka)"
).add()
| 65.609572 | 214 | 0.702231 | 4,434 | 26,047 | 4.106676 | 0.215381 | 0.019935 | 0.009061 | 0.010544 | 0.130814 | 0.093415 | 0.087429 | 0.063101 | 0.058817 | 0.058817 | 0 | 0.000387 | 0.206934 | 26,047 | 396 | 215 | 65.775253 | 0.881009 | 0 | 0 | 0.078431 | 0 | 0.184874 | 0.803547 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.011204 | 0 | 0.014006 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
c4b59ea674aa8a31f87633b437e5863be80f3ef3 | 4,089 | py | Python | tests/test_joints.py | slaclab/pystand | c0037d4af52cff98c7e758a7a0ff08156ade4646 | [
"BSD-3-Clause-LBNL"
] | null | null | null | tests/test_joints.py | slaclab/pystand | c0037d4af52cff98c7e758a7a0ff08156ade4646 | [
"BSD-3-Clause-LBNL"
] | null | null | null | tests/test_joints.py | slaclab/pystand | c0037d4af52cff98c7e758a7a0ff08156ade4646 | [
"BSD-3-Clause-LBNL"
] | 2 | 2018-05-30T19:02:58.000Z | 2020-12-13T00:35:01.000Z | ############
# Standard #
############
import math
###############
# Third Party #
###############
import ophyd
import pytest
##########
# Module #
##########
from detrot import ConeJoint, AngledJoint, StandPoint, Point
from conftest import PseudoMotor
@pytest.fixture(scope='function')
def pseudo_cone():
angled = ConeJoint(slide = PseudoMotor(5),
lift = PseudoMotor(10),
offset = Point(1,2,3))
return angled
@pytest.fixture(scope='function')
def pseudo_angle():
angled = AngledJoint(slide = PseudoMotor(5),
lift = PseudoMotor(10),
offset = Point(1,2,3))
return angled
def test_cone_joint(pseudo_cone):
#Test Vertical
pseudo_cone.alpha = math.pi/2.
assert pytest.approx(pseudo_cone.joint.x) == 5
assert pytest.approx(pseudo_cone.joint.y) == 10
#Test Horizontal
    pseudo_cone.alpha = 0
assert pseudo_cone.joint.x == 15
assert pseudo_cone.joint.y == 0
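# Illustration (added): the expectations above imply the cone joint maps
# (slide, lift) to (slide + lift*cos(alpha), lift*sin(alpha)). With
# alpha = pi/4, slide = 5, lift = 10 that is roughly (12.07, 7.07);
# adding the fixture's (1, 2) x/y offset gives the (13.07, 9.07) point
# inverted in the next test.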
def test_cone_invert(pseudo_cone):
#Test 45
pseudo_cone.alpha = math.pi/4.
assert pseudo_cone.invert((13.07,9.07))[0] == pytest.approx(5,0.1)
assert pseudo_cone.invert((13.07,9.07))[1] == pytest.approx(10,0.1)
def test_angle_joint(pseudo_angle):
#Test Vertical
pseudo_angle.alpha = math.pi/2.
assert pytest.approx(pseudo_angle.joint.x) == 5
assert pytest.approx(pseudo_angle.joint.y) == 10
assert pytest.approx(pseudo_angle.joint.z) == 0
#Test Horizontal
pseudo_angle.alpha = 0
assert pytest.approx(pseudo_angle.joint.x) == 5
assert pytest.approx(pseudo_angle.joint.y) == 0
assert pytest.approx(pseudo_angle.joint.z) == 10
#Test no-slide
pseudo_angle.slide = None
assert pytest.approx(pseudo_angle.joint.x) == 0
assert pytest.approx(pseudo_angle.joint.y) == 0
assert pytest.approx(pseudo_angle.joint.z) == 10
def test_angle_invert(pseudo_angle):
#Test Vertical
pseudo_angle.alpha = math.pi/2.
assert pseudo_angle.invert((6,12))[0] == pytest.approx(5,0.1)
assert pseudo_angle.invert((6,12))[1] == pytest.approx(10,0.1)
#Test no-slide
pseudo_angle.slide = None
assert pseudo_angle.invert((6,12)) == pytest.approx(10,0.1)
def test_position(pseudo_cone):
    pseudo_cone.alpha = 0
assert pseudo_cone.position == (16, 2, 3)
pseudo_cone.alpha = math.pi/2.
assert pseudo_cone.position.x == pytest.approx(6,0.1)
assert pseudo_cone.position.y == 12
assert pseudo_cone.position.z == 3
def test_displacement(pseudo_angle):
assert pseudo_angle.displacement == (5,10)
pseudo_angle.slide = None
assert pseudo_angle.displacement == 10
def test_set_joint(pseudo_angle):
#Vertical
pseudo_angle.alpha = math.pi/2.
pseudo_angle.set_joint((6,12))
assert pseudo_angle.displacement[0] == pytest.approx(5,0.1)
assert pseudo_angle.displacement[1] == pytest.approx(10,0.1)
#Test no-slide
pseudo_angle.slide = None
pseudo_angle.set_joint((6,12))
assert pseudo_angle.displacement == pytest.approx(10,0.1)
def test_model(pseudo_angle, pseudo_cone):
model = AngledJoint.model(pseudo_angle)
assert isinstance(model.slide, ophyd.SoftPositioner)
assert isinstance(model.lift, ophyd.SoftPositioner)
assert model.displacement == pseudo_angle.displacement
#Test no slide
pseudo_angle.slide = None
model = AngledJoint.model(pseudo_angle)
assert model.slide == None
assert isinstance(model.lift, ophyd.SoftPositioner)
assert model.displacement == pseudo_angle.displacement
#Test cone
model = ConeJoint.model(pseudo_cone)
assert isinstance(model.slide, ophyd.SoftPositioner)
assert isinstance(model.lift, ophyd.SoftPositioner)
assert model.displacement == pseudo_cone.displacement
def test_stop(pseudo_cone):
pseudo_cone.stop()
pseudo_cone.slide.stop_call.method.assert_called_with()
pseudo_cone.lift.stop_call.method.assert_called_with()
def test_cmp():
p1 = PseudoMotor(5)
p2 = PseudoMotor(10)
assert AngledJoint(p1,p2) == AngledJoint(p1, p2)
| 30.288889 | 71 | 0.682563 | 560 | 4,089 | 4.828571 | 0.126786 | 0.154586 | 0.073225 | 0.097633 | 0.706361 | 0.691938 | 0.596524 | 0.45821 | 0.401627 | 0.377219 | 0 | 0.038692 | 0.184642 | 4,089 | 134 | 72 | 30.514925 | 0.772346 | 0.043042 | 0 | 0.420455 | 0 | 0 | 0.004191 | 0 | 0 | 0 | 0 | 0 | 0.443182 | 1 | 0.136364 | false | 0 | 0.056818 | 0 | 0.215909 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |