import pytest
from selenium.common.exceptions import NoSuchElementException, StaleElementReferenceException
from selenium.webdriver.remote.webdriver import WebDriver
from selenium.webdriver.remote.webelement import WebElement
from nerodia.exception import LocatorException
from nerodia.locators.element import Matcher
from nerodia.locators.element.locator import Locator
@pytest.fixture
# xpath can be built to represent entire selector
# when SelectorBuilder result has additional locators to match
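def locator(request):
    # Hypothetical stand-in: the body of the decorated fixture did not
    # survive extraction; this minimal stub only keeps the module valid.
    yield None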
#! /usr/bin/env python3
import unittest
from datagenerator.BankAccount import BankAccount
from datagenerator.Person import Person
verbose_tests = False
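# The TestCase classes themselves did not survive in this snippet; a
# hypothetical minimal example of the kind of test the imports suggest,
# assuming Person can be constructed without arguments:
class PersonTest(unittest.TestCase):
    def test_person_is_created(self):
        self.assertIsNotNone(Person())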
if __name__ == '__main__':
    unittest.main()
from __future__ import annotations
from typing import TYPE_CHECKING, List, Optional
from typing_extensions import NotRequired, TypedDict, final
if TYPE_CHECKING:
    from ..shared import Snowflake
    from .user import UserData
__all__ = ('ApplicationData', 'TeamData', 'TeamMemberData')
# https://discord.com/developers/docs/resources/application#application-object-application-structure
@final
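class ApplicationData(TypedDict):
    # Hypothetical minimal field set; the original class body was lost in
    # extraction (see the Discord docs link above for the full structure).
    id: Snowflake
    name: str
    description: str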
# https://discord.com/developers/docs/topics/teams#data-models-team-object
@final
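class TeamData(TypedDict):
    # Hypothetical minimal field set; the original class body was lost in
    # extraction.
    id: Snowflake
    members: List['TeamMemberData']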
# https://discord.com/developers/docs/topics/teams#data-models-team-member-object
@final
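class TeamMemberData(TypedDict):
    # Hypothetical minimal field set; the original class body was lost in
    # extraction.
    membership_state: int
    team_id: Snowflake
    user: UserData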
for (i = 0; i < document.getElementsByTagName("code").length; i++) { console.log(document.getElementsByTagName("code")[i].innerText); }
import numpy as np
IMAGE_SIZE = 32
# Returns the rgb values of a pixel in an image
# Checks whether the current position is in the image
# Checks whether the value of r, g and b is [x] in an array
# Checks whether the current position is in the image and that the pixel is not black
# Returns the neighbouring pixels of a pixel in a image
# Gets the mean value of the surrounding pixels
# Interpolates pixels of images to fill the black pixels
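# None of the function bodies survived extraction; below is a hypothetical
# minimal sketch of the step the comments above describe, replacing each
# black pixel with the mean of its non-black neighbours.
def is_in_image(x, y):
    # Checks whether the current position is in the image.
    return 0 <= x < IMAGE_SIZE and 0 <= y < IMAGE_SIZE

def interpolate(image):
    # image: (IMAGE_SIZE, IMAGE_SIZE, 3) array of rgb values.
    out = image.copy()
    for x in range(IMAGE_SIZE):
        for y in range(IMAGE_SIZE):
            if image[x, y].any():  # pixel is not black, keep it
                continue
            neighbours = [image[x + dx, y + dy]
                          for dx in (-1, 0, 1) for dy in (-1, 0, 1)
                          if (dx or dy) and is_in_image(x + dx, y + dy)
                          and image[x + dx, y + dy].any()]
            if neighbours:
                out[x, y] = np.mean(neighbours, axis=0)
    return out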
print(countTreesProduct('InputD03Q2.txt', [[1, 1], [3, 1], [5, 1], [7, 1], [1, 2]]))
import os
import re
import sys
import numpy as np
import nltk
import xml.etree.ElementTree as ET
from nltk.tokenize import TweetTokenizer
tknzr = TweetTokenizer()
from nltk.stem import PorterStemmer
ps = PorterStemmer()
from nltk.corpus import stopwords
from collections import Counter
import collections
import time
from time import mktime
from datetime import datetime
from collections import defaultdict
from django.shortcuts import render
# Create your views here.
from django.http import HttpResponse
from django.template import loader
from django.views.generic import TemplateView
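# A hypothetical minimal view built from the imports above (the template
# name is an assumption):
class HomeView(TemplateView):
    template_name = "home.html"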
from syned.beamline.beamline import Beamline
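# VersionTag is used below but its definition did not survive in this
# snippet; a hypothetical minimal version so the constants evaluate:
class VersionTag:
    def __init__(self, stage):
        self.stage = stage  # 0=dev, 1=alpha, 2=beta, 3=stable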
DEV = VersionTag(0)
ALPHA = VersionTag(1)
BETA = VersionTag(2)
STABLE = VersionTag(3)
from collections import Counter
import celery
from patients.models_enums import Zygosity
from snpdb.models import VCF, SampleLocusCount
@celery.shared_task
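def update_sample_locus_counts(vcf_id):
    # Hypothetical stand-in: the decorated task body did not survive in this
    # snippet. The imports suggest it tallies per-sample zygosity counts
    # (a Counter over Zygosity values) into SampleLocusCount records for a VCF.
    pass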
import setuptools
with open("README.md", "r", encoding="utf-8") as fh:
long_description = fh.read()
setuptools.setup(
name='syct',
version='0.4.3',
author="Nadav Oved",
author_email="nadavo@gmail.com",
description="A Simple Yet Convenient Timer module for Python 3",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/nadavo/Timer.git",
package_dir={"": "src"},
packages=setuptools.find_packages(where="src", exclude=["tests"]),
python_requires='>=3',
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
)
# -*- coding: utf-8 -*-
"""
@author: Asma Baccouche
"""
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from Get_Data import get_data
from sklearn.cluster import KMeans, SpectralClustering
from nltk.cluster import KMeansClusterer, util
from gensim.models import Word2Vec
from sklearn import metrics
import numpy as np
from Data_helper import sent_vectorizer
data, deps, Deps_Count = get_data()
sentences = [sentence for sentence in data['TITLE']]
s = [sentence.split() for sentence in data['TITLE']]
vectorizer1 = CountVectorizer()
vectorizer2 = TfidfVectorizer()
X_TF = vectorizer1.fit_transform(sentences)
X_TFIDF = vectorizer2.fit_transform(sentences)
kmeans1 = KMeans(n_clusters=8).fit(X_TF)
labels1 = kmeans1.labels_
kmeans2 = KMeans(n_clusters=8).fit(X_TFIDF)
labels2 = kmeans2.labels_
SpectralClustering1 = SpectralClustering(n_clusters=8, assign_labels="discretize", random_state=0).fit(X_TF)
labels3 = SpectralClustering1.labels_
SpectralClustering2 = SpectralClustering(n_clusters=8, assign_labels="discretize", random_state=0).fit(X_TFIDF)
labels4 = SpectralClustering2.labels_
model = Word2Vec(s, min_count=1)
X = np.zeros((len(s), 100))
for i in range(len(s)):
X[i] = sent_vectorizer(s[i], model)
kclusterer = KMeansClusterer(8, distance=util.cosine_distance, repeats=25)
labels5 = kclusterer.cluster(X, assign_clusters=True)
print("Silhouette score : %0.3f" % metrics.silhouette_score(X_TF, labels1, metric='euclidean'))
print("Silhouette score : %0.3f" % metrics.silhouette_score(X_TFIDF, labels2, metric='euclidean'))
print("Silhouette score : %0.3f" % metrics.silhouette_score(X_TF, labels3, metric='euclidean'))
print("Silhouette score : %0.3f" % metrics.silhouette_score(X_TFIDF, labels4, metric='euclidean'))
print("Silhouette score : %0.3f" % metrics.silhouette_score(X, labels5, metric='euclidean'))
import snailfish
import unittest
import utils
if __name__ == '__main__':
    unittest.main()
#!/usr/bin/python
# -*- coding: utf-8 -*-
#from weibopy.api import API
from weibo import APIClient
import sys,os,urllib,urllib2,cookielib,httplib
import webbrowser
import urlparse
# Simulate the authorization flow and pull the code off the callback URL, in
# order to obtain the access token and the token's UNIX expiry time.
def get_code():
    client = APIClient(app_key=APP_KEY, app_secret=APP_SECRET, redirect_uri=CALLBACK_URL)
    referer_url = client.get_authorize_url()
    print "referer url is : %s" % referer_url
    cookies = urllib2.HTTPCookieProcessor()
    opener = urllib2.build_opener(cookies)
    urllib2.install_opener(opener)
    postdata = {"client_id": APP_KEY,
                "redirect_uri": CALLBACK_URL,
                "userId": USERID,
                "passwd": PASSWD,
                "isLoginSina": "0",
                "action": "submit",
                "response_type": "code",
                }
    headers = {"User-Agent": "Mozilla/5.0 (Windows NT 6.1; rv:11.0) Gecko/20100101 Firefox/11.0",
               "Host": "api.weibo.com",
               "Referer": referer_url
               }
    req = urllib2.Request(
        url = AUTH_URL,
        data = urllib.urlencode(postdata),
        headers = headers
    )
    try:
        resp = urllib2.urlopen(req)
        #print "callback url is : %s" % resp.geturl()
        code = resp.geturl()[-32:]
        #print "code is : %s" % resp.geturl()[-32:]
    except Exception, e:
        print e
    return code
APP_KEY = 'xxxxxxxx' # app key
APP_SECRET = 'xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx' # app secret
CALLBACK_URL = 'https://api.weibo.com/oauth2/default.html' # callback url
client = APIClient(app_key=APP_KEY, app_secret=APP_SECRET, redirect_uri=CALLBACK_URL)
code = get_code()
r = client.request_access_token(code)
print r
access_token = r.access_token # token returned by Sina, e.g. abc123xyz456
expires_in = r.expires_in # UNIX time at which the token expires
client.set_access_token(access_token, expires_in)
# post a plain text status
client.statuses.update.post(status=u'test')
# post a status with an image
f = open('C:/pic/test.jpg', 'rb')
r = client.statuses.upload.post(status=u'测试OAuth 2.0带图片发微博', pic=f)
f.close() # APIClient does not close the file automatically; close it yourself
begin()
""""The code determines, if the user input is an
even number or an odd number"""
integer_number = int(input("Enter an integer number: "))
if integer_number % 2 == 0:
print(integer_number, "is an even number.")
else:
print(integer_number," is an odd number.") | [
from django.shortcuts import render, get_object_or_404, redirect
from django.http import HttpResponse
from django.template import RequestContext, loader
from models import Profile, Post, PostComment
from forms import ProfileForm
from readings.models import Book, Day
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
import datetime, operator, json
@login_required(login_url='/account/login')
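def profile(request):
    # Hypothetical stand-in: the original view body is missing from this
    # snippet.
    return HttpResponse(status=204)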
@login_required(login_url='/account/login')
# ------------------
# Ajax functions
# -----------------
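def ajax_endpoint(request):
    # Hypothetical stand-in for an elided Ajax handler; returns empty JSON.
    return HttpResponse(json.dumps({}), content_type='application/json')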
| [
6738,
42625,
14208,
13,
19509,
23779,
1330,
8543,
11,
651,
62,
15252,
62,
273,
62,
26429,
11,
18941,
198,
6738,
42625,
14208,
13,
4023,
1330,
367,
29281,
31077,
198,
6738,
42625,
14208,
13,
28243,
1330,
19390,
21947,
11,
40213,
198,
673... | 3.68 | 150 |
#!/usr/bin/env python
'''
Modified version of: noisy_odom.py, Team Leonard, University of Birmingham Intelligent Robotics 2018
'''
import rospy
import math
import random
# gauss() is an inbuilt method of the random module
from random import gauss
from nav_msgs.msg import Odometry
from geometry_msgs.msg import Point, Quaternion, Vector3
from std_srvs.srv import Empty, EmptyResponse
import tf2_ros
# sl = standard deviation of the linear velocity Gaussian noise
# sa = standard deviation of the angular velocity Gaussian noise
sl, sa = 0.1, 0.5
def rotateQuaternion(q_orig, yaw):
"""
Converts a basic rotation about the z-axis (in radians) into the
Quaternion notation required by ROS transform and pose messages.
:Args:
| q_orig (geometry_msgs.msg.Quaternion): to be rotated
| yaw (double): rotate by this amount in radians
:Return:
| (geometry_msgs.msg.Quaternion) q_orig rotated yaw about the z axis
"""
# Create a temporary Quaternion to represent the change in heading
q_headingChange = Quaternion()
p = 0
y = yaw / 2.0
r = 0
sinp = math.sin(p)
siny = math.sin(y)
sinr = math.sin(r)
cosp = math.cos(p)
cosy = math.cos(y)
cosr = math.cos(r)
q_headingChange.x = sinr * cosp * cosy - cosr * sinp * siny
q_headingChange.y = cosr * sinp * cosy + sinr * cosp * siny
q_headingChange.z = cosr * cosp * siny - sinr * sinp * cosy
q_headingChange.w = cosr * cosp * cosy + sinr * sinp * siny
# Multiply new (heading-only) quaternion by the existing (pitch and bank)
# quaternion. Order is important! Original orientation is the second
# argument rotation which will be applied to the quaternion is the first
# argument.
return multiply_quaternions(q_headingChange, q_orig)
def multiply_quaternions( qa, qb ):
"""
Multiplies two quaternions to give the rotation of qb by qa.
:Args:
| qa (geometry_msgs.msg.Quaternion): rotation amount to apply to qb
| qb (geometry_msgs.msg.Quaternion): to rotate by qa
:Return:
| (geometry_msgs.msg.Quaternion): qb rotated by qa.
"""
combined = Quaternion()
combined.w = (qa.w * qb.w - qa.x * qb.x - qa.y * qb.y - qa.z * qb.z)
combined.x = (qa.x * qb.w + qa.w * qb.x + qa.y * qb.z - qa.z * qb.y)
combined.y = (qa.w * qb.y - qa.x * qb.z + qa.y * qb.w + qa.z * qb.x)
combined.z = (qa.w * qb.z + qa.x * qb.y - qa.y * qb.x + qa.z * qb.w)
return combined
def getHeading(q):
"""
Get the robot heading in radians from a Quaternion representation.
:Args:
| q (geometry_msgs.msg.Quaternion): a orientation about the z-axis
:Return:
| (double): Equivalent orientation about the z-axis in radians
"""
yaw = math.atan2(2 * (q.x * q.y + q.w * q.z),
q.w * q.w + q.x * q.x - q.y * q.y - q.z * q.z)
return yaw
"""
def simple_gaussian(odom):
"Applies simple gaussian noise to current position and odometry readings."
sp, sr = 0.01, 0.008
pos = odom.pose.pose.position
odom.pose.pose.position = Point(gauss(pos.x, sp), gauss(pos.y, sp), gauss(pos.z, sp))
rot = odom.pose.pose.orientation
odom.pose.pose.orientation = Quaternion(gauss(rot.x, sr), gauss(rot.y, sr), gauss(rot.z, sr), gauss(rot.w, sr))
return odom
"""
def add_noise(odom):
'''
using the linear and angular velocities extracted from each odometry
message: add noise to these velocities and add them to the current
fictional position to keep track independently of the positions reported
by the odometry.
'''
global cl_odom
# If cl_odom is not defined, then it must be the first callback
if 'cl_odom' not in globals():
cl_odom = odom
else:
# Get velocities
lv = odom.twist.twist.linear
av = odom.twist.twist.angular
dt = (odom.header.stamp - cl_odom.header.stamp).nsecs * 1e-9
# Add noise to velocities (sl, sa: standard deviations)
lv = Vector3(gauss(lv.x, sl), gauss(lv.y, sl), lv.z)
av = Vector3(av.x, av.y, gauss(av.z, av.z * sa))
# Apply velocities to orientation of last location
cl_ori = cl_odom.pose.pose.orientation
odom.pose.pose.orientation = rotateQuaternion(cl_ori, av.z * dt)
odom.twist.twist.angular = av
yaw = getHeading(odom.pose.pose.orientation) % (2 * math.pi)
# Apply velocities to position of last location
cl_pos = cl_odom.pose.pose.position
fwd, drift = lv.x * dt, lv.y * dt
c = math.cos(yaw)
s = math.sin(yaw)
odom.pose.pose.position.x = cl_pos.x + c * fwd + s * drift
odom.pose.pose.position.y = cl_pos.y + s * fwd + c * drift
odom.twist.twist.linear = lv
# Set cl_odom to odom
cl_odom = odom
# broadcast transform
if publish_tf:
broadcast_tf(odom, rospy.Time.now())
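# The callbacks referenced below are missing from this snippet; hypothetical
# minimal versions are provided so the node can run.
def odometry_callback(odom):
    # Apply the noise model and republish on the wheel_odom topic.
    add_noise(odom)
    pub.publish(odom)

def shutdown_callback(req):
    # Service handler: raise the flag polled by the main loop.
    global shutdown_flag
    shutdown_flag = True
    return EmptyResponse()

def clean_shutdown():
    rospy.loginfo("Stopping noisy odometry publisher node")

def broadcast_tf(odom, stamp):
    # Republish the noisy pose as a transform (frame ids are assumptions).
    from geometry_msgs.msg import TransformStamped
    t = TransformStamped()
    t.header.stamp = stamp
    t.header.frame_id = 'odom'
    t.child_frame_id = 'base_link'
    t.transform.translation = Vector3(odom.pose.pose.position.x,
                                      odom.pose.pose.position.y,
                                      odom.pose.pose.position.z)
    t.transform.rotation = odom.pose.pose.orientation
    tf_broadcaster.sendTransform(t)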
if __name__ == '__main__':
    global pub, shutdown_flag, tf_broadcaster, publish_tf
    shutdown_flag = False
    publish_tf = True
    rospy.init_node('noisy_odometry')
    pub = rospy.Publisher('wheel_odom', Odometry, queue_size=1)
    rospy.Subscriber('odom', Odometry, odometry_callback)
    shutdown_service = rospy.Service('/noisy_odom/shutdown', Empty, shutdown_callback)
    tf_buffer = tf2_ros.Buffer()
    tf_listener = tf2_ros.TransformListener(tf_buffer)
    tf_broadcaster = tf2_ros.TransformBroadcaster()
    rospy.loginfo("Started noisy odometry publisher node")
    # cleanup on shutdown
    rospy.on_shutdown(clean_shutdown)
    # equivalent to spin()
    while not rospy.core.is_shutdown() and not shutdown_flag:
        rospy.rostime.wallsleep(0.5)
    rospy.Timer(rospy.Duration(1), lambda event: rospy.signal_shutdown('Shutting down'), oneshot=True)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
7061,
6,
198,
5841,
1431,
2196,
286,
25,
31210,
62,
375,
296,
13,
9078,
11,
4816,
20131,
11,
2059,
286,
18899,
49452,
47061,
2864,
198,
7061,
6,
198,
198,
11748,
686,
2777,
88,
... | 2.349656 | 2,471 |
from config import db, ma
from app import db
xCoordinate = [1] * 10
import copy
from mmcv.utils import build_from_cfg
from mmcv.runner import OPTIMIZERS
from fastda.utils import get_root_logger
x = 0
while x < 5:
if x == 3:
x += 1
continue
print(x)
x += 1
print('Acabou!')
#!/usr/bin/python
#
# list_unused_munki_pkgs.py
# Tim Sutton
#
# Simple script to list all Munki pkgs not currently referenced in a specific list
# of catalogs.
# It does not delete anything.
#
# CATALOGS can be modified to a list of catalogs in your repo that should be indexed.
# PKGS_ROOT must also be defined to be root of the mounted 'pkgs' folder.
#
# This script needs to access your Munki repo as a client to retrieve catalog data, and
# therefore requires that all client tools are installed and a valid configuration exists
# for the ManagedInstalls domain. If there are additional secure preferences such as HTTP Basic
# Auth stored in /private/var/root, you would need to run this as root.
import os
import sys
sys.path.append('/usr/local/munki')
sys.path.append('/Applications/Utilities/Managed Software Update.app/Contents/Resources')
from munki import humanReadable
from munkilib import updatecheck
CATALOGS = ['testing', 'production']
PKGS_ROOT = '/Volumes/munki_repo/pkgs'
updatecheck.getCatalogs(CATALOGS)
defined_locations = []
for c in CATALOGS:
for item in updatecheck.CATALOG[c]['items']:
for path_key in ['installer_item_location', 'uninstaller_item_location']:
if path_key in item.keys():
report_item = {}
report_item['path'] = os.path.join(PKGS_ROOT, item[path_key])
report_item['size'] = item['installer_item_size']
defined_locations.append(report_item)
totalbytes = 0
print "%-100s %-16s" % ("Path", "Size")
print
for r, d, f in os.walk(PKGS_ROOT):
for phile in f:
if (phile.endswith('.dmg') or phile.endswith('.pkg')) and \
'.AppleDouble' not in r and \
not phile.startswith('._'):
repo_pkg_path = os.path.join(r, phile)
relative_path = repo_pkg_path.split(PKGS_ROOT + '/')[1]
if repo_pkg_path not in [k['path'] for k in defined_locations]:
item_size = os.path.getsize(repo_pkg_path)
print "%-100s %-16s" % (relative_path, humanReadable(item_size / 1024))
totalbytes += item_size
print
print "Total size: %s" % (humanReadable(totalbytes / 1024))
from django.urls import path
from . import views
urlpatterns = [
path('access/token', views.getAccessToken, name='get_mpesa_access_token'),
path('online/lipa', views.lipa_na_mpesa_online, name='lipa_na_mpesa'),
# register, confirmation, validation and callback urls
path('c2b/register', views.register_urls, name="register_mpesa_validation"),
path('c2b/confirmation', views.confirmation, name="confirmation"),
path('c2b/validation', views.validation, name="validation"),
path('c2b/callback', views.call_back, name="call_back"),
]
"""
Utility for loading SNP data.
"""
__author__ = "Devon Hjelm"
__copyright__ = "Copyright 2014, Mind Research Network"
__credits__ = ["Devon Hjelm"]
__licence__ = "3-clause BSD"
__email__ = "dhjelm@mrn.org"
__maintainer__ = "Devon Hjelm"
import argparse
import copy
import logging
import gzip
import numpy as np
from os import listdir
from os import path
import random
import re
from pylearn2.utils import serial
import sys
import warnings
logging.basicConfig(format="[%(levelname)s]:%(message)s")
logger = logging.getLogger(__name__)
def parse_bim_line(line):
"""
Parse a bim line.
Format should be: chromosome SNP_name 0 location allele_1 allele_2. allele_1 != allele_2.
Parameters
----------
line: str
bim line to parse.
Returns: tuple
Dictionary entry with SNP name as key.
"""
elems = line.translate(None, "\n").split("\t")
if len(elems) == 1:
elems = line.translate(None, "\n").split(" ")
try:
assert len(elems) == 6
chromosome = int(elems[0])
SNP_name = elems[1]
assert int(elems[2]) == 0, "Third index is not 0"
location = int(elems[3])
allele_1 = elems[4]
assert allele_1 in "TCAG", "Allele not in TCAG"
allele_2 = elems[5]
assert allele_2 in "TCAG", "Allele not in TCAG"
assert allele_1 != allele_2, "Allele 1 and 2 are equal."
except AssertionError as e:
raise ValueError("Could not parse bim line \"%s\"(%s)" % (line, e))
return (SNP_name, {"location": location, "chromosome": chromosome,
"allele_1": allele_1, "allele_2": allele_2})
def parse_haps_line(line):
"""
Parse a haps line.
Format should be: chromosome SNP_name location minor(index) major(index) + subject data. Subject data
are pairs 00->0, 01->1, 10->1, 11->2
Parameters
----------
line: str
haps line to parse.
Returns: tuple
Dictionary entry with SNP name as key.
"""
elems = line.translate(None, "\n").split(" ")
elems = [e for e in elems if e != ""]
try:
assert (len(elems) - 5) % 2 == 0, "Line length error (%s)" % (elems, )
chromosome = int(elems[0])
SNP_name = elems[1]
location = int(elems[2])
minor = int(elems[3])
major = int(elems[4])
assert (minor, major) in [(1, 2), (2, 1)], "Minor major error (%s)" % ((minor, major),)
values = np.zeros((len(elems) - 5) // 2, dtype=np.int8)
for i in range(5, len(elems), 2):
x = int(elems[i])
y = int(elems[i+1])
assert (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)], "Value error (%s)" % ((x, y),)
values[(i - 5) // 2] = x + y
except AssertionError as e:
raise ValueError("Could not parse haps line \"%s\" (%s)" % (line, e))
return (SNP_name, {"location": location, "chromosome": chromosome,
"minor": minor, "major": major, "values": values,
"raw_values": elems[5:]})
def parse_tped_line(line):
"""
Parse a line in tped format.
Line format should be: chromosome SNP_name 0 location + subject data.
Subject data should be pairs in "TCAG".
Parameters
----------
line: str
tped line to be parsed.
Returns
-------
dictionary entry: tuple
Dictionary entry with the name of the SNP as the key and a dictionary
of location and values.
"""
try:
elems = line.translate(None, '\n').split(" ")
assert (len(elems) - 4) % 2 == 0
chromosome = int(elems[0])
SNP_name = elems[1]
assert int(elems[2]) == 0, "Third element is not 0"
location = int(elems[3])
values = []
for j in range(4, len(elems), 2):
assert elems[j] in "TCAG", "Element %d not in TCAG" % j
assert elems[j+1] in "TCAG", "Element %d not in TCAG" % j+1
values.append((elems[j], elems[j+1]))
except AssertionError as e:
raise ValueError("Could not parse tped line \"%s\"(%s)" % (line, e))
return (SNP_name, {"location": location, "chromosome": chromosome,
"values": values})
def parse_gen_line(line):
"""
Parse a line in gen format.
Format should be snp_%d name location minor_allele major_allele + subject data.
Subject data is in format binary triples where the number of on bits sums to 1,
e.g., 001 or 100.
Parameters
----------
line: str
Line to be parsed.
Returns
-------
dictionary entry: tuple
Dictionary entry with the name of the SNP as the key and a dictionary
of location and values.
"""
try:
elems = line.translate(None, '\n').split(" ")
assert (len(elems) - 5) % 3 == 0, "Incorrect line length (%d)." % len(elems)
assert "snp" in elems[0], "First element not snp number."
SNP_name, location, allele_1, allele_2 = elems[1:5]
assert allele_1 in "TCAG", "Allele_1 not in TCAG"
assert allele_2 in "TCAG", "Allele_2 not in TCAG"
assert allele_1 != allele_2, "Alleles are equal."
location = int(location)
values = np.zeros((len(elems) - 5) // 3, dtype=np.int8)
for j in range(5, len(elems), 3):
for i in range(j, j+3):
elems[i] = int(elems[i])
            assert (sum(elems[j:j+3]) == 1), "Line segment value does not add to 1 (%d,%d,%d)" % tuple(elems[j:j+3])
values[(j - 5) / 3] = elems[j:j+3].index(1)
except AssertionError as e:
raise ValueError("Could not parse gen line \"%s\"(%s)" % (line, e))
return (SNP_name, {"location": location, "values": values,
"allele_1": allele_1, "allele_2": allele_2})
def parse_dat_file(dat_file):
"""
Parse a complete dat file.
dat files are transposed wrt the rest of the data formats here. In addition, they only contain integer fields,
so we can use np.loadtxt.
First 6 columns are ignored.
Note: must have a bims and info file to process completely.
Parameters
----------
dat_file: str
Path for dat file to process.
Returns
-------
data: array-like
"""
data = np.loadtxt(dat_file)
data = data[:, 6:].T
return data
def convert_dat_to_haps(data, info_dict):
"""
Converts dat to haps.
Parameters
----------
data: array-like
Data to be converted
info_dict: dict
Haps dictionary with empty info_dict["values"]
Returns
-------
new_haps_dict: dict
New haps dictionary from data
"""
assert info_dict["ext"] == "info"
assert (len(info_dict) - 1) == (data.shape[0] // 2), (len(info_dict), data.shape)
new_haps_dict = copy.deepcopy(info_dict)
keys = [k for k in info_dict.keys() if k != "ext"]
data_idx = [info_dict[k]["line_number"] for k in keys]
for j, SNP_name in enumerate(keys):
if SNP_name == "rsdummy":
continue
i = 2 * data_idx[j]
assert i < data.shape[0], (i, data.shape[0])
data_entry = data[i:i+2]
assert data_entry.shape[0] == 2,\
"data entry shape on SNP %s is %s (idx %d out of %d)" % (SNP_name, data_entry.shape, i, data.shape[0])
value_entry = data_entry.sum(axis=0) - 2
assert SNP_name in new_haps_dict.keys(), SNP_name
new_haps_dict[SNP_name]["values"] = value_entry
assert "minor" in new_haps_dict[SNP_name], SNP_name
new_haps_dict.pop("rsdummy", None)
return new_haps_dict
def parse_labels_file(label_file):
"""
Parses a labels file.
    Labels are a single line with pairwise designations of controls vs cases. Space delimited.
    e.g., 0 0 1 1 1 1 translates to [1, 0, 0], where 0 is for controls and 1 is for cases.
Parameters
----------
label_file: str
Path for label file to process.
Returns
-------
labels: list of ints
The labels from the file.
"""
label_file = path.abspath(label_file)
logger.info("Parsing label file %s" % label_file)
with open(label_file, "r") as f:
lines = f.readlines()
if len(lines) != 1:
raise ValueError("Could not read label file %s, only one line allowed, %d found"\
% (label_file, len(lines)))
labels = read_line(lines[0])
return labels
def parse_file(file_name):
"""
Read a file into a dictionary.
    Extensions are .bim, .haps, .tped, .gen, or .info
Keys are SNP names, entries depend on the filetype.
Parameters
----------
file_name: str
Location of file to parse.
Returns
-------
parse_dict: dictionary
Dictionary with SNP name keys.
"""
logger.info("Parsing %s" % file_name)
exts = ["bim", "haps", "tped", "gen", "info"]
ext = file_name.split(".")[-1]
if ext == "gzip":
open_method = gzip.open
ext = file_name.split(".")[-2]
else:
open_method = open
if ext == "ped":
return
if ext not in exts:
raise NotImplementedError("Extension not supported (%s), must be in %s" % (ext, exts))
method_dict = {
"bim": parse_bim_line,
"haps": parse_haps_line,
"info": parse_haps_line,
"tped": parse_tped_line,
"gen": parse_gen_line,
}
parse_dict = {}
parse_dict["ext"] = ext
with open_method(file_name, "r") as f:
for i, line in enumerate(f.readlines()):
entry = method_dict[ext](line)
entry[1]["line_number"] = i
if entry[0] in parse_dict:
raise ValueError("Found a duplicate SNP(%s) in .%s file." % (entry[0], ext))
parse_dict[entry[0]] = entry[1]
return parse_dict
def read_chr_directory(directory):
"""
Read a directory with SNP data.
    Extracts data and other details from SNP files.
Parameters
----------
directory: str
Path to SNP directory.
Returns
-------
snp_dict: dict with extension keys and dictionary values.
Dictionaries depend on the extension.
"""
directory = path.abspath(directory)
file_dict = parse_chr_directory(directory)
snp_dict = {"directory": directory}
for ext in file_dict:
file_name = path.join(directory, file_dict[ext])
if ext == "dat":
continue
parse_dict = parse_file(file_name)
snp_dict[ext] = parse_dict
if "dat" in file_dict:
info_dict = snp_dict["info"]
bim_dict = snp_dict["bim"]
info_keys = [k for k in info_dict.keys() if k != "ext"]
bim_keys = [k for k in bim_dict.keys() if k != "ext"]
if len(set(info_keys) - set(bim_keys)) != 0:
logger.warning("Fixing info %d -> %d. This is a hack" % (len(info_keys), len(bim_keys)))
assert len(set(bim_keys) - set(info_keys)) == 1, set(bim_keys) - set(info_keys)
new_info = {"ext": "info"}
for k in bim_keys:
if k == "rsdummy":
new_info[k] = bim_dict[k]
continue
new_info[k] = info_dict[k]
new_info[k]["line_number"] = bim_dict[k]["line_number"]
snp_dict["info"] = new_info
file_name = path.join(directory, file_dict["dat"])
data = parse_dat_file(file_name)
parse_dict = convert_dat_to_haps(data, snp_dict["info"])
snp_dict["haps"] = parse_dict
if "tped" in snp_dict:
snp_dict["haps"] = dict((k, snp_dict["haps"][k])
for k in snp_dict["haps"].keys()
if k in snp_dict["tped"].keys())
snp_dict["bim"] = dict((k, snp_dict["bim"][k])
for k in snp_dict["bim"].keys()
if k in snp_dict["tped"].keys())
if "haps" in snp_dict:
assert "bim" in snp_dict
for key in snp_dict["bim"]:
if key == "rsdummy": continue
if key == "ext": continue
minor, major = [(snp_dict["haps"][key])[m] for m in ["minor", "major"]]
minor_allele, major_allele = [(snp_dict["bim"][key])[m]
for m in ["allele_1", "allele_2"]]
if (minor, major) == (2, 1):
minor_allele, major_allele = major_allele, minor_allele
snp_dict["haps"][key]["minor_allele"] = minor_allele
snp_dict["haps"][key]["major_allele"] = major_allele
if "tped" in snp_dict:
snp_dict["tped"][key]["minor_allele"] = minor_allele
snp_dict["tped"][key]["major_allele"] = major_allele
return snp_dict
def parse_chr_directory(directory):
"""
Parses SNP processing files from a SNP directory.
Parses out haps, bim, ped, tped, and gen files.
Parameters
----------
directory: str
SNP directory to parse.
Returns
-------
file_dict: dict
Dictionary of extension keys and file path values.
"""
# We need to ignore these files for now.
ignore_strings = ["HAPGENinput", "input"]
files = [f for f in listdir(directory) if path.isfile(path.join(directory,f))]
files = [f for f in files if not any([ignore_string in f for ignore_string in ignore_strings])]
file_dict = {}
logger.info("Found files %r in %s" % (files, directory))
for f_name in files:
ext = f_name.split(".")[-1]
if ext == "gz":
ext = f_name.split(".")[-2]
if ext == "haps":
# Gen directories will have 2 haps files...
if not "cases" in f_name:
insert(ext, f_name)
elif ext in ["bim", "ped", "gen", "tped", "info", "dat"]:
insert(ext, f_name)
else:
logger.warn("Unknown file type %s" % ext)
# raise ValueError("Unknown file type %s" % ext)
if "cases" in file_dict and "controls" in file_dict:
# Only cases and controls needed for gen files.
file_dict = dict((f, file_dict[f]) for f in ["cases", "controls"])
else:
for key in ["bim", "haps"]:
if key not in file_dict and ("dat" not in file_dict):
raise ValueError("%s not found in %s (%s)" % (key, directory, file_dict))
if "tped" not in file_dict:
logger.warning("tped file not found in %s, process only with .haps" % directory)
if "dat" in file_dict:
assert "info" in file_dict
assert "bim" in file_dict
logger.info("Parsed %s to %r" % (directory, file_dict))
return file_dict
def read_dataset_directory(directory, chromosomes=22,
snps_reference=None, align_reference=None, nofill=False):
"""
Reads a SNP dataset directory with multiple chromosomes.
    Note: Directory must contain subdirectories with names "chr%d" or chr%d_synthetic
which fit the chromosome directory specification in parse_chromosome_directory.
Parameters
----------
directory: str
Directory to read dataset from.
chromosomes: int, optional
Number of chromosomes to process.
    snps_reference: str, optional
        Reference dataset directory used to select SNPs.
    align_reference: str, optional
        Reference dataset directory used to align alleles.
Returns
-------
dataset_dict: dict
Dictionary of chromosome or "labels" keys and file dictionary or labels values.
"""
logger.info("Reading %d chromosomes from directory %s" % (chromosomes, directory))
dir_dict = parse_dataset_directory(directory, chromosomes=chromosomes)
dataset_dict = {}
for key in dir_dict:
if key == "labels":
dataset_dict["labels"] = parse_labels_file(dir_dict[key])
else:
chr_dict = read_chr_directory(dir_dict[key])
dataset_dict[key] = chr_dict
if "labels" not in dataset_dict:
for c in range(1, chromosomes + 1):
assert "cases" in dataset_dict[c]
assert "controls" in dataset_dict[c]
assert have_same_SNP_order(dataset_dict[c]["cases"], dataset_dict[c]["controls"])
if snps_reference is not None:
logger.info("Setting to snp reference")
snps_ref_dataset_dict, _, _ = read_dataset_directory(snps_reference,
chromosomes=chromosomes)
for key in snps_ref_dataset_dict:
if key == "labels":
continue
assert isinstance(key, int)
assert key in dataset_dict
chr_dict = dataset_dict[key]
snps_ref_chr_dict = get_dict(snps_ref_dataset_dict[key])
for ext in ["tped", "haps", "cases", "controls"]:
if ext not in chr_dict:
continue
data_dict = set_A_with_B(chr_dict[ext], snps_ref_chr_dict, nofill=nofill)
dataset_dict[key][ext] = data_dict
else:
snps_ref_dataset_dict = None
if align_reference is not None:
logger.info("Aligning")
align_ref_dataset_dict, _, _ = read_dataset_directory(align_reference,
chromosomes=chromosomes)
for key in align_ref_dataset_dict:
if key == "labels":
continue
assert key in dataset_dict
chr_dict = dataset_dict[key]
align_ref_chr_dict = align_ref_dataset_dict[key]
if "controls" in chr_dict:
continue
elif "tped" in chr_dict:
assert "tped" in align_ref_chr_dict
ext = "tped"
elif "haps" in chr_dict:
continue
else:
raise ValueError()
data_dict = align_A_to_B(chr_dict[ext], align_ref_chr_dict[ext])
dataset_dict[key][ext] = data_dict
else:
align_ref_dataset_dict = None
return dataset_dict, snps_ref_dataset_dict, align_ref_dataset_dict
def pull_dataset(dataset_dict, chromosomes=22, shuffle=True):
"""
Pull complete dataset from a dataset directory.
TODO: currently concatenates the data. Needs to save each chromosome dataset to a different
numpy file instead. Or return lists or dicts of array-likes
Parameters
----------
dataset_dict: dict
Dictionary of chromosome or "labels" keys and file dictionary or labels values.
Returns
-------
data, labels: array-like, list
The data and labels. TODO: return list or dict.
"""
if "labels" not in dataset_dict:
num_cases = None
num_controls = None
data_dict = {}
for c in range(1, chromosomes + 1):
try:
cases = dataset_dict[c]["cases"]
controls = dataset_dict[c]["controls"]
assert have_same_SNP_order(cases, controls),\
"Cases and controls have different SNPs."
cases_data = pull_gen_data(cases)
controls_data = pull_gen_data(controls)
if num_cases is None:
num_cases = cases_data.shape[0]
if num_controls is None:
num_controls = controls_data.shape[0]
assert cases_data.shape[0] == num_cases,\
"Cases data has inconsistent subjects (%d vs %d)"\
% (cases_data.shape[0], num_cases)
assert controls_data.shape[0] == num_controls,\
"Control data has inconsistent subjects (%d vs %d)"\
                % (controls_data.shape[0], num_controls)
assert cases_data.shape[1] == controls_data.shape[1],\
"Cases and controls have difference number of columns (%d vs %d)."\
% (cases_data.shape[1], controls_data.shape[1])
data_dict[c] = np.concatenate((controls_data, cases_data), axis=0)
except AssertionError as e:
raise ValueError("Pulling data from dataset chromosome %d failed (%s)" % (c, e))
labels = [0] * num_controls + [1] * num_cases
else:
data_dict = {}
for c in range(1, chromosomes + 1):
if "tped" in dataset_dict[c]:
chr_data = pull_tped_data(dataset_dict[c]["tped"])
else:
chr_data = pull_haps_data(dataset_dict[c]["haps"])
data_dict[c] = chr_data
labels = dataset_dict["labels"]
if shuffle:
logger.info("Shuffling data")
idx = range(len(labels))
random.shuffle(idx)
labels = [labels[i] for i in idx]
for key in data_dict:
data_dict[key] = data_dict[key][idx]
return data_dict, labels
def pull_haps_data(haps_dict, reference_names=None):
"""
Pull data from a haps dictionary.
Parameters
----------
haps_dict: dict
A haps dictionary (see read_haps_line)
reference_names: list, optional
List of SNP names to use as reference.
Returns
-------
data: array-like
"""
logger.info("Getting haps data.")
samples = haps_dict[haps_dict.keys()[0]]["values"].shape[0]
if reference_names == None:
reference_names = [k for k in haps_dict.keys() if k != "ext"]
data = np.zeros((samples, len(reference_names)), dtype=np.int8)
reference_names = sorted(list(reference_names))
for i, SNP_name in enumerate(reference_names):
assert SNP_name != "ext"
data[:, i] = haps_dict[SNP_name]["values"]
return data
def pull_tped_data(tped_dict, reference_names=None):
"""
Pull data from a tped dictionary.
Parameters
----------
tped_dict: dict
A tped dictionary (see read_tped_line)
reference_names: list, optional
List of SNP names to use as reference.
Returns
-------
data: array-like
"""
logger.info("Getting tped data.")
samples = len(tped_dict[tped_dict.keys()[0]]["values"])
if reference_names == None:
reference_names = [k for k in tped_dict.keys() if k != "ext"]
reference_names = sorted(list(reference_names))
data = np.zeros((samples, len(reference_names)), dtype=np.int8)
for i, SNP_name in enumerate(reference_names):
assert SNP_name != "ext"
minor, major = [tped_dict[SNP_name][m] for m in ["minor_allele", "major_allele"]]
values = tped_dict[SNP_name]["values"]
for j, value in enumerate(values):
x, y = tuple(0 if v == minor else 1 for v in value)
data[j, i] = x + y
return data
def pull_gen_data(gen_dict, reference_names=None):
"""
Pull data from a gen dictionary.
Parameters
----------
gen_dict: dict
A gen dictionary (see read_gen_line)
reference_names: list, optional
List of SNP names to use as reference.
Returns
-------
data: array-like
"""
logger.info("Getting gen data")
if reference_names is None:
reference_names = [k for k in gen_dict.keys() if k != "ext"]
samples = len(gen_dict[reference_names[0]]["values"])
reference_names = sorted(list(reference_names))
data = np.zeros((samples, len(reference_names)), dtype=np.int8)
for i, SNP_name in enumerate(reference_names):
assert SNP_name != "ext"
data[:, i] = gen_dict[SNP_name]["values"]
return data
def pull_data(file_dict, reference_names=None):
"""
    Pull data from an arbitrary file dictionary.
Parameters
----------
file_dict: dict
A file dictionary. Must be "haps", "tped", or "gen".
reference_names: list, optional
List of SNP names to use as reference.
Returns
-------
data: array-like
"""
if reference_names is not None:
assert "ext" not in reference_names
ext = file_dict["ext"]
if ext == "gen":
return pull_gen_data(file_dict, reference_names)
elif ext == "haps":
return pull_haps_data(file_dict, reference_names)
elif ext == "tped":
return pull_tped_data(file_dict, reference_names)
else:
raise ValueError("Cannot pull data from extension %s" % ext)
def check_directory(dir_dict):
"""
Directory checker.
Makes sure directory is consistent with expectations.
    Parameters
    ----------
dir_dict: dict
Directory dictionary to be checked.
"""
if "haps" in dir_dict:
assert "tped" in dir_dict
reference_names = [k for k in dir_dict["tped"].keys() if k != "ext"]
haps_data = pull_data(dir_dict["haps"], reference_names=reference_names)
tped_data = pull_data(dir_dict["tped"], reference_names=reference_names)
assert np.all(haps_data == tped_data), "%r\n%r" % (haps_data.shape, tped_data.shape)
logger.info("haps and tped have the same data.")
def compare_SNPs(file_A, file_B):
"""
Compares the SNPs from two files or dictionaries.
Parameters
----------
file_A: str or dict
File path or dictionary.
file_B: str or dict
File path or dictionary.
"""
if isinstance(file_A, str):
file_A = path.abspath(file_A)
dict_A = parse_file(file_A)
else:
assert isinstance(file_A, dict)
dict_A = file_A
if isinstance(file_B, str):
file_B = path.abspath(file_B)
dict_B = parse_file(file_B)
else:
assert isinstance(file_B, dict)
dict_B = file_B
SNPs_A = set(dict_A.keys())
SNPs_B = set(dict_B.keys())
a_not_in_b = SNPs_A.difference(SNPs_B)
b_not_in_a = SNPs_B.difference(SNPs_A)
neg_intercept = SNPs_A.symmetric_difference(SNPs_B)
print "Comparing A and B:"
print "%d SNPs in A not in B" % len(a_not_in_b)
if len(a_not_in_b) < 10 and len(a_not_in_b) != 0:
print a_not_in_b
print "%d SNPs in B not in A" % len(b_not_in_a)
if len(b_not_in_a) < 10 and len(b_not_in_a) != 0:
print b_not_in_a
print "%d SNPs symmetric difference A and B" % len(neg_intercept)
def A_is_compatible_reference_for_B(file_A, file_B):
"""
Checks if file_A is a compatible reference for file_B.
Compatible references have SNP names which are a strict subset of the other.
Parameters
----------
file_A: str or dict
File path or dictionary.
file_B: str or dict
File path or dictionary.
Returns
-------
compatible: bool
"""
if isinstance(file_A, str):
file_A = path.abspath(file_A)
dict_A = parse_file(file_A)
else:
assert isinstance(file_A, dict)
dict_A = file_A
if isinstance(file_B, str):
file_B = path.abspath(file_B)
dict_B = parse_file(file_B)
else:
assert isinstance(file_B, dict)
dict_B = file_B
if dict_A["ext"] == "gen":
#Gen files are never a reference.
return False
SNPs_A = set(dict_A.keys())
SNPs_B = set(dict_B.keys())
A_not_in_B = SNPs_A.difference(SNPs_B)
compatible = len(A_not_in_B) == 0
return compatible
def set_A_with_B(file_A, file_B, nofill=False):
"""
    Sets A to the SNP set of B, filling any SNPs missing from A by sampling
    randomly from B's priors.
Note: tped dict_A will raise a ValueError.
Parameters
----------
file_A: str or dict
File path or dictionary.
file_B: str or dict
File path or dictionary.
Returns
-------
    dict_A: dict
        Dictionary of A with filled SNPs.
"""
logger.info("Setting A with SNPs from B.")
if isinstance(file_A, str):
file_A = path.abspath(file_A)
dict_A = parse_file(file_A)
else:
assert isinstance(file_A, dict)
dict_A = file_A
if isinstance(file_B, str):
file_B = path.abspath(file_B)
dict_B = parse_file(file_B)
else:
assert isinstance(file_B, dict)
dict_B = file_B
SNPs_A = set([k for k in dict_A.keys() if k != "ext"])
SNPs_B = set([k for k in dict_B.keys() if k != "ext"])
b_not_in_a = SNPs_B.difference(SNPs_A)
if dict_A["ext"] in ["haps", "gen"]:
value_shape = dict_A[list(SNPs_A)[0]]["values"].shape
else:
value_shape = len(dict_A[list(SNPs_A)[0]]["values"])
new_dict_A = {}
new_dict_A["ext"] = dict_A["ext"]
for SNP_name in list(SNPs_A):
if SNP_name in SNPs_B:
new_dict_A[SNP_name] = dict_A[SNP_name]
if nofill:
SNPs_A = set([k for k in new_dict_A.keys() if k != "ext"])
a_not_in_b = SNPs_A.difference(SNPs_B)
assert len(a_not_in_b) == 0
return new_dict_A
logger.info("Filling with %d SNPs" % len(b_not_in_a))
if dict_A["ext"] == "gen":
#Gen files can't be filled right now.
return dict_A
for SNP_name in list(b_not_in_a):
assert SNP_name not in new_dict_A
B_values = dict_B[SNP_name]["values"]
if dict_B["ext"] in ["haps", "gen"]:
B_priors = [(B_values == i).sum(0) * 1. / B_values.shape[0] for i in range(3)]
elif dict_B["ext"] == "tped":
minor_allele, major_allele = (dict_B[SNP_name]["minor_allele"],
dict_B[SNP_name]["major_allele"])
allele_pairs = [[(minor_allele, minor_allele)],
[(minor_allele, major_allele), (major_allele, minor_allele)],
[(major_allele, major_allele)]]
B_priors = np.array([sum([1 for b in B_values if b in pairs]) * 1. / len(B_values)
for pairs in allele_pairs])
else:
raise ValueError("extension %s not supported" % B_dict["ext"])
assert abs(B_priors.sum() - 1) < 10e-5, B_priors.sum()
new_dict_A[SNP_name] = copy.copy(dict_B[SNP_name])
if new_dict_A["ext"] in ["haps", "gen"]:
new_dict_A[SNP_name]["values"] = np.random.choice(range(3), size=value_shape[0],
p=B_priors)
elif new_dict_A["ext"] == "tped":
minor_allele, major_allele = (dict_B[SNP_name]["minor_allele"],
dict_B[SNP_name]["major_allele"])
allele_pairs = [(minor_allele, minor_allele),
(minor_allele, major_allele),
(major_allele, major_allele)]
values = np.random.choice(range(3),
size=value_shape,
p=B_priors)
new_dict_A[SNP_name]["values"] = [allele_pairs[i] for i in values]
SNPs_A = set([k for k in new_dict_A.keys() if k != "ext"])
b_not_in_a = SNPs_B.difference(SNPs_A)
a_not_in_b = SNPs_A.difference(SNPs_B)
assert len(b_not_in_a) == 0
assert len(a_not_in_b) == 0
logger.info("Setting completed.")
return new_dict_A
def A_has_similar_priors_to_B(file_A, file_B):
"""
Checks if file_A and file_B have similar priors.
    Priors are similar if P(i)_j, for i in range(3), is within 15% for 95% of the SNPs j.
Parameters
----------
file_A: str or dict
File path or dictionary.
file_B: str or dict
File path or dictionary.
Returns
-------
similar: bool
"""
if isinstance(file_A, str):
file_A = path.abspath(file_A)
dict_A = parse_file(file_A)
else:
assert isinstance(file_A, dict)
dict_A = file_A
if isinstance(file_B, str):
file_B = path.abspath(file_B)
dict_B = parse_file(file_B)
else:
assert isinstance(file_B, dict)
dict_B = file_B
reference_names = [k for k in dict_A.keys() if (k != "ext") and (k in dict_B.keys())]
data_A = pull_data(dict_A, reference_names=reference_names)
data_B = pull_data(dict_B, reference_names=reference_names)
priors_A = [(data_A == i).sum(0) * 1. / data_A.shape[0] for i in range(3)]
priors_B = [(data_B == i).sum(0) * 1. / data_B.shape[0] for i in range(3)]
assert np.allclose(priors_A[0] + priors_A[1] + priors_A[2], np.zeros(priors_A[0].shape) + 1)
assert np.allclose(priors_B[0] + priors_B[1] + priors_B[2], np.zeros(priors_B[0].shape) + 1)
similar = True
for i, (prior_A, prior_B) in enumerate(zip(priors_A, priors_B)):
percent_off = (
len(np.where(prior_A - prior_B > 0.15)[0].tolist()) * 1. / prior_A.shape[0])
percent_reversed = (len(np.where(
np.logical_and(
prior_A - priors_B[-(i - 1) + 1] <= 0.15,
prior_A - prior_B > 0.15))[0].tolist()) * 1. /prior_A.shape[0])
if percent_off > .05:
logger.warn("Priors P(%d) not close: %.2f%% off by 15%% or more"
% (i, percent_off * 100))
logger.warn("Priors P(%d) reversed for %.2f%% of SNPs"
% (i, percent_reversed * 100))
similar = False
else:
logger.info("Priors P(%s) close: %.2f%% off by 15%% or more"
% (i, percent_off * 100))
return similar
def A_is_aligned_to_B(file_A, file_B):
"""
Checks if file_A and file_B are aligned.
Files or dictionaries are aligned if one is a compatible
reference for the other and the major / minor specifications are the same.
Parameters
----------
file_A: str or dict
File path or dictionary.
file_B: str or dict
File path or dictionary.
Returns
-------
aligned: bool
"""
if isinstance(file_A, str):
file_A = path.abspath(file_A)
dict_A = parse_file(file_A)
else:
assert isinstance(file_A, dict)
dict_A = file_A
if isinstance(file_B, str):
file_B = path.abspath(file_B)
dict_B = parse_file(file_B)
else:
assert isinstance(file_B, dict)
dict_B = file_B
reference_keys = [k for k in dict_A.keys() if (k != "ext") and (k in dict_B.keys())]
not_aligned = 0
for key in reference_keys:
if key == "ext":
continue
if dict_A[key]["minor_allele"] != dict_B[key]["minor_allele"]:
#logger.info("Alleles for %s not aligned (%s, %s) vs (%s, %s)"
# % (key, dict_A[key]["minor_allele"], dict_A[key]["major_allele"],
# dict_B[key]["minor_allele"], dict_B[key]["major_allele"]))
not_aligned += 1
logger.info("%d alleles for A and B are not aligned" % not_aligned)
aligned = not_aligned == 0
return aligned
def align_A_to_B(file_A, file_B):
"""
Align two files or dictionaries.
B must be a compatible reference of A.
TODO: remove this restriction and add filling possibly.
Parameters
----------
file_A: str or dict
File path or dictionary.
file_B: str or dict
File path or dictionary.
Returns
-------
dict_A: dict
An aligned dictionary for A.
"""
logger.info("Aligning files")
if isinstance(file_A, str):
file_A = path.abspath(file_A)
dict_A = parse_file(file_A)
else:
assert isinstance(file_A, dict)
dict_A = file_A
if isinstance(file_B, str):
file_B = path.abspath(file_B)
dict_B = parse_file(file_B)
else:
assert isinstance(file_B, dict)
dict_B = file_B
if dict_A["ext"] == "gen":
#gen files can't be aligned, as they have no minor/major reference.
return dict_A
reference_names = [k for k in dict_A.keys() if (k != "ext") and (k in dict_B.keys())]
for SNP_name in reference_names:
minor_A, major_A = [dict_A[SNP_name][m] for m in ["minor_allele", "major_allele"]]
minor_B, major_B = [dict_B[SNP_name][m] for m in ["minor_allele", "major_allele"]]
if minor_A == minor_B and major_A == major_B:
pass
elif minor_A == major_B and major_A == minor_B:
if dict_A["ext"] in ["haps", "gen"]:
dict_A[SNP_name]["values"] = (-(dict_A[SNP_name]["values"] - 1)) + 1
dict_A[SNP_name]["minor_allele"] = dict_B[SNP_name]["minor_allele"]
dict_A[SNP_name]["major_allele"] = dict_B[SNP_name]["major_allele"]
assert dict_A[SNP_name]["minor_allele"] == dict_B[SNP_name]["minor_allele"]
else:
raise ValueError()
assert A_is_aligned_to_B(dict_A, dict_B)
logger.info("Alignment finished.")
return dict_A
def check_flipping(haps_dict, gen_dict):
"""
    Checks whether haps and gen files flip consistently.
    Right now they don't, so do not use.
"""
reference_names = [k for k in gen_dict.keys() if k != "ext"]
for SNP_name in reference_names:
minor, major = haps_dict[SNP_name]["minor"], haps_dict[SNP_name]["major"]
assert (minor, major) in [(1, 2), (2, 1)]
if (minor, major) == (2, 1):
assert haps_dict[SNP_name]["minor_allele"] == gen_dict[SNP_name]["major_allele"]
assert haps_dict[SNP_name]["major_allele"] == gen_dict[SNP_name]["minor_allele"]
def have_same_SNP_order(dict_A, dict_B):
"""
Checks if two dictionaries have the same SNP order.
"""
have_same_order = [k for k in dict_A.keys() if k != "ext"] == [k for k in dict_B.keys() if k != "ext"]
return have_same_order
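def get_ext(chr_dict):
    """
    Hypothetical reconstruction of a helper missing from this snippet:
    return the extension of the data dictionary present for a chromosome.
    """
    for ext in ["tped", "haps", "gen", "cases", "controls"]:
        if ext in chr_dict:
            return ext
    raise ValueError("No data dictionary found.")

def get_dict(chr_dict):
    """
    Hypothetical reconstruction of a helper missing from this snippet:
    return the data dictionary present for a chromosome.
    """
    return chr_dict[get_ext(chr_dict)]

def split(file_dict, idx):
    """
    Hypothetical reconstruction of a helper missing from this snippet:
    restrict each SNP's values to the subjects indexed by idx.
    """
    new_dict = copy.deepcopy(file_dict)
    for k in new_dict:
        if k == "ext":
            continue
        new_dict[k]["values"] = new_dict[k]["values"][idx]
    return new_dict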
def make_argument_parser():
"""
Creates an ArgumentParser to read the options for this script from
sys.argv
"""
parser = argparse.ArgumentParser()
parser.add_argument("--out", default="snp")
parser.add_argument("-v", "--verbose", action="store_true", help="Show more verbosity!")
subparsers = parser.add_subparsers(help="sub-command help")
subparsers.required = True
compare_parser = subparsers.add_parser("compare", help="Compare 2 chromosome directories")
compare_parser.set_defaults(which="compare")
compare_parser.add_argument("dir_1")
compare_parser.add_argument("dir_2")
extract_parser = subparsers.add_parser("extract")
extract_parser.set_defaults(which="extract")
extract_parser.add_argument("directory", help="SNP dataset directory.")
extract_parser.add_argument("out_dir", help="Output directory for Pylearn2 data.")
extract_parser.add_argument("-c", "--chromosomes", default=22,
type=int, help="Number of chromosomes to process.")
extract_parser.add_argument("-u", "--use_snps", default=None,
help="Reference dataset to use SNPs from.")
extract_parser.add_argument("-a", "--align_to", default=None)
extract_parser.add_argument("--nofill", action="store_true")
separate_parser = subparsers.add_parser("separate", help="Separate haps for GWA sim")
separate_parser.set_defaults(which="separate")
separate_parser.add_argument("chr_dir")
separate_parser.add_argument("labels")
separate_parser.add_argument("-s", "--separate_info", action="store_true")
separate_parser.add_argument("-t", "--transposed", action="store_true")
return parser
def split_haps(chr_dict, labels, separate_info=False, transposed=False):
"""
Splits haps files into 2 and saves them.
"""
if not "haps" in chr_dict:
raise ValueError()
controls_idx = [i for i, j in enumerate(labels) if j == 0]
cases_idx = [i for i, j in enumerate(labels) if j == 1]
controls_haps = split(chr_dict["haps"], controls_idx)
cases_haps = split(chr_dict["haps"], cases_idx)
try:
prefix = re.findall(r'chr\d+', chr_dict["directory"])[0] + "_"
except IndexError:
prefix = ""
if transposed:
transposed_prefix = "transposed_"
else:
transposed_prefix = ""
write_haps_file(controls_haps,
path.join(chr_dict["directory"],
prefix + transposed_prefix + "input_controls.haps"),
omit_info=separate_info, transposed=transposed)
write_haps_file(cases_haps,
path.join(chr_dict["directory"],
prefix + transposed_prefix + "input_cases.haps"),
omit_info=separate_info, transposed=transposed)
write_haps_file(chr_dict["haps"], path.join(chr_dict["directory"],
prefix.translate(None, "_") + ".info"),
info_only=separate_info)
if __name__ == "__main__":
parser = make_argument_parser()
args = parser.parse_args()
if args.verbose:
logger.setLevel(logging.DEBUG)
if args.which == "separate":
chr_dict = read_chr_directory(args.chr_dir)
labels = parse_labels_file(args.labels)
split_haps(chr_dict, labels, args.separate_info, args.transposed)
elif args.which == "compare":
dir_dict_1 = read_chr_directory(args.dir_1)
dir_dict_2 = read_chr_directory(args.dir_2)
dict_1 = get_dict(dir_dict_1)
dict_2 = get_dict(dir_dict_2)
if have_same_SNP_order(dict_1, dict_2):
print "files have the same SNP order."
else:
print "files do not have the same SNP order."
compare_SNPs(dict_1, dict_2)
A_lessthan_B = A_is_compatible_reference_for_B(dict_1, dict_2)
print "%s can%s be used as a reference for %s"\
% (args.dir_1, "" if A_lessthan_B else " not", args.dir_2)
B_lessthan_A = A_is_compatible_reference_for_B(dict_2, dict_1)
print "%s can%s be used as a reference for %s"\
% (args.dir_2, "" if B_lessthan_A else " not", args.dir_1)
if A_is_aligned_to_B(dict_1, dict_2):
print "A and B are aligned."
else:
print "A and B are not aligned."
if A_lessthan_B:
dict_2 = align_A_to_B(dict_2, dict_1)
elif B_lessthan_A:
dict_1 = align_A_to_B(dict_1, dict_2)
else:
dict_1 = set_A_with_B(dict_1, dict_2)
dict_1 = align_A_to_B(dict_1, dict_2)
if A_has_similar_priors_to_B(dict_1, dict_2):
print "A and B have similar priors."
else:
print "A and B do not have similar priors."
elif args.which == "extract":
data_dict, snp_ref_data_dict, ref_data_dict = read_dataset_directory(
args.directory,
chromosomes=args.chromosomes,
snps_reference=args.use_snps,
align_reference=args.align_to,
nofill=args.nofill)
if snp_ref_data_dict is not None:
for key in data_dict:
if key == "labels":
continue
data_chr_dict = get_dict(data_dict[key])
snp_ref_chr_dict = get_dict(snp_ref_data_dict[key])
if args.nofill:
data_snps = set([k for k in data_chr_dict if k != "ext"])
snp_ref_snps = set([k for k in snp_ref_chr_dict if k != "ext"])
in_data_not_in_ref = data_snps - snp_ref_snps
assert len(in_data_not_in_ref) == 0, len(in_data_not_in_ref)
logger.info("Data now a strict subset of reference with %d SNPs" % len(data_snps))
else:
assert have_same_SNP_order(data_chr_dict, snp_ref_chr_dict)
logger.info("Data now the same set of SNPs as reference")
if ref_data_dict is not None:
for key in data_dict:
if key == "labels":
continue
ext1 = get_ext(data_dict[key])
ext2 = get_ext(ref_data_dict[key])
assert A_has_similar_priors_to_B(data_dict[key][ext1],
ref_data_dict[key][ext2])
data, labels = pull_dataset(data_dict, chromosomes=args.chromosomes)
out_dir = serial.preprocess("${PYLEARN2_NI_PATH}/" + args.out_dir)
assert path.isdir(out_dir), out_dir
logger.info("Saving labels to %s" % out_dir)
np.save(path.join(out_dir, "labels.npy"), labels)
for c in data_dict:
if c == "labels":
continue
logger.info("Saving chromosome %d to %s" %
(c, path.join(out_dir, "chr%d.npy" % c)))
np.save(path.join(out_dir, "chr%d.npy" % c), data[c])
save_snp_names(path.join(out_dir, "chr%d.snps" % c),
[k for k in data_dict[c][get_ext(data_dict[c])]
if k != "ext"])
| [input_ids omitted] | 2.124171 | 20,971 |
#!/usr/bin/python
# Copyright (c) 2014 Adafruit Industries
# Author: Tony DiCola
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
from Adafruit_IO import Client
import RPi.GPIO as GPIO
import Adafruit_DHT
import time
from Adafruit_LED_Backpack import SevenSegment
# make connection to adafruit IO
# (the API key is hard-coded here; in practice it should be read from an
# environment variable or a config file kept out of version control)
aio = Client('df25f448176d4b07b85faaa103b86914')
# Parse command line parameters.
sensor_args = { '11': Adafruit_DHT.DHT11,
'22': Adafruit_DHT.DHT22,
'2302': Adafruit_DHT.AM2302 }
if len(sys.argv) == 3 and sys.argv[1] in sensor_args:
sensor = sensor_args[sys.argv[1]]
pin = sys.argv[2]
else:
print('usage: sudo ./Adafruit_DHT.py [11|22|2302] GPIOpin#')
print('example: sudo ./Adafruit_DHT.py 2302 4 - Read from an AM2302 connected to GPIO #4')
#hardcode the sensor and pin
sensor = Adafruit_DHT.AM2302
pin = 4
# Try to grab a sensor reading. Use the read_retry method which will retry up
# to 15 times to get a sensor reading (waiting 2 seconds between each retry).
humidity, temperature = Adafruit_DHT.read_retry(sensor, pin)
# Un-comment the line below to convert the temperature to Fahrenheit.
temperature = temperature * 9/5.0 + 32
# Now here we set the constants
#
# templow is when we turn off the fridge
# temphigh is when we turn on the fridge
#
# humlow is when we turn on the humidifier
# humhigh is when we turn off the humidifier
#
# fridgepin is the GPIO pin we are using to control the fridge relay
# humpin is the humidifier relay pin we are using
#
# #################################################
templow = 58
temphigh = 60
humlow = 75
humhigh = 90
fridgepin = 26
humpin = 17
# now if we are above temphigh, turn on the fridge pin
# we are just going to turn it on until we hit the threshold then for a little longer
# hoping we can rely on the thermal mass of the fridge to keep it constant
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
GPIO.setup(fridgepin,GPIO.OUT)
# gonna have to set up the humidity pin out
GPIO.setup(humpin,GPIO.OUT)
# initialize
fridgeON = 0
humidityON = 0
# start control loop
for x in range(30):
humidity, temperature = Adafruit_DHT.read_retry(sensor, pin)
# convert to fh
temperature = temperature * 9/5.0 + 32
# if the temperature is above our high temp, turn the fridge on
if (temperature > temphigh):
GPIO.output(fridgepin,GPIO.HIGH) # turn on fridgepin
fridgeON = 1
print("fridge on")
elif temperature < templow:
        GPIO.output(fridgepin,GPIO.LOW) # if the temp is too low, turn the fridge off
fridgeON = 0
print("fridge off")
# do the same for humidity
if (humidity >humhigh):
GPIO.output(humpin,GPIO.HIGH) # turn off humidity
humidityON = 0
print("humidity off")
elif humidity< humlow:
GPIO.output(humpin,GPIO.LOW) # turn on humidity
humidityON = 1
print("humidity on")
# print the temp
try:
display = SevenSegment.SevenSegment()
display.begin()
display.clear()
time.sleep(1.0)
display.print_float(temperature)
display.write_display()
except:
print("display write error")
# print a status
    print(temperature)
    print(x)
# write to adafruit
try:
aio.send('fridge_temp',temperature)
aio.send('fridge_humidity',humidity)
aio.send('fridge_on', fridgeON)
aio.send('humidity_on',humidityON)
except:
print("adafruit send error")
# sleep for a bit
time.sleep(30)
#all this below is useless.
# Note that sometimes you won't get a reading and
# the results will be null (because Linux can't
# guarantee the timing of calls to read the sensor).
# If this happens try again!
if humidity is not None and temperature is not None:
aio.send('desk_temp',temperature)
aio.send('desk_humidity',humidity)
print('Temp={0:0.1f}* Humidity={1:0.1f}%'.format(temperature, humidity)
)
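# Sketch (not in the original script): the same templow/temphigh deadband rule
# as a pure function, so the relay logic can be unit-tested without GPIO
# hardware. The function name is hypothetical.
def _next_fridge_state(temp_f, is_on, low=templow, high=temphigh):
    if temp_f > high:
        return True   # too warm: run the fridge
    if temp_f < low:
        return False  # too cold: stop the fridge
    return is_on      # inside the deadband: keep the previous state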
| [input_ids omitted] | 2.800112 | 1,781 |
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import unittest
import StringIO
from py_vulcanize import fake_fs
from py_vulcanize import generate
from py_vulcanize import html_generation_controller
from py_vulcanize import html_module
from py_vulcanize import parse_html_deps
from py_vulcanize import project as project_module
from py_vulcanize import resource
from py_vulcanize import resource_loader as resource_loader
| [input_ids omitted] | 3.45 | 160 |
# vim: tabstop=4 expandtab autoindent shiftwidth=4 fileencoding=utf-8
"""
Provides Nose and Django test case assert functions
"""
from django.test.testcases import TransactionTestCase
from django.core import mail
import re
## Python
from nose import tools
for t in dir(tools):
if t.startswith('assert_'):
vars()[t] = getattr(tools, t)
## Django
caps = re.compile('([A-Z])')
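# The fragment uses two helpers that were defined earlier in the original
# file. The definitions below are plausible reconstructions (assumptions, NOT
# the verbatim originals) so the module runs standalone.
def pep8(name):
    """assertEqual -> assert_equal (CapWords/camelCase to snake_case)."""
    return caps.sub(lambda m: '_' + m.group(1).lower(), name)
class Dummy(TransactionTestCase):
    """Throwaway TestCase instance used only to borrow its assert* methods."""
    def nop(self):
        pass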
_t = Dummy('nop')
for at in [ at for at in dir(_t)
if at.startswith('assert') and not '_' in at ]:
pepd = pep8(at)
vars()[pepd] = getattr(_t, at)
del Dummy
del _t
del pep8
## New
def assert_code(response, status_code, msg_prefix=''):
"""Asserts the response was returned with the given status code
"""
if msg_prefix:
msg_prefix = '%s: ' % msg_prefix
    assert response.status_code == status_code, \
        '%sResponse code was %d (expected %d)' % \
        (msg_prefix, response.status_code, status_code)
def assert_ok(response, msg_prefix=''):
"""Asserts the response was returned with status 200 (OK)
"""
return assert_code(response, 200, msg_prefix=msg_prefix)
def assert_mail_count(count, msg=None):
"""Assert the number of emails sent.
The message here tends to be long, so allow for replacing the whole
thing instead of prefixing.
"""
if msg is None:
msg = ', '.join([e.subject for e in mail.outbox])
msg = '%d != %d %s' % (len(mail.outbox), count, msg)
assert_equals(len(mail.outbox), count, msg)
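# Example usage (a hypothetical test, not part of this module):
class _ExampleTest(TransactionTestCase):
    def test_homepage_sends_no_mail(self):
        response = self.client.get('/')
        assert_ok(response, msg_prefix='homepage')
        assert_mail_count(0)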
# EOF
| [input_ids omitted] | 2.550085 | 589 |
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 19 13:16:25 2015
@author: hbanks
Brevity required, prurience preferred
"""
import os
import io
import glob
import errno
import copy
import json
import warnings
import numpy as np
from scipy.optimize import curve_fit
import scipy.interpolate as spi
import scipy.optimize as spo
import scipy.fftpack as fft
import matplotlib.pyplot as plt
import scipy.ndimage as ndimage
import itertools as itt
np.set_printoptions(linewidth=500)
# One of the main results is the HighSidebandCCD.sb_results array. These are the
# various mappings between index and real value
# Ideally, this code should be converted to pandas to avoid this issue,
# but that's outside the scope of current work.
# [sb number, Freq (eV), Freq error (eV), Gauss area (arb.), Area error, Gauss linewidth (eV), Linewidth error (eV)]
# [ 0 , 1 , 2, , 3 , 4 , 5 , 6 ]
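# Named indices for the sb_results columns above -- a convenience sketch
# (an assumption; the original code uses an elided helper module `sbarr`):
SB_NUM, FREQ, FREQ_ERR, AREA, AREA_ERR, WIDTH, WIDTH_ERR = range(7)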
####################
# Objects
####################
# class LaserLineCCD(HighSidebandCCD):
# """
# Class for use when doing alinging/testing by sending the laser
# directly into the CCD. Modifies how "sidebands" and guess and fit,
# simply looking at the max signal.
# """
# def guess_sidebands(self, cutoff=8, verbose=False, plot=False):
# pass
class NeonNoiseAnalysis(CCD):
"""
This class is used to make handling neon calibration lines easier. It's not great.
"""
def noise_and_signal(self):
"""
This bad boy calculates the standard deviation of the space between the
neon lines.
The noise regions are, in nm:
high: 784-792
low1: 795-806
low2: 815-823
low3: 831-834
the peaks are located at, in nm:
#1, weak: 793.6
#2, medium: 794.3
#3, medium: 808.2
#4, weak: 825.9
#5, strong: 830.0
"""
print('\n\n')
self.ccd_data = np.flipud(self.ccd_data)
# self.high_noise_region = np.array(self.ccd_data[30:230, :])
self.high_noise_region = np.array(self.ccd_data[80:180, :]) # for dark current measurements
self.low_noise_region1 = np.array(self.ccd_data[380:700, :])
self.low_noise_region2 = np.array(self.ccd_data[950:1200, :])
self.low_noise_region3 = np.array(self.ccd_data[1446:1546, :])
# self.high_noise = np.std(self.high_noise_region[:, 1])
self.high_noise_std = np.std(self.high_noise_region[:, 1])
self.high_noise_sig = np.mean(self.high_noise_region[:, 1])
self.low_noise1 = np.std(self.low_noise_region1[:, 1])
self.low_noise2 = np.std(self.low_noise_region2[:, 1])
self.low_noise_std = np.std(self.low_noise_region2[:, 1])
self.low_noise_sig = np.mean(self.low_noise_region2[:, 1])
self.low_noise3 = np.std(self.low_noise_region3[:, 1])
# self.noise_list = [self.high_noise, self.low_noise1, self.low_noise2, self.low_noise3]
self.peak1 = np.array(self.ccd_data[303:323, :])
self.peak2 = np.array(self.ccd_data[319:339, :])
self.peak3 = np.array(self.ccd_data[736:746, :])
self.peak4 = np.array(self.ccd_data[1268:1288, :])
self.peak5 = np.array(self.ccd_data[1381:1421, :])
temp_max = np.argmax(self.peak1[:, 1])
self.signal1 = np.sum(self.peak1[temp_max - 1:temp_max + 2, 1])
self.error1 = np.sqrt(np.sum(self.peak1[temp_max - 1:temp_max + 2, 2] ** 2))
temp_max = np.argmax(self.peak2[:, 1])
self.signal2 = np.sum(self.peak2[temp_max - 1:temp_max + 2, 1])
self.error2 = np.sqrt(np.sum(self.peak2[temp_max - 1:temp_max + 2, 2] ** 2))
temp_max = np.argmax(self.peak3[:, 1])
self.signal3 = np.sum(self.peak3[temp_max - 1:temp_max + 2, 1])
self.error3 = np.sqrt(np.sum(self.peak3[temp_max - 1:temp_max + 2, 2] ** 2))
temp_max = np.argmax(self.peak4[:, 1])
self.signal4 = np.sum(self.peak4[temp_max - 1:temp_max + 2, 1])
self.error4 = np.sqrt(np.sum(self.peak4[temp_max - 1:temp_max + 2, 2] ** 2))
temp_max = np.argmax(self.peak5[:, 1])
self.signal5 = np.sum(self.peak5[temp_max - 1:temp_max + 2, 1])
self.error5 = np.sqrt(np.sum(self.peak5[temp_max - 1:temp_max + 2, 2] ** 2))
self.signal_list = [self.signal1, self.signal2, self.signal3, self.signal4, self.signal5]
self.error_list = [self.error1, self.error2, self.error3, self.error4, self.error5]
print("Signal list:", self.signal_list)
self.ccd_data = np.flipud(self.ccd_data)
def process_stuff(self):
"""
        This one puts the high- and low-noise means and standard deviations in a nice horizontal array
"""
# self.results = np.array([self.high_noise, self.low_noise1, self.signal5, self.error5])
# average = np.mean([self.low_noise1, self.low_noise2, self.low_noise3])
# self.results = np.array([self.high_noise, self.low_noise1, self.low_noise2, self.low_noise3, self.high_noise/average])
self.results = np.array([self.high_noise_sig, self.high_noise_std, self.low_noise_sig, self.low_noise_std])
def collect_noise(neon_list, param_name, folder_name, file_name, name='Signal'):
"""
This function acts like save parameter sweep.
param_name = string that we're gonna save!
"""
# param_array = None
for elem in neon_list:
print("pname: {}".format(elem.parameters[param_name]))
print("results:", elem.results)
temp = np.insert(elem.results, 0, elem.parameters[param_name])
try:
param_array = np.row_stack((param_array, temp))
except UnboundLocalError:
param_array = np.array(temp)
if len(param_array.shape) == 1:
print("I don't think you want this file")
return
# append the relative peak error
print('\n', param_array, '\n')
param_array = np.column_stack((param_array, param_array[:, 4] / param_array[:, 3]))
# append the snr
param_array = np.column_stack((param_array, param_array[:, 3] / param_array[:, 2]))
try:
param_array = param_array[param_array[:, 0].argsort()]
except:
print("param_array shape", param_array.shape)
raise
try:
os.mkdir(folder_name)
except OSError as e:
if e.errno == errno.EEXIST:
pass
else:
raise
file_name = file_name + '.txt'
origin_import1 = param_name + ",Noise,Noise,Signal,error,rel peak error,peak signal-to-noise"
# origin_import1 = param_name + ",Noise,Noise,Noise,Noise,Ratio"
origin_import2 = ",counts,counts,counts,counts,,"
# origin_import2 = ",counts,counts,counts,,"
origin_import3 = ",High noise region,Low noise region,{},{} error,{} rel error, {}".format(name, name, name, name)
# origin_import3 = ",High noise region,Low noise region 1,Low noise region 2,Low noise region 3,High/low"
header_total = origin_import1 + "\n" + origin_import2 + "\n" + origin_import3
# print "Spec header: ", spec_header
print("the param_array is:", param_array)
np.savetxt(os.path.join(folder_name, file_name), param_array, delimiter=',',
header=header_total, comments='', fmt='%0.6e')
print("Saved the file.\nDirectory: {}".format(os.path.join(folder_name, file_name)))
class HighSidebandCCDRaw(HighSidebandCCD):
"""
This class is meant for passing in an image file (currently supports a 2x1600)
Which it does all the processing on.
"""
@staticmethod
def gen_wavelengths(center_lambda, grating):
'''
This returns a 1600 element list of wavelengths for each pixel in the EMCCD based on grating and center wavelength
grating = which grating, 1 or 2
center = center wavelength in nanometers
'''
b = 0.75 # length of spectrometer, in m
k = -1.0 # order looking at
r = 16.0e-6 # distance between pixles on CCD
if grating == 1:
d = 1. / 1800000.
gamma = 0.213258508834
delta = 1.46389935365
elif grating == 2:
d = 1. / 1200000.
gamma = 0.207412628027
delta = 1.44998344749
elif grating == 3:
d = 1. / 600000.
gamma = 0.213428934011
delta = 1.34584754696
else:
print("What a dick, that's not a valid grating")
return None
center = center_lambda * 10 ** -9
wavelength_list = np.arange(-799.0, 801.0)
output = d * k ** (-1) * ((-1) * np.cos(delta + gamma + (-1) * np.arccos(
(-1 / 4) * (1 / np.cos((1 / 2) * gamma)) ** 2 * (
2 * (np.cos((1 / 2) * gamma) ** 4 * (2 + (-1) * d ** (-2) * k ** 2 * center ** 2 + 2 * np.cos(gamma))) ** (
1 / 2) + d ** (-1) * k * center * np.sin(gamma))) + np.arctan(
b ** (-1) * (r * wavelength_list + b * np.cos(delta + gamma)) * (1 / np.sin(delta + gamma)))) + (
1 + (-1 / 16) * (1 / np.cos((1 / 2) * gamma)) ** 4 * (2 * (
np.cos((1 / 2) * gamma) ** 4 * (
2 + (-1) * d ** (-2) * k ** 2 * center ** 2 + 2 * np.cos(gamma))) ** (1 / 2) + d ** (
-1) * k * center * np.sin(
gamma)) ** 2) ** (1 / 2))
output = (output + center) * 10 ** 9
return output
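# Example (hypothetical settings, not from the original): map the 1600 EMCCD
# pixels to wavelengths in nm for grating 2 centered at 760 nm:
#   wavelengths_nm = HighSidebandCCDRaw.gen_wavelengths(760, 2)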
class HighSidebandPMTOld(PMT):
"""
Old version: Replaced March 01, 2017
Class initialized by loading in data set.
Multiple copies of the same sideband were stacked as raw data and combined,
effectively causing (2) 10-pt scans to be treated the same as (1) 20pt scan.
This works well until you have photon counted pulses.
"""
def __init__(self, file_path, verbose=False):
"""
Initializes a SPEX spectrum. It'll open a single file, then read
the data from that file using .add_sideband(). The super's init will handle the parameters
and the description.
attributes:
self.parameters - dictionary of important experimental parameters, created in PMT
self.sb_dict - keys are sideband order, values are PMT data arrays
self.sb_list - sorted list of included sidebands
:param file_path: path to the current file
:type file_path: str
:param verbose: Flag to see the nitty gritty details
:type verbose: bool
:return:
"""
        super(HighSidebandPMTOld, self).__init__(
file_path) # Creates the json parameters dictionary
self.fname = file_path
self.parameters["files included"] = [file_path]
with open(file_path, 'r') as f:
sb_num = int(f.readline()[1:])
raw_temp = np.genfromtxt(file_path, comments='#', delimiter=',')[3:, :]
self.initial_sb = sb_num
self.initial_data = np.array(raw_temp)
self.sb_dict = {sb_num: np.array(raw_temp)}
self.sb_list = [sb_num]
def add_sideband(self, other):
"""
This bad boy will add another PMT sideband object to the sideband spectrum of this object. It handles
when you measure the same sideband twice. It assumes both are equally "good"
It currently doesn't do any sort of job combining dictionaries or anything, but it definitely could, if
you have two incomplete dictionaries
        :param other: the new sideband data to add to the larger spectrum. Add means append, no addition is performed
:type other: HighSidebandPMT
:return:
"""
"""
This bad boy will add another PMT sideband object to the sideband spectrum of this object
It currently doesn't do any sort of job combining dictionaries or anything, but it definitely could
"""
self.parameters["files included"].append(other.fname)
        if other.initial_sb not in self.sb_list:
            self.sb_list.append(other.initial_sb)
# Make things comma delimited?
        try:
            self.sb_dict[other.initial_sb] = np.vstack(
                (self.sb_dict[other.initial_sb], other.initial_data))
        except KeyError:
            self.sb_dict[other.initial_sb] = np.array(other.initial_data)
def process_sidebands(self, verbose=False):
"""
        This bad boy will clean up the garbled mess that is the object beforehand,
including clearing out misfired shots and doing the averaging.
Affects:
self.sb_dict = Averages over sidebands
Creates:
self.sb_list = The sideband orders included in this object.
:param verbose: Flag to see the nitty gritty details.
:type verbose: bool
:return: None
"""
for sb_num, sb in list(self.sb_dict.items()):
if sb_num == 0:
fire_condition = -np.inf # This way the FEL doesn't need to be on during laser line measurement
else:
fire_condition = np.mean(sb[:, 2]) / 2 # Say FEL fired if the
# cavity dump signal is
# more than half the mean
# of the cavity dump signal
frequencies = sorted(list(set(sb[:, 0])))
temp = None
for freq in frequencies:
data_temp = np.array([])
for point in sb:
if point[0] == freq and point[2] > fire_condition:
data_temp = np.hstack((data_temp, point[3]))
try:
temp = np.vstack(
(temp, np.array([freq, np.mean(data_temp),
np.std(data_temp) / np.sqrt(len(data_temp))])))
except:
temp = np.array([freq, np.mean(data_temp),
np.std(data_temp) / np.sqrt(len(data_temp))])
temp[:, 0] = temp[:, 0] / 8065.6 # turn NIR freq into eV
temp = temp[temp[:, 0].argsort()]
self.sb_dict[sb_num] = np.array(temp)
self.sb_list = sorted(self.sb_dict.keys())
if verbose:
print("Sidebands included", self.sb_list)
def integrate_sidebands(self, verbose=False):
"""
This method will integrate the sidebands to find their strengths, and then
use a magic number to define the width, since they are currently so utterly
undersampled for fitting.
It is currently the preferred method for calculating sideband strengths.
self.fit_sidebands is probably better with better-sampled lines.
Creates:
self.sb_results = full list of integrated data. Column order is:
[sb order, Freq (eV), "error" (eV), Integrate area (arb.), area error, "Linewidth" (eV), "Linewidth error" (eV)
self.full_dict = Dictionary where the SB order column is removed and turned into the keys. The values
are the rest of that sideband's results.
:param verbose: Flag to see the nitty gritty details
:type verbose: bool
:return: None
"""
self.full_dict = {}
for sideband in list(self.sb_dict.items()):
index = np.argmax(sideband[1][:, 1])
nir_frequency = sideband[1][index, 0]
area = np.trapz(np.nan_to_num(sideband[1][:, 1]), sideband[1][:, 0])
error = np.sqrt(np.sum(np.nan_to_num(
sideband[1][:, 2]) ** 2)) / 8065.6 # Divide by the step size?
if verbose:
print("order", sideband[0])
print("area", area)
print("error", error)
print("ratio", area / error)
details = np.array(
[sideband[0], nir_frequency, 1 / 8065.6, area, error, 2 / 8065.6,
1 / 8065.6])
if area < 0:
if verbose:
print("area less than 0", sideband[0])
continue
elif area < 1.5 * error: # Two seems like a good cutoff?
if verbose:
print("I did not keep sideband ", sideband[0])
continue
try:
self.sb_results = np.vstack((self.sb_results, details))
except:
self.sb_results = np.array(details)
self.full_dict[sideband[0]] = details[1:]
try:
self.sb_results = self.sb_results[self.sb_results[:, 0].argsort()]
except (IndexError, AttributeError):
# IndexError where there's only one sideband
# AttributeError when there aren't any (one sb which wasn't fit)
pass
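    # Standalone illustration (hypothetical numbers) of the integration rule
    # used above: area from np.trapz, error from quadrature-summed point
    # errors (divided by 8065.6 to convert to eV, as in the method):
    #   freqs = np.array([1.000, 1.001, 1.002])   # eV
    #   counts = np.array([1.0, 4.0, 1.5])        # arb. u.
    #   errs = np.array([0.2, 0.3, 0.2])          # arb. u.
    #   area = np.trapz(counts, freqs)
    #   err = np.sqrt(np.sum(errs ** 2)) / 8065.6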
def fit_sidebands(self, plot=False, verbose=False):
"""
This method will fit a gaussian to each of the sidebands provided in
the self.sb_dict and make a list just like in the EMCCD version. It
will also use the standard error of the integral of the PMT peak as the
error of the gaussian area instead of that element from the covariance
matrix. Seems more legit.
attributes:
self.sb_results: the numpy array that contains all of the fit info just
like it does in the CCD class.
self.full_dict = A dictionary version of self.sb_results
:param plot: Flag to see the results plotted
:type plot: bool
:param verbose: Flag to see the nitty gritty details
:type verbose: bool
:return: None
"""
sb_fits = {}
for sideband in list(self.sb_dict.items()):
if verbose:
print("Sideband number", sideband[0])
print("Sideband data:\n", sideband[1])
index = np.argmax(sideband[1][:, 1])
nir_frequency = sideband[1][index, 0]
peak = sideband[1][index, 1]
width_guess = 0.0001 # Yep, another magic number
p0 = [nir_frequency, peak * width_guess, width_guess, 0.00001]
if verbose:
x_vals = np.linspace(np.amin(sideband[1][:, 0]),
np.amax(sideband[1][:, 0]), num=50)
plt.plot(x_vals, gauss(x_vals, *p0),
label="fit :{}".format(sideband[1]))
print("p0:", p0)
try:
coeff, var_list = curve_fit(gauss, sideband[1][:, 0], sideband[1][:, 1],
sigma=sideband[1][:, 2], p0=p0)
coeff[1] = abs(coeff[1])
coeff[2] = abs(coeff[2])
if verbose:
print("coeffs:", coeff)
print("stdevs:", np.sqrt(np.diag(var_list)))
print("integral", np.trapz(sideband[1][:, 1], sideband[1][:, 0]))
if np.sqrt(np.diag(var_list))[0] / coeff[
0] < 0.5: # The error on where the sideband is should be small
sb_fits[sideband[0]] = np.concatenate(
(np.array([sideband[0]]), coeff, np.sqrt(np.diag(var_list))))
# print "error then:", sb_fits[sideband[0]][6]
relative_error = np.sqrt(sum([x ** 2 for x in
sideband[1][index - 1:index + 2,
2]])) / np.sum(
sideband[1][index - 1:index + 2, 1])
if verbose:
print("relative error:", relative_error)
sb_fits[sideband[0]][6] = coeff[1] * relative_error
# print "error now:", sb_fits[sideband[0]][6]
if plot:
x_vals = np.linspace(np.amin(sideband[1][:, 0]),
np.amax(sideband[1][:, 0]), num=50)
plt.plot(x_vals, gauss(x_vals, *coeff))
# plt.plot(x_vals, gauss(x_vals, *p0))
else:
print("what happened?")
except:
print("God damn it, Leroy.\nYou couldn't fit this.")
sb_fits[sideband[0]] = None
for result in sorted(sb_fits.keys()):
try:
self.sb_results = np.vstack((self.sb_results, sb_fits[result]))
except:
self.sb_results = np.array(sb_fits[result])
self.sb_results = self.sb_results[:, [0, 1, 5, 2, 6, 3, 7, 4, 8]]
self.sb_results = self.sb_results[:, :7]
if verbose:
print("And the results, please:\n", self.sb_results)
self.full_dict = {}
for sb in self.sb_results:
self.full_dict[sb[0]] = np.asarray(sb[1:])
def laser_line(self, verbose=False):
"""
This method is designed to scale everything in the PMT to the conversion
efficiency based on our measurement of the laser line with a fixed
attenuation.
Creates:
self.parameters['normalized?'] = Flag to specify if the laser has been
accounted for.
:return: None
"""
if 0 not in self.sb_list:
self.parameters['normalized?'] = False
return
else:
laser_index = np.where(self.sb_results[:, 0] == 0)[0][0]
if verbose:
print("sb_results", self.sb_results[laser_index, :])
print("laser_index", laser_index)
laser_strength = np.array(self.sb_results[laser_index, 3:5])
if verbose:
print("Laser_strength", laser_strength)
for sb in self.sb_results:
sb[4] = (sb[3] / laser_strength[0]) * np.sqrt(
(sb[4] / sb[3]) ** 2 + (laser_strength[1] / laser_strength[0]) ** 2)
sb[3] = sb[3] / laser_strength[0]
for sb in list(self.full_dict.values()):
sb[3] = (sb[2] / laser_strength[0]) * np.sqrt(
(sb[3] / sb[2]) ** 2 + (laser_strength[1] / laser_strength[0]) ** 2)
sb[2] = sb[2] / laser_strength[0]
self.parameters['normalized?'] = True
def save_processing(self, file_name, folder_str, marker='', index=''):
"""
This will save all of the self.proc_data and the results from the
fitting of this individual file.
Format:
spectra_fname = file_name + '_' + marker + '_' + str(index) + '.txt'
fit_fname = file_name + '_' + marker + '_' + str(index) + '_fits.txt'
Inputs:
file_name = the beginning of the file name to be saved
folder_str = the location of the folder where the file will be saved,
will create the folder, if necessary.
marker = I...I don't know what this was originally for
index = used to keep these files from overwriting themselves when in a
list
Outputs:
Two files:
self.proc_data = the continuous spectrum
self.sb_results = the individual sideband details
:param file_name: The base name for the saved file
:type file_name: str
:param folder_str: The full name for the folder hte file is saved it. Folder can be created
:type folder_str: str
:param marker: Marker for the file, appended to file_name, often the self.parameters['series']
:type marker: str
:param index: used to keep these files from overwriting themselves when marker is the same
:type index: str or int
:return: None
"""
try:
os.mkdir(folder_str)
except OSError as e:
if e.errno == errno.EEXIST:
pass
else:
raise
spectra_fname = file_name + '_' + marker + '_' + str(index) + '.txt'
fit_fname = file_name + '_' + marker + '_' + str(index) + '_fits.txt'
self.save_name = spectra_fname
# self.parameters["files included"] = list(self.files)
try:
parameter_str = json.dumps(self.parameters, sort_keys=True, indent=4,
separators=(',', ': '))
except:
print("Source: PMT.save_images\nJSON FAILED")
print("Here is the dictionary that broke JSON:\n", self.parameters)
return
parameter_str = parameter_str.replace('\n', '\n#')
num_lines = parameter_str.count(
'#') # Make the number of lines constant so importing is easier
# for num in range(99 - num_lines): parameter_str += '\n#'
parameter_str += '\n#' * (99 - num_lines)
origin_import_spec = '\nNIR frequency,Signal,Standard error\neV,arb. u.,arb. u.\n,{:.3f},'.format(
self.parameters["fieldStrength"]["mean"])
spec_header = '#' + parameter_str + origin_import_spec
origin_import_fits = '\nCenter energy,error,Amplitude,error,Linewidth,error\neV,,arb. u.,,eV,,\n,,' # + marker
fits_header = '#' + parameter_str + origin_import_fits
for sideband in sorted(self.sb_dict.keys()):
try:
complete = np.vstack((complete, self.sb_dict[sideband]))
except:
complete = np.array(self.sb_dict[sideband])
np.savetxt(os.path.join(folder_str, spectra_fname), complete, delimiter=',',
header=spec_header, comments='', fmt='%0.6e')
try:
np.savetxt(os.path.join(folder_str, fit_fname), self.sb_results,
delimiter=',',
header=fits_header, comments='', fmt='%0.6e')
except AttributeError:
# Catch the error that happens if you save something without files
print("warning, couldn't save fit file (no sidebands found?)")
print("Saved PMT spectrum.\nDirectory: {}".format(
os.path.join(folder_str, spectra_fname)))
class TimeTrace(PMT):
"""
    This class will be able to handle time traces output by the PMT software.
"""
class FullAbsorbance(FullSpectrum):
"""
I'm imagining this will sew up absorption spectra, but I'm not at all sure
how to do that at the moment.
"""
class FullHighSideband(FullSpectrum):
"""
I'm imagining this class is created with a base CCD file, then gobbles up
other spectra that belong with it, then grabs the PMT object to normalize
everything, assuming that PMT object exists.
"""
def __init__(self, initial_CCD_piece):
"""
Initialize a full HSG spectrum. Starts with a single CCD image, then
adds more on to itself using stitch_hsg_dicts.
Creates:
self.fname = file name of the initial_CCD_piece
self.sb_results = The sideband details from the initializing data
self.parameters = The parameter dictionary of the initializing data. May
not have all details of spectrum pieces added later.
self.full_dict = a copy of the sb_results without the zeroth column, which
is SB order
:param initial_CCD_piece: The starting part of the spectrum, often the lowest orders seen by CCD
:type initial_CCD_piece: HighSidebandCCD
:return: None
"""
self.fname = initial_CCD_piece.fname
try:
self.sb_results = initial_CCD_piece.sb_results
except AttributeError:
print(initial_CCD_piece.full_dict)
raise
self.parameters = initial_CCD_piece.parameters
self.parameters['files_here'] = [initial_CCD_piece.fname.split('/')[-1]]
self.full_dict = {}
for sb in self.sb_results:
self.full_dict[sb[0]] = np.asarray(sb[1:])
@staticmethod
def parse_sb_array(arr):
"""
Check to make sure the first even order sideband in an array is not weaker
than the second even order. If this happens, it's likely because the SB was in
        the short pass filter and isn't worth counting.
        We cut it out to prevent it from interfering with calculating overlaps
:param arr:
:return:
"""
arr = np.array(arr)
if (arr[0, sbarr.SBNUM]>0 and arr[1, sbarr.SBNUM]>0 and # make sure they're both pos
arr[0, sbarr.AREA] < arr[1, sbarr.AREA]): # and the fact the area is less
# print "REMOVING FIRST SIDEBAND FROM FULLSIDEBAND"
# print arr[0]
# print arr[1]
arr = arr[1:]
full_dict = {}
for sb in arr:
full_dict[sb[0]] = np.asarray(sb[1:])
return full_dict, arr
def add_CCD(self, ccd_object, verbose=False, force_calc=None, **kwargs):
"""
This method will be called by the stitch_hsg_results function to add another
CCD image to the spectrum.
:param ccd_object: The CCD object that will be stiched into the current FullHighSideband object
:type ccd_object: HighSidebandCCD
:return: None
"""
if self.parameters["gain"] == ccd_object.parameters["gain"]:
calc = False
else:
calc = True
if force_calc is not None:
calc = force_calc
if "need_ratio" in kwargs: #cascading it through, starting to think
# everything should be in a kwarg
calc = kwargs.pop("need_ratio")
try:
# self.full_dict = stitch_hsg_dicts(self.full_dict, ccd_object.full_dict,
# need_ratio=calc, verbose=verbose)
self.full_dict = stitch_hsg_dicts(self, ccd_object, need_ratio=calc,
verbose=verbose, **kwargs)
self.parameters['files_here'].append(ccd_object.fname.split('/')[-1])
# update sb_results, too
sb_results = [[k]+list(v) for k, v in list(self.full_dict.items())]
sb_results = np.array(sb_results)
self.sb_results = sb_results[sb_results[:,0].argsort()]
except AttributeError:
print('Error, not enough sidebands to fit here! {}, {}, {}, {}'.format(
self.parameters["series"], self.parameters["spec_step"],
ccd_object.parameters["series"], ccd_object.parameters["spec_step"]
))
def add_PMT(self, pmt_object, verbose=False):
"""
This method will be called by the stitch_hsg_results function to add the PMT
data to the spectrum.
"""
# print "I'm adding PMT once"
# self.full_dict = stitch_hsg_dicts(pmt_object.full_dict, self.full_dict,
# need_ratio=True, verbose=False)
self.full_dict = stitch_hsg_dicts(pmt_object, self,
need_ratio=True, verbose=verbose)
# if verbose:
# self.full_dict, ratio = self.full_dict
# print "I'm done adding PMT data"
self.parameters['files_here'].append(pmt_object.parameters['files included'])
self.make_results_array()
# if verbose:
# return ratio
def make_results_array(self):
"""
The idea behind this method is to create the sb_results array from the
finished full_dict dictionary.
"""
self.sb_results = None
# print "I'm making the results array:", sorted(self.full_dict.keys())
for sb in sorted(self.full_dict.keys()):
# print "Going to add this", sb
try:
self.sb_results = np.vstack((self.sb_results, np.hstack((sb, self.full_dict[sb]))))
except ValueError:
# print "It didn't exist yet!"
self.sb_results = np.hstack((sb, self.full_dict[sb]))
# print "and I made this array:", self.sb_results[:, 0]
def save_processing(self, file_name, folder_str, marker='', index='', verbose=''):
"""
This will save all of the self.proc_data and the results from the
fitting of this individual file.
Format:
fit_fname = file_name + '_' + marker + '_' + str(index) + '_full.txt'
Inputs:
file_name = the beginning of the file name to be saved
folder_str = the location of the folder where the file will be saved,
will create the folder, if necessary.
marker = I...I don't know what this was originally for
index = used to keep these files from overwriting themselves when in a
list
Outputs:
Two files, one that is self.proc_data, the other is self.sb_results
"""
try:
os.mkdir(folder_str)
except OSError as e:
if e.errno == errno.EEXIST:
pass
else:
raise
temp = np.array(self.sb_results)
ampli = np.array([temp[:, 3] / temp[:, 5]]) # I'm pretty sure this is
# amplitude, not area
temp[:, 5:7] = temp[:, 5:7] * 1000 # For meV linewidths
if verbose:
print("sb_results", self.sb_results.shape)
print("ampli", ampli.shape)
save_results = np.hstack((temp, ampli.T))
# spectra_fname = file_name + '_' + marker + '_' + str(index) + '.txt'
fit_fname = file_name + '_' + marker + '_' + str(index) + '_full.txt'
# self.save_name = spectra_fname
# self.parameters['addenda'] = self.addenda
# self.parameters['subtrahenda'] = self.subtrahenda
try:
# PMT files add unnecessary number of lines, dump it into one line
# by casting it to a string.
reduced = self.parameters.copy()
reduced["files_here"] = str(reduced["files_here"])
parameter_str = json.dumps(reduced, sort_keys=True, indent=4, separators=(',', ': '))
except Exception as e:
print(e)
print("Source: EMCCD_image.save_images\nJSON FAILED")
print("Here is the dictionary that broke JSON:\n", self.parameters)
return
parameter_str = parameter_str.replace('\n', '\n#')
num_lines = parameter_str.count('#') # Make the number of lines constant so importing is easier
# for num in range(99 - num_lines): parameter_str += '\n#'
parameter_str += '\n#' * (99 - num_lines)
# origin_import_spec = '\nNIR frequency,Signal,Standard error\neV,arb. u.,arb. u.'
# spec_header = '#' + parameter_str + '\n#' + self.description[:-2] + origin_import_spec
origin_import_fits = '\nSideband,Center energy,error,Sideband strength,error,Linewidth,error,Amplitude'+\
'\norder,eV,,arb. u.,,meV,,arb. u.\n' + ','.join([marker]*8)
fits_header = '#' + parameter_str + origin_import_fits
# np.savetxt(os.path.join(folder_str, spectra_fname), self.proc_data, delimiter=',',
# header=spec_header, comments='', fmt='%f')
np.savetxt(os.path.join(folder_str, fit_fname), save_results, delimiter=',',
header=fits_header, comments='', fmt='%0.6e')
if verbose:
print("Save image.\nDirectory: {}".format(os.path.join(folder_str, fit_fname)))
####################
# Fitting functions
####################
def gauss(x, *p):
"""
Gaussian fit function.
:param x: The independent variable
:type x: np.array, or int or float
:param p: [mean, area, width, y offset] to be unpacked
:type p: list of floats or ints
:return: Depends on x, returns another np.array or float or int
:rtype: type(x)
"""
mu, A, sigma, y0 = p
return (A / sigma) * np.exp(-(x - mu) ** 2 / (2. * sigma ** 2)) + y0
def lingauss(x, *p):
"""
Gaussian fit function with a linear offset
:param x: The independent variable
:type x: np.array, or int or float
:param p: [mean, area, width, constant offset of background, slope of background] to be unpacked
:type p: list of floats or ints
:return: Depends on x, returns another np.array or float or int
:rtype: type(x)
"""
mu, A, sigma, y0, m = p
return (A / sigma) * np.exp(-(x - mu) ** 2 / (2. * sigma ** 2)) + y0 + m * x
def lorentzian(x, *p):
"""
Lorentzian fit with constant offset
:param x: The independent variable
:type x: np.array, or int or float
:param p: [mean, area, width, constant offset of background, slope of background] to be unpacked
:type p: list of floats or ints
:return: Depends on x, returns another np.array or float or int
:rtype: type(x)
"""
mu, A, gamma, y0 = p
return (A / np.pi) * (gamma / ((x - mu) ** 2 + gamma ** 2)) + y0
def background(x, *p):
"""
Arbitrary pink-noise model background data for absorbance FFT
for the intention of replacing a peak in the FFT
with the background
:param x: The independent variable
:type x: np.array, or int or float
:param p: [proportionality factor, exponent of power law]
:type p: list of floats or ints
:return: Depends on x
:rtype: type(x)
"""
a, b = p
return a * (1 / x) ** b
def gaussWithBackground(x, *p):
"""
Gaussian with pink-noise background function
:param x: independent variable
:type x: np.array, or int or float
:param p: [mean, area, width, constant background, proportionality of power law, exponent of power law]
:type p: list of floats or ints
:return: Depends on x
:rtype: type(x)
"""
pGauss = p[:4]
a, b = p[4:]
return gauss(x, *pGauss) + background(x, a, b)
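def _gauss_selfcheck():
    """Round-trip sanity check (not part of the original module): fit gauss()
    to noiseless synthetic data and recover the generating parameters."""
    x = np.linspace(-1, 1, 201)
    true_p = [0.1, 2.0, 0.05, 0.3]  # mu, A, sigma, y0
    y = gauss(x, *true_p)
    fit_p, _ = curve_fit(gauss, x, y, p0=[0, 1, 0.1, 0])
    assert np.allclose(fit_p, true_p, rtol=1e-3)
    return fit_p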
####################
# Collection functions
####################
def hsg_combine_spectra(spectra_list, verbose = False, **kwargs):
"""
This function is all about smooshing different parts of the same hsg
spectrum together. It takes a list of HighSidebandCCD spectra and turns the
zeroth spec_step into a FullHighSideband object. It then uses the function
stitch_hsg_dicts over and over again for the smooshing.
Input:
spectra_list = list of HighSidebandCCD objects that have sideband spectra
larger than the spectrometer can see.
Returns:
good_list = A list of FullHighSideband objects that have been combined as
much as can be.
:param spectra_list: randomly-ordered list of HSG spectra, some of which can be stitched together
:type spectra_list: List of HighSidebandCCD objects
kwargs gets passed onto add_item
:return: fully combined list of full hsg spectra. No PMT business yet.
:rtype: list of FullHighSideband
"""
good_list = []
spectra_list = spectra_list.copy()
spectra_list.sort(key=lambda x: x.parameters["spec_step"])
# keep a dict for each series' spec step
# This allows you to combine spectra whose spec steps
# change by values other than 1 (2, if you skip, or 0.5 if you
# decide to insert things, or arbitary strings)
spec_steps = {}
for elem in spectra_list:
# if verbose:
# print "Spec_step is", elem.parameters["spec_step"]
current_steps = spec_steps.get(elem.parameters["series"], [])
current_steps.append(elem.parameters["spec_step"])
spec_steps[elem.parameters["series"]] = current_steps
if verbose:
print("I found these spec steps for each series:")
print("\n\t".join("{}: {}".format(*ii) for ii in spec_steps.items()))
# sort the list of spec steps
for series in spec_steps:
spec_steps[series].sort()
same_freq = lambda x,y: x.parameters["fel_lambda"] == y.parameters["fel_lambda"]
for index in range(len(spectra_list)):
try:
temp = spectra_list.pop(0)
if verbose:
print("\nStarting with this guy", temp, "\n")
except:
break
good_list.append(FullHighSideband(temp))
counter = 1
temp_list = list(spectra_list)
for piece in temp_list:
if verbose:
print("\tchecking this spec_step", piece.parameters["spec_step"], end=' ')
print(", the counter is", counter)
if not same_freq(piece, temp):
if verbose:
print("\t\tnot the same fel frequencies ({} vs {})".format(piece.parameters["fel_lambda"], temp.parameters["fel_lambda"]))
continue
if temp.parameters["series"] == piece.parameters["series"]:
if piece.parameters["spec_step"] == spec_steps[temp.parameters["series"]][counter]:
if verbose:
print("I found this one", piece)
counter += 1
good_list[-1].add_CCD(piece, verbose=verbose, **kwargs)
spectra_list.remove(piece)
else:
print("\t\tNot the right spec step?", type(piece.parameters["spec_step"]))
else:
if verbose:
print("\t\tNot the same series ({} vs {}".format(
piece.parameters["series"],temp.parameters["series"]))
good_list[-1].make_results_array()
return good_list
def hsg_combine_spectra_arb_param(spectra_list, param_name="series", verbose = False):
"""
This function is all about smooshing different parts of the same hsg
spectrum together. It takes a list of HighSidebandCCD spectra and turns the
zeroth spec_step into a FullHighSideband object. It then uses the function
stitch_hsg_dicts over and over again for the smooshing.
This is different than hsg_combine_spectra in that you pass which
criteria distinguishes the files to be the "same". Since it can be any arbitrary
value, things won't be exactly the same (field strength will never be identical
between images). It will start with the first (lowest) spec step, then compare the
number of images in the next step. Whichever has
Input:
spectra_list = list of HighSidebandCCD objects that have sideband spectra
larger than the spectrometer can see.
Returns:
good_list = A list of FullHighSideband objects that have been combined as
much as can be.
:param spectra_list: randomly-ordered list of HSG spectra, some of which can be stitched together
:type spectra_list: list of HighSidebandCCD
:return: fully combined list of full hsg spectra. No PMT business yet.
:rtype: list of FullHighSideband
"""
if not spectra_list:
raise RuntimeError("Passed an empty spectra list!")
if isinstance(param_name, list):
# if you pass two things because the param you want
# is in a dict (e.g. field strength has mean/std)
# do it that way
param_name_list = list(param_name)
paramGetter = lambda x: x.parameters[param_name_list[0]][param_name_list[1]]
param_name = param_name[0]
elif isinstance(spectra_list[0].parameters[param_name], dict):
paramGetter = lambda x: x.parameters[param_name]["mean"]
else:
paramGetter = lambda x: x.parameters[param_name]
good_list = []
spectra_list.sort(key=lambda x: x.parameters["spec_step"])
# keep a dict for each spec step.
spec_steps = {}
for elem in spectra_list:
if verbose:
print("Spec_step is", elem.parameters["spec_step"])
current_steps = spec_steps.get(elem.parameters["spec_step"], [])
current_steps.append(elem)
spec_steps[elem.parameters["spec_step"]] = current_steps
# Next, loop over all of the elements. For each element, if it has not
# already been added to a spectra, look at all of the combinations from
# other spec steps to figure out which has the smallest overall deviation
# to make a new full spectrum
good_list = []
already_added = set()
for elem in spectra_list:
if elem in already_added: continue
already_added.add(elem)
good_list.append(FullHighSideband(elem))
other_spec_steps = [v for k, v in list(spec_steps.items()) if
k != good_list[-1].parameters["spec_step"]]
min_distance = np.inf
cur_value = paramGetter(good_list[-1])
best_match = None
for comb in itt.product(*other_spec_steps):
new_values = list(map(paramGetter, comb))
all_values = new_values + [cur_value]
if np.std(all_values) < min_distance:
min_distance = np.std(all_values)
best_match = list(comb)
if best_match is None:
raise RuntimeError("No matches found. Empty lists passed?")
best_values = list(map(paramGetter, best_match))
for spec in best_match:
print("Adding new spec step\n\tStarted with spec={},series={}".format(
good_list[-1].parameters["spec_step"],good_list[-1].parameters["series"]
))
print("\tAdding with spec={},series={}\n".format(
spec.parameters["spec_step"],
spec.parameters["series"]
))
print("\n\nfirst SBs:\n", good_list[-1].sb_results)
print("\n\nsecond SBs:\n", spec.sb_results)
good_list[-1].add_CCD(spec, True)
print("\n\nEnding SBs:\n", good_list[-1].sb_results)
already_added.add(spec)
best_match.append(good_list[-1])
best_values.append(cur_value)
new_value = np.mean(best_values)
new_std = np.std(best_values)
if isinstance(good_list[-1].parameters[param_name], dict):
best_values = np.array([x.parameters[param_name]["mean"] for x in best_match])
best_std = np.array([x.parameters[param_name]["std"] for x in best_match])
new_value = np.average(best_values, weights = best_std)
new_std = np.sqrt(np.average((best_values-new_value)**2, weights=best_std))
good_list[-1].parameters[param_name] = {
"mean": new_value,
"std": new_std
}
return good_list
def pmt_sorter(folder_path, plot_individual = True):
"""
This function will be fed a folder with a bunch of PMT data files in it.
The folder should contain a bunch of spectra with at least one sideband in
them, each differing by the series entry in the parameters dictionary.
This function will return a list of HighSidebandPMT objects.
:param folder_path: Path to a folder containing a bunch of PMT data, can be
part of a parameter sweep
:type folder_path: str
:param plot_individual: Whether to plot each sideband itself
:return: A list of all the possible hsg pmt spectra, organized by series tag
:rtype: list of HighSidebandPMT
"""
file_list = glob.glob(os.path.join(folder_path, '*[0-9].txt'))
pmt_list = []
plot_sb = lambda x: None
if plot_individual:
plt.figure("PMT data")
for sb_file in file_list:
temp = HighSidebandPMT(sb_file)
plot_sb(temp)
try:
for pmt_spectrum in pmt_list: # pmt_spectrum is a pmt object
if temp.parameters['series'] == pmt_spectrum.parameters['series']:
pmt_spectrum.add_sideband(temp)
break
else: # this will execute IF the break was NOT called
pmt_list.append(temp)
except:
pmt_list.append(temp)
# for sb_file in file_list:
# with open(sb_file,'rU') as f:
# param_str = ''
# line = f.readline()
# line = f.readline()
# while line[0] == '#':
# param_str += line[1:]
# line = f.readline()
#
# parameters = json.loads(param_str)
# try:
# for pmt_spectrum in pmt_list: # pmt_spectrum is a pmt object?
# if parameters['series'] == pmt_spectrum.parameters['series']:
# pmt_spectrum.add_sideband(sb_file)
# break
# else: # this will execute IF the break was NOT called
# pmt_list.append(HighSidebandPMT(sb_file))
# except:
# pmt_list.append(HighSidebandPMT(sb_file))
for pmt_spectrum in pmt_list:
pmt_spectrum.process_sidebands()
return pmt_list
def hsg_combine_qwp_sweep(path, loadNorm = True, save = False, verbose=False,
skipOdds = True):
"""
Given a path to data taken from rotating the QWP (doing polarimetry),
process the data (fit peaks), and parse it into a matrix of sb strength vs
QWP angle vs sb number.
By default, saves the file into "Processed QWP Dependence"
Return should be passed directly into fitting
-1 | SB1 | SB1 | SB2 | SB2 | ... | ... | SBn | SBn |
angle1 | SB Strength | SB err | SB Strength | SB Err |
angle2 | ... | . |
.
.
.
:param path: Path to load
:param loadNorm: if true, load the normalized data
:param save: Save the processed file or not
:param verbose:
:param skipOdds: Passed on to save sweep; determine whether or not to save
odd orders. Generally, odds are artifacts and I don't want
them messing up the data, so default to True.
:return:
"""
def getData(fname):
"""
Helper function for loading the data and getting the header information for incident NIR stuff
:param fname:
:return:
"""
if isinstance(fname, str):
if loadNorm:
ending = "_norm.txt"
else:
ending = "_snip.txt"
header = ''
with open(os.path.join("Processed QWP Dependence", fname + ending)) as fh:
ln = fh.readline()
while ln[0] == '#':
header += ln[1:]
ln = fh.readline()
data = np.genfromtxt(os.path.join("Processed QWP Dependence", fname + ending),
delimiter=',', dtype=str)
if isinstance(fname, io.BytesIO):
header = b''
ln = fname.readline()
while ln.decode()[0] == '#':
header += ln[1:]
ln = fname.readline()
fname.seek(0)
data = np.genfromtxt(fname,
delimiter=',', dtype=str)
header = json.loads(header)
return data, float(header["lAlpha"]), float(header["lGamma"]), float(header["nir"]), float(header["thz"])
######### End getData
try:
sbData, lAlpha, lGamma, nir, thz = getData(path)
except:
# Do the processing on all the files
specs = proc_n_plotCCD(path, keep_empties=True, verbose=verbose)
for sp in specs:
try:
sp.parameters["series"] = round(float(sp.parameters["rotatorAngle"]), 2)
except KeyError:
# Old style of formatting
sp.parameters["series"] = round(float(sp.parameters["detectorHWP"]), 2)
specs = hsg_combine_spectra(specs, ignore_weaker_lowers=False)
if not save:
# If you don't want to save them, set everything up for doing Bytes objects
# to replacing saving files
full, snip, norm = io.BytesIO(), io.BytesIO(), io.BytesIO()
if "nir_pola" not in specs[0].parameters:
# in the olden days. Force them. Hopefully making them outside of ±360
# makes it obvious
specs[0].parameters["nir_pola"] = 361
specs[0].parameters["nir_polg"] = 361
keyName = "rotatorAngle"
if keyName not in specs[0].parameters:
# from back before I changed the name
keyName = "detectorHWP"
save_parameter_sweep(specs, [full, snip, norm], None,
keyName, "deg", wanted_indices=[3, 4],
header_dict={
"lAlpha": specs[0].parameters["nir_pola"],
"lGamma": specs[0].parameters["nir_polg"],
"nir": specs[0].parameters["nir_lambda"],
"thz": specs[0].parameters["fel_lambda"], },
only_even=skipOdds)
if loadNorm:
sbData, lAlpha, lGamma, nir, thz = getData(norm)
else:
sbData, lAlpha, lGamma, nir, thz = getData(snip)
else:
save_parameter_sweep(specs, os.path.basename(path), "Processed QWP Dependence",
"rotatorAngle", "deg", wanted_indices=[3, 4],
header_dict={
"lAlpha": specs[0].parameters["nir_pola"],
"lGamma": specs[0].parameters["nir_polg"],
"nir": specs[0].parameters["nir_lambda"],
"thz": specs[0].parameters["fel_lambda"], },
only_even=skipOdds)
sbData, lAlpha, lGamma, nir, thz = getData(os.path.basename(path))
laserParams = {
"lAlpha": lAlpha,
"lGamma": lGamma,
"nir": nir,
"thz": thz
}
# get which sidebands were found in this data set
# first two rows are origin header, second is sideband number
# (and empty strings, which is why the "if ii" below, to prevent
# ValueErrors on int('').
foundSidebands = np.array(sorted([float(ii) for ii in set(sbData[2]) if ii]))
# Remove first 3 rows, which are strings for origin header, and cast it to floats
sbData = sbData[3:].astype(float)
# double the sb numbers (to account for sb strength/error) and add a dummy
# number so the array is the same shape
foundSidebands = np.insert(foundSidebands, range(len(foundSidebands)), foundSidebands)
foundSidebands = np.insert(foundSidebands, 0, -1)
return laserParams, np.row_stack((foundSidebands, sbData))
def makeCurve(eta, isVertical):
"""
:param eta: QWP retardance at the wavelength
:return:
"""
cosd = lambda x: np.cos(x * np.pi / 180)
sind = lambda x: np.sin(x * np.pi / 180)
eta = eta * 2 * np.pi
    if isVertical:
        # vertical polarizer
        # (the bodies below are reconstructed from the standard rotating-QWP
        # Stokes analyzer curve; the originals were elided from this fragment)
        def analyzerCurve(x, *S):
            S0, S1, S2, S3 = S
            return S0 - S1 / 2 * (1 + np.cos(eta)) \
                   + S3 * np.sin(eta) * sind(2 * x) \
                   + S1 / 2 * (np.cos(eta) - 1) * cosd(4 * x) \
                   + S2 / 2 * (np.cos(eta) - 1) * sind(4 * x)
    else:
        # horizontal polarizer
        def analyzerCurve(x, *S):
            S0, S1, S2, S3 = S
            return S0 + S1 / 2 * (1 + np.cos(eta)) \
                   - S3 * np.sin(eta) * sind(2 * x) \
                   + S1 / 2 * (1 - np.cos(eta)) * cosd(4 * x) \
                   + S2 / 2 * (1 - np.cos(eta)) * sind(4 * x)
    return analyzerCurve
def proc_n_fit_qwp_data(data, laserParams = dict(), wantedSBs = None, vertAnaDir = True, plot=False,
save = False, plotRaw = lambda sbidx, sbnum: False, series = '', eta=None,
**kwargs):
"""
Fit a set of sideband data vs QWP angle to get the stoke's parameters
:param data: data in the form of the return of hsg_combine_qwp_sweep
:param laserParams: dictionary of the parameters of the laser, the angles and frequencies. See function for
expected keys. I don't think the errors are used (except for plotting?), or the wavelengths (but
left in for potential future use (wavelength dependent stuff?))
:param wantedSBs: List of the wanted sidebands to fit out.
:param vertAnaDir: direction of the analzyer. True if vertical, false if horizontal.
:param plot: True/False to plot alpha/gamma/dop. Alternatively, a list of "a", "g", "d" to only plot selected ones
:param save: filename to save the files. Accepts BytesIO
:param plotRaw: callable that takes an index of the sb and sb number, returns true to plot the raw curve
:param series: a string to be put in the header for the origin files
:param eta: a function to call to calculate the desired retardance. Input will be the SB order.
if saveStokes is in kwargs and False, it will not save the stokes parameters, since I rarely actually use them.
:return:
"""
defaultLaserParams = {
"lAlpha": 90,
"ldAlpha": 0.2,
"lGamma": 0.0,
"ldGamma": 0.2,
"lDOP": 1,
"ldDOP": 0.02,
"nir": 765.7155,
"thz": 21.1
}
defaultLaserParams.update(laserParams)
lAlpha, ldAlpha, lGamma, ldGamma, lDOP, ldDOP = defaultLaserParams["lAlpha"], \
defaultLaserParams["ldAlpha"], \
defaultLaserParams["lGamma"], \
defaultLaserParams["ldGamma"], \
defaultLaserParams["lDOP"], \
defaultLaserParams["ldDOP"]
allSbData = data
angles = allSbData[1:, 0]
# angles += -5
# print("="*20)
# print("\n"*3)
# print(" WARNING")
# print("\n"*3)
# print("ANGLES HAVE BEEN MANUALLY OFFEST IN proc_n_fit_qwp_data")
# print("\n"*3)
# print("="*20)
allSbData = allSbData[:, 1:] # trim out the angles
if wantedSBs is None:
# set to get rid of duplicates, 1: to get rid of the -1 used for
# getting arrays the right shape
wantedSBs = set(allSbData[0, 1:])
if eta is None:
"""
It might be easier for the end user to do this by passing eta(wavelength) instead of eta(sborder),
but then this function would need to carry around wavelengths, which is extra work. It could convert
between NIR/THz wavelengths to SB order, but it's currently unclear whether you'd rather use what the WS6
        claims, or what the sidebands say, and you'd probably want to take the extra step to ensure the SB fit results
        if using the spectrometer wavelengths. In general, if you have a function as etal(wavelength), you'd probably
want to pass this as
eta = lambda x: etal(1239.84/(nirEv + x*THzEv))
assuming nirEv/THzEv are the photon energies of the NIR/THz.
"""
eta = lambda x: 0.25
    # allow passing a flag to ignore odds. I think I generally do, so set it to
# default to True
skipOdds = kwargs.get("skip_odds", True)
# Make an array to keep all of the sideband information.
# Start it off by keeping the NIR information (makes for easier plotting into origin)
sbFits = [[0] + [-1] * 8 + [lAlpha, ldAlpha, lGamma, ldGamma, lDOP, ldDOP]]
# Also, for convenience, keep a dictionary of the information.
# This is when I feel like someone should look at porting this over to pandas
sbFitsDict = {}
sbFitsDict["S0"] = [[0, -1, -1]]
sbFitsDict["S1"] = [[0, -1, -1]]
sbFitsDict["S2"] = [[0, -1, -1]]
sbFitsDict["S3"] = [[0, -1, -1]]
sbFitsDict["alpha"] = [[0, lAlpha, ldAlpha]]
sbFitsDict["gamma"] = [[0, lGamma, ldGamma]]
sbFitsDict["DOP"] = [[0, lDOP, ldDOP]]
# Iterate over all sb data. Skip by 2 because error bars are included
for sbIdx in range(0, allSbData.shape[1], 2):
sbNum = allSbData[0, sbIdx]
if sbNum not in wantedSBs: continue
if skipOdds and sbNum%2: continue
# if verbose:
# print("\tlooking at sideband", sbNum)
sbData = allSbData[1:, sbIdx]
sbDataErr = allSbData[1:, sbIdx + 1]
# try:
# p0 = sbFits[-1][1:8:2]
# except:
# p0 = [1, 1, 0, 0]
p0 = [1, 1, 0, 0]
etan = eta(sbNum)
try:
p, pcov = curve_fit(makeCurve(etan, vertAnaDir), angles, sbData, p0=p0)
except ValueError:
# This is getting tossed around, especially when looking at noisy data,
# especially with the laser line, and it's fitting erroneous values.
# Ideally, I should be cutting this out and not even returning them,
            # but that's immediately causing
p = np.nan*np.array(p0)
pcov = np.eye(len(p))
if plot and plotRaw(sbIdx, sbNum):
# pg.figure("{}: sb {}".format(dataName, sbNum))
plt.figure("All Curves")
            plt.errorbar(angles, sbData, sbDataErr, fmt='o-',
                         label=f"{series}, {sbNum}")
# plt.plot(angles, sbData,'o-', label="Data")
fineAngles = np.linspace(angles.min(), angles.max(), 300)
# plt.plot(fineAngles,
# makeCurve(eta, "V" in dataName)(fineAngles, *p0), name="p0")
plt.plot(fineAngles,
makeCurve(etan, vertAnaDir)(fineAngles, *p))
# plt.show()
plt.ylim(0, 1)
plt.xlim(0, 360)
plt.ylabel("Normalized Intensity")
plt.xlabel("QWP Angle (θ)")
print(f"\t{series} {sbNum}, p={p}")
# get the errors
d = np.sqrt(np.diag(pcov))
thisData = [sbNum] + list(p) + list(d)
d0, d1, d2, d3 = d
S0, S1, S2, S3 = p
# reorder so errors are after values
thisData = [thisData[i] for i in [0, 1, 5, 2, 6, 3, 7, 4, 8]]
sbFitsDict["S0"].append([sbNum, S0, d0])
sbFitsDict["S1"].append([sbNum, S1, d1])
sbFitsDict["S2"].append([sbNum, S2, d2])
sbFitsDict["S3"].append([sbNum, S3, d3])
# append alpha value
thisData.append(np.arctan2(S2, S1) / 2 * 180. / np.pi)
# append alpha error
variance = (d2 ** 2 * S1 ** 2 + d1 ** 2 * S2 ** 2) / (S1 ** 2 + S2 ** 2) ** 2
thisData.append(np.sqrt(variance) * 180. / np.pi)
sbFitsDict["alpha"].append([sbNum, thisData[-2], thisData[-1]])
# append gamma value
thisData.append(np.arctan2(S3, np.sqrt(S1 ** 2 + S2 ** 2)) / 2 * 180. / np.pi)
# append gamma error
variance = (d3 ** 2 * (S1 ** 2 + S2 ** 2) ** 2 + (d1 ** 2 * S1 ** 2 + d2 ** 2 * S2 ** 2) * S3 ** 2) / (
(S1 ** 2 + S2 ** 2) * (S1 ** 2 + S2 ** 2 + S3 ** 2) ** 2)
thisData.append(np.sqrt(variance) * 180. / np.pi)
sbFitsDict["gamma"].append([sbNum, thisData[-2], thisData[-1]])
# append degree of polarization
thisData.append(np.sqrt(S1 ** 2 + S2 ** 2 + S3 ** 2) / S0)
variance = ((d1 ** 2 * S0 ** 2 * S1 ** 2 + d0 ** 2 * (S1 ** 2 + S2 ** 2 + S3 ** 2) ** 2 + S0 ** 2 * (
d2 ** 2 * S2 ** 2 + d3 ** 2 * S3 ** 2)) / (S0 ** 4 * (S1 ** 2 + S2 ** 2 + S3 ** 2)))
thisData.append(np.sqrt(variance))
sbFitsDict["DOP"].append([sbNum, thisData[-2], thisData[-1]])
sbFits.append(thisData)
sbFits = np.array(sbFits)
sbFitsDict = {k: np.array(v) for k, v in sbFitsDict.items()}
# This chunk used to insert the "alpha deviation", the difference between the angles and the
# nir. I don't think I use this anymore, so stop saving it
# origin_header = 'Sideband,S0,S0 err,S1,S1 err,S2,S2 err,S3,S3 err,alpha,alpha deviation,alpha err,gamma,gamma err,DOP,DOP err\n'
# origin_header += 'Order,arb.u,arb.u,arb.u,arb.u,arb.u,arb.u,arb.u,arb.u,deg,deg,deg,deg,deg,arb.u.,arb.u.\n'
# origin_header += 'Sideband,{},{},{},{},{},{},{},{},{},{},{},{},{},{},{}'.format(*["{}".format(series)] * 15)
# sbFits = np.array(sbFits)
# sbFits = np.insert(sbFits, 10, sbFits[:, 9] - lAlpha, axis=1)
# sbFits = sbFits[sbFits[:, 0].argsort()]
origin_header = "#\n"*100 # to fit all other files for easy origin importing
origin_header += 'Sideband,S0,S0 err,S1,S1 err,S2,S2 err,S3,S3 err,alpha,alpha err,gamma,gamma err,DOP,DOP err\n'
origin_header += 'Order,arb.u,arb.u,arb.u,arb.u,arb.u,arb.u,arb.u,arb.u,deg,deg,deg,deg,arb.u.,arb.u.\n'
origin_header += 'Sideband,{},{},{},{},{},{},{},{},{},{},{},{},{},{}'.format(*["{}".format(series)] * 14)
sbFits = sbFits[sbFits[:, 0].argsort()]
if isinstance(save, str):
sbFitsSave = sbFits
if not kwargs.get("saveStokes", True):
headerlines = origin_header.splitlines()
ln, units, coms = headerlines[-3:]
ln = ','.join([ln.split(',')[0]] + ln.split(',')[9:])
units = ','.join([units.split(',')[0]] + units.split(',')[9:])
coms = ','.join([coms.split(',')[0]] + coms.split(',')[9:])
headerlines[-3:] = ln, units, coms
# remove them from the save data
origin_header = '\n'.join(headerlines)
sbFitsSave = np.delete(sbFits, range(1, 9), axis=1)
if not os.path.exists(os.path.dirname(save)):
os.mkdir(os.path.dirname(save))
np.savetxt(save, np.array(sbFitsSave), delimiter=',', header=origin_header,
comments='', fmt='%.6e')
# print("a = {:.2f} ± {:.2f}".format(sbFits[1, 9], sbFits[1, 10]))
# print("g = {:.2f} ± {:.2f}".format(sbFits[1, 11], sbFits[1, 12]))
if plot:
plt.figure("alpha")
plt.errorbar(sbFitsDict["alpha"][:, 0],
sbFitsDict["alpha"][:, 1],
sbFitsDict["alpha"][:, 2],
                     fmt='o-', label=series
)
plt.figure("gamma")
plt.errorbar(sbFitsDict["gamma"][:, 0],
sbFitsDict["gamma"][:, 1],
sbFitsDict["gamma"][:, 2],
                     fmt='o-', label=series
)
return sbFits, sbFitsDict
####################
# Helper functions
####################
def fvb_crr(raw_array, offset=0, medianRatio=1, noiseCoeff=5, debugging=False):
"""
    Remove cosmic rays from a sequence of identical exposures
:param raw_array: The array to be cleaned. Successive spectra should
be the columns (i.e. 1600 x n) of the raw_array
:param offset: baseline to add to raw_array.
Not used, but here if it's needed in the future
:param medianRatio: Multiplier to the median when deciding a cutoff
:param noiseCoeff: Multiplier to the noise on the median
May need changing for noisy data
    :return: the cleaned array (same shape as raw_array)
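    Example (an illustrative sketch; the 1600 x n shape and the cosmic-ray hit
    here are synthetic, made up for demonstration):
        stack = np.random.poisson(100., size=(1600, 5)).astype(float)
        stack[800, 2] += 5000.                 # fake cosmic ray in exposure 2
        clean = fvb_crr(stack, medianRatio=1, noiseCoeff=5)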
"""
d = np.array(raw_array)
    # med = ndimage.filters.median_filter(d, size=(1, d.shape[1]), mode='wrap')
    med = np.median(d, axis=1).reshape(d.shape[0], 1)
if debugging:
print("shape of median filter:", med.shape)
meanMedian = med.mean(axis=1)
# meanMedian = med.copy()
if debugging:
print("shape of meaned median filter:", meanMedian.shape)
# Construct a cutoff for each pixel. It was kind of guess and
# check
cutoff = meanMedian * medianRatio + noiseCoeff * np.std(meanMedian[-100:])
if debugging:
print("shape of cutoff criteria:", cutoff.shape)
import pyqtgraph as pg
winlist = []
app = pg.QtGui.QApplication([])
win = pg.GraphicsLayoutWidget()
win.setWindowTitle("Raw Image")
p1 = win.addPlot()
img = pg.ImageItem()
img.setImage(d.copy().T)
p1.addItem(img)
hist = pg.HistogramLUTItem()
hist.setImageItem(img)
win.addItem(hist)
win.nextRow()
p2 = win.addPlot(colspan=2)
p2.setMaximumHeight(250)
p2.addLegend()
for i, v in enumerate(d.T):
p2.plot(v, pen=(i, d.shape[1]), name=str(i))
p2.plot(np.sum(d, axis=1), pen=pg.mkPen('w', width=3))
win.show()
winlist.append(win)
win2 = pg.GraphicsLayoutWidget()
win2.setWindowTitle("Median Image")
p1 = win2.addPlot()
img = pg.ImageItem()
img.setImage(med.T)
p1.addItem(img)
hist = pg.HistogramLUTItem()
hist.setImageItem(img)
win2.addItem(hist)
win2.nextRow()
p2 = win2.addPlot(colspan=2)
p2.setMaximumHeight(250)
p2.plot(np.sum(med, axis=1) / d.shape[1])
win2.show()
winlist.append(win2)
win2 = pg.GraphicsLayoutWidget()
win2.setWindowTitle("d-m")
p1 = win2.addPlot()
img = pg.ImageItem()
img.setImage((d - med).T)
p1.addItem(img)
hist = pg.HistogramLUTItem()
hist.setImageItem(img)
win2.addItem(hist)
win2.nextRow()
p2 = win2.addPlot(colspan=2)
p2.setMaximumHeight(250)
p2.addLegend()
for i, v in enumerate((d - med).T):
p2.plot(v, pen=(i, d.shape[1]), name=str(i))
p2.plot(cutoff, pen=pg.mkPen('w', width=3))
win2.show()
winlist.append(win2)
# Find the bad pixel positions
# Note the [:, None] - needed to cast the correct shapes
badPixs = np.argwhere((d - med) > (cutoff.reshape(len(cutoff), 1)))
for pix in badPixs:
# get the other pixels in the row which aren't the cosmic
if debugging:
print("cleaning pixel", pix)
p = d[pix[0], [i for i in range(d.shape[1]) if not i == pix[1]]]
if debugging:
print("\tRemaining pixels in row are", p)
# Replace the cosmic by the average of the others
# Could get hairy if more than one cosmic per row.
# Maybe when doing many exposures?
d[pix[0], pix[1]] = np.mean(p)
if debugging:
win = pg.GraphicsLayoutWidget()
win.setWindowTitle("Clean Image")
p1 = win.addPlot()
img = pg.ImageItem()
img.setImage(d.copy().T)
p1.addItem(img)
hist = pg.HistogramLUTItem()
hist.setImageItem(img)
win.addItem(hist)
win.nextRow()
p2 = win.addPlot(colspan=2)
p2.setMaximumHeight(250)
p2.plot(np.sum(d, axis=1))
win.show()
winlist.append(win)
app.exec_()
return np.array(d)
def stitch_hsg_dicts(full_obj, new_obj, need_ratio=False, verbose=False, ratios=[1,1],
override_ratio = False, ignore_weaker_lowers = True):
"""
This helper function takes a FullHighSideband and a sideband
object, either CCD or PMT and smushes the new sb_results into the full_dict.
    The first input doesn't change, so if there's a PMT set of data involved, it
should be in the full variable to keep the laser normalization intact.
This function almost certainly does not work for stitching many negative orders
    in its current state
11/14/16
--------
This function has been updated to take the CCD objects themselves to be more
intelligent about stitching. Consider two scans, (a) spec step 0 with 1 gain, spec
step 2 with 110 gain and (b) spec step 0 with 50 gain and spec step 1 with 110 gain.
The old version would always take spec step 0 to scale to, so while comparisons
between spec step 0 and 1 for either case is valid, comparison between (a) and (b)
were not, since they were scaled to different gain parameters. This new code will
check what the gain values are and scale to the 110 data set, if present. This seems
valid because we currently always have a 110 gain exposure for higher order
sidebands.
The exception is if the laser is present (sideband 0), as that is an absolute
measure to which all else should be related.
TODO: run some test cases to test this.
06/11/18
--------
That sometimes was breaking if there were only 3-4 sidebands to fit with poor
SNR. I've added the override_ratio to be passed to set a specific ratio to scale
    by. From data on 06/03/18, the 50 gain to 110 gain ratio is ~3.6. I haven't added
    a clean way of specifying which data set should be scaled. Right now,
it leaves the laser line data, or the 110 gain data alone.
Inputs:
full = full_dict from FullHighSideband, or HighSidebandPMT. It's important
that it contains lower orders than the new_dict.
new_dict = another full_dict.
need_ratio = If gain or other parameters aren't equal and must resort to
calculating the ratio instead of the measurements being equivalent.
Changing integration time still means N photons made M counts,
but changing gain or using PMT or whatever does affect things.
ratios: Will update with the values to the ratios needed to scale the data.
ratios[0] is the ratio for the "full_obj"
ratios[1] is the ratio for the "new_obj"
one of them will be one, one will be the appropriate scale, since one of
them is unscaled. This is strictly speaking an output
override_ratio: Pass a float to specify the ratio that should be used.
ignore_weaker_lowers: Sometimes, a SB is in the short pass filter so a lower
order is weaker than the next highest. If True, causes script to ignore all
sidebands which are weaker and lower order.
Returns:
full = extended version of the input full. Overlapping sidebands are
averaged because that makes sense?
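    Example (a hedged sketch; low_gain_obj/high_gain_obj are hypothetical
    stand-ins for processed sideband objects with .full_dict and .sb_results
    already populated):
        ratios = [1, 1]
        combined = stitch_hsg_dicts(low_gain_obj, high_gain_obj,
                                    need_ratio=True, ratios=ratios)
        # ratios is updated in place with the scale factor for each input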
"""
if isinstance(full_obj, dict) and isinstance(new_obj, dict):
return stitch_hsg_dicts_old(full_obj, new_obj, need_ratio, verbose)
if verbose:
print("=" * 15)
print()
print("Stitching HSG dicts")
print()
print("=" * 15)
# remove potentially offensive SBs, i.e. a 6th order SB being in the SPF for more
# data, but being meaningless to pull intensity information from.
# Note: this might not be the best if you get to higher order stitches where it's
# possible that the sidebands might not be monotonic (from noise?)
if ignore_weaker_lowers:
full_obj.full_dict, full_obj.sb_results = FullHighSideband.parse_sb_array(full_obj.sb_results)
new_obj.new_dict, new_obj.sb_results = FullHighSideband.parse_sb_array(new_obj.sb_results)
    # deepcopy because passing references around was causing updates to arrays
    # when it shouldn't have
full = copy.deepcopy(full_obj.full_dict)
new_dict = copy.deepcopy(new_obj.full_dict)
# Force a rescaling if you've passed a specified parameter
# if isinstance(override_ratio, float):
# need_ratio = True
# Do some testing to see which dict should be scaled to the other
# I honestly forget why I prioritized the PMT first like this. But the third
# check looks to make a gain 110 prioritize non-110, unless the non-110 includes
# a laser line
scaleTo = ""
if need_ratio:
if isinstance(new_obj, HighSidebandPMT):
scaleTo = "new"
elif isinstance(full_obj, HighSidebandPMT):
scaleTo = "full"
elif new_obj.parameters["gain"] == 110 and full_obj.parameters["gain"] != 110 \
and 0 not in full:
scaleTo = "new"
else:
scaleTo = "full"
if verbose:
print("\tI'm adding these sidebands", sorted(new_dict.keys()))
print("\t With these:", sorted(full.keys()))
overlap = [] # The list that hold which orders are in both dictionaries
missing = [] # How to deal with sidebands that are missing from full but in new.
for new_sb in sorted(new_dict.keys()):
full_sbs = sorted(full.keys())
if new_sb in full_sbs:
overlap.append(new_sb)
elif new_sb not in full_sbs and new_sb < full_sbs[-1]:
# This probably doesn't work with bunches of negative orders
missing.append(new_sb)
if verbose:
print("\t ( overlap:", overlap, ")")
print("\t ( missing:", missing, ")")
# This if-else clause handles how to average together overlapping sidebands
# which are seen in both spectra,
if need_ratio:
# Calculate the appropriate ratio to multiply the new sidebands by.
# I'm not entirely sure what to do with the error of this guy.
ratio_list = []
try:
new_starter = overlap[-1]
if verbose:
print("\n\tadding these ratios,", end=' ')
if len(overlap) > 2:
overlap = [x for x in overlap if (x % 2 == 0)
]# and (x != min(overlap) and (x != max(overlap)))]
if scaleTo == "new":
if verbose:
print("scaling to new :")
for sb in overlap:
ratio_list.append(new_dict[sb][2]/full[sb][2])
if verbose:
print("\t\t{:2.0f}: {:.3e}/{:.3e} ~ {:.3e},".format(sb, new_dict[sb][2],
full[sb][2], ratio_list[-1]))
# new_ratio = 1 06/11/18 Not sure what these were used for
ratio = np.mean(ratio_list)
else:
if verbose:
print("scaling to full:")
for sb in overlap:
ratio_list.append(full[sb][2] / new_dict[sb][2])
if verbose:
print("\t\t{:2.0f}: {:.3e}/{:.3e} ~ {:.3e},".format(sb, full[sb][2],
new_dict[sb][2], ratio_list[-1]))
# new_ratio = np.mean(ratio_list) 06/11/18 Not sure what these were used for
ratio = np.mean(ratio_list)
# Maybe not the best way to do it, performance wise, since you still
# iterate through the list, even though you'll override it.
if isinstance(override_ratio, float):
ratio = override_ratio
if verbose:
print("overriding calculated ratio with user inputted")
error = np.std(ratio_list) / np.sqrt(len(ratio_list))
except IndexError:
# If there's no overlap (which you shouldn't let happen), hardcode a ratio
# and error. I looked at all the ratios for the overlaps from 6/15/16
# (540ghz para) to get the rough average. Hopefully they hold for all data.
if not overlap:
ratio = 0.1695
error = 0.02
# no overlap, so make sure it grabs all the sidebands
new_starter = min(new_dict.keys())
else:
raise
if verbose:
# print "Ratio list\n\t", ("{:.3g}, "*len(ratio_list))[:-2].format(*ratio_list)
# print "Overlap \n\t", [round(ii, 3) for ii in overlap]
print("\t Ratio: {:.3g} +- {:.3g} ({:.2f}%)\n".format(ratio, error, error/ratio*100))
# Adding the new sidebands to the full set and moving errors around.
# I don't know exactly what to do about the other aspects of the sidebands
# besides the strength and its error.
if scaleTo == "full":
ratios[1] = ratio
for sb in overlap:
if verbose:
print("For SB {:02d}, original strength is {:.3g} +- {:.3g} ({:.3f}%)".format(int(sb), new_dict[sb][2], new_dict[sb][3],
new_dict[sb][3]/new_dict[sb][2]*100
))
new_dict[sb][3] = ratio * new_dict[sb][2] * np.sqrt((error / ratio) ** 2 + (new_dict[sb][3] / new_dict[sb][2]) ** 2)
new_dict[sb][2] = ratio * new_dict[sb][2]
if verbose:
print("\t\t scaled\t\t\t\t{:.3g} +- {:.3g} ({:.3f}%)".format(new_dict[sb][2],
new_dict[sb][3],
new_dict[sb][3]/new_dict[sb][2]*100))
print("\t\t full\t\t\t\t\t{:.3g} +- {:.3g} ({:.3f}%)".format(full[sb][2],
full[sb][3],
full[sb][3]/full[sb][2]*100))
sb_error = np.sqrt(full[sb][3] ** (-2) + new_dict[sb][3] ** (-2)) ** (-1)
avg = (full[sb][2] / (full[sb][3] ** 2) + new_dict[sb][2] / (
new_dict[sb][3] ** 2)) / (full[sb][3] ** (-2) + new_dict[sb][3] ** (-2))
full[sb][2] = avg
full[sb][3] = sb_error
if verbose:
print("\t\t replaced with \t\t{:.3g} +- {:.3g} ({:.3f}%)".format(full[sb][2],
full[sb][3],
full[sb][3]/full[sb][2]*100))
print()
lw_error = np.sqrt(full[sb][5] ** (-2) + new_dict[sb][5] ** (-2)) ** (-1)
lw_avg = (full[sb][4] / (full[sb][5] ** 2) + new_dict[sb][4] / (
new_dict[sb][5] ** 2)) / (
full[sb][5] ** (-2) + new_dict[sb][5] ** (-2))
full[sb][4] = lw_avg
full[sb][5] = lw_error # This may not be the exactly right way to calculate the error
else:
ratios[0] = ratio
for sb in overlap:
full[sb][3] = ratio * full[sb][2] * np.sqrt((error / ratio) ** 2 + (full[sb][3] / full[sb][2]) ** 2)
full[sb][2] = ratio * full[sb][2]
sberror = np.sqrt(full[sb][3] ** (-2) + new_dict[sb][3] ** (-2)) ** (-1)
avg = (full[sb][2] / (full[sb][3] ** 2) + new_dict[sb][2] / (
new_dict[sb][3] ** 2)) / (full[sb][3] ** (-2) + new_dict[sb][3] ** (-2))
full[sb][2] = avg
full[sb][3] = sberror
lw_error = np.sqrt(full[sb][5] ** (-2) + new_dict[sb][5] ** (-2)) ** (-1)
lw_avg = (full[sb][4] / (full[sb][5] ** 2) + new_dict[sb][4] / (
new_dict[sb][5] ** 2)) / (
full[sb][5] ** (-2) + new_dict[sb][5] ** (-2))
full[sb][4] = lw_avg
full[sb][5] = lw_error # This may not be the exactly right way to calculate the error
else: # not needing a new ratio
try:
new_starter = overlap[-1] # This grabs the sideband order where only the new dictionary has
# sideband information. It's not clear why it necessarily has to be
# at this line.
overlap = [x for x in overlap if (x % 2 == 0)
] # and (x != min(overlap) and (x != max(overlap)))]
# This cuts out the lowest order sideband in the overlap for mysterious reasons
for sb in overlap: # This for loop average two data points weighted by their relative errors
if verbose:
print("The sideband", sb)
print("Old value", full[sb][4] * 1000)
print("Add value", new_dict[sb][4] * 1000)
try:
error = np.sqrt(full[sb][3] ** (-2) + new_dict[sb][3] ** (-2)) ** (-1)
avg = (full[sb][2] / (full[sb][3] ** 2) + new_dict[sb][2] / (new_dict[sb][3] ** 2)) / (
full[sb][3] ** (-2) + new_dict[sb][3] ** (-2))
full[sb][2] = avg
full[sb][3] = error
except RuntimeWarning:
raise IOError()
lw_error = np.sqrt(full[sb][5] ** (-2) + new_dict[sb][5] ** (-2)) ** (-1)
lw_avg = (full[sb][4] / (full[sb][5] ** 2) + new_dict[sb][4] / (new_dict[sb][5] ** 2)) / (
full[sb][5] ** (-2) + new_dict[sb][5] ** (-2))
full[sb][4] = lw_avg
full[sb][5] = lw_error # This may not be the exactly right way to calculate the error
if verbose:
print("New value", lw_avg * 1000)
except:
new_starter = 0 # I think this makes things work when there's no overlap
if verbose:
print("appending new elements. new_starter={}".format(new_starter))
for sb in [x for x in list(new_dict.keys()) if ((x > new_starter) or (x in missing))]:
full[sb] = new_dict[sb]
if scaleTo == "full":
full[sb][2] = ratio * full[sb][2]
full[sb][3] = full[sb][2] * np.sqrt((error / ratio) ** 2 + (ratio * full[sb][3] / full[sb][2]) ** 2)
if scaleTo == "new":
for sb in set(full.keys()) - set(sorted(new_dict.keys())[:]):
full[sb][2] *= ratio
# TODO: I think this is an invalid error
            # propagation (since ratio has error associated with it)
full[sb][3] *= ratio
if verbose:
print("I made this dictionary", sorted(full.keys()))
print('-'*19)
    return full
def stitch_hsg_dicts_old(full, new_dict, need_ratio=False, verbose=False):
"""
This helper function takes a FullHighSideband.full_dict attribute and a sideband
object, either CCD or PMT and smushes the new sb_results into the full_dict.
    The first input doesn't change, so if there's a PMT set of data involved, it
should be in the full variable to keep the laser normalization intact.
This function almost certainly does not work for stitching many negative orders
    in its current state
11/14/16
--------
The original function has been updated to take the full object (instead of
the dicts alone) to better handle calculating ratios when stitching. This is called
once things have been parsed in the original function (or legacy code where dicts
are passed instead of the object)
Inputs:
full = full_dict from FullHighSideband, or HighSidebandPMT. It's important
that it contains lower orders than the new_dict.
new_dict = another full_dict.
need_ratio = If gain or other parameters aren't equal and must resort to
calculating the ratio instead of the measurements being equivalent.
Changing integration time still means N photons made M counts,
but changing gain or using PMT or whatever does affect things.
Returns:
full = extended version of the input full. Overlapping sidebands are
averaged because that makes sense?
"""
if verbose:
print("I'm adding these sidebands in old stitcher", sorted(new_dict.keys()))
overlap = [] # The list that hold which orders are in both dictionaries
missing = [] # How to deal with sidebands that are missing from full but in new.
for new_sb in sorted(new_dict.keys()):
full_sbs = sorted(full.keys())
if new_sb in full_sbs:
overlap.append(new_sb)
elif new_sb not in full_sbs and new_sb < full_sbs[-1]:
# This probably doesn't work with bunches of negative orders
missing.append(new_sb)
if verbose:
print("overlap:", overlap)
print("missing:", missing)
# This if-else clause handles how to average together overlapping sidebands
# which are seen in both spectra,
if need_ratio:
# Calculate the appropriate ratio to multiply the new sidebands by.
# I'm not entirely sure what to do with the error of this guy.
ratio_list = []
#print '\n1979\nfull[2]', full[0][2]
try:
new_starter = overlap[-1]
if len(overlap) > 2:
overlap = [x for x in overlap if (x % 2 == 0)
]#and (x != min(overlap) and (x != max(overlap)))]
for sb in overlap:
ratio_list.append(full[sb][2] / new_dict[sb][2])
ratio = np.mean(ratio_list)
# print
# print '-'*15
# print "ratio for {}: {}".format()
error = np.std(ratio_list) / np.sqrt(len(ratio_list))
except IndexError:
# If there's no overlap (which you shouldn't let happen),
# hardcode a ratio and error.
# I looked at all the ratios for the overlaps from 6/15/16
# (540ghz para) to get the rough average. Hopefully they hold
# for all data.
if not overlap:
ratio = 0.1695
error = 0.02
# no overlap, so make sure it grabs
# all the sidebands
new_starter = min(new_dict.keys())
else:
raise
if verbose:
print("Ratio list","\n", [round(ii, 3) for ii in ratio_list])
print("Overlap ","\n", [round(ii, 3) for ii in overlap])
print("Ratio", ratio)
print("Error", error)
#print '\n2118\nfull[2]', full[0][2]
# Adding the new sidebands to the full set and moving errors around.
# I don't know exactly what to do about the other aspects of the sidebands
# besides the strength and its error.
for sb in overlap:
full[sb][2] = ratio * new_dict[sb][2]
full[sb][3] = full[sb][2] * np.sqrt((error / ratio) ** 2 + (new_dict[sb][3] / new_dict[sb][2]) ** 2)
#print '\n2125\nfull[2]', full[0][3]
# Now for linewidths
lw_error = np.sqrt(full[sb][5] ** (-2) + new_dict[sb][5] ** (-2)) ** (-1)
lw_avg = (full[sb][4] / (full[sb][5] ** 2) + new_dict[sb][4] / (new_dict[sb][5] ** 2)) / (
full[sb][5] ** (-2) + new_dict[sb][5] ** (-2))
full[sb][4] = lw_avg
full[sb][5] = lw_error
#print '\n2132\nfull[2]', full[0][2]
else:
try:
new_starter = overlap[-1] # This grabs the sideband order where only the new dictionary has
# sideband information. It's not clear why it necessarily has to be
# at this line.
overlap = [x for x in overlap if (x % 2 == 0) and (x != min(overlap) and (x != max(overlap)))]
# This cuts out the lowest order sideband in the overlap for mysterious reasons
for sb in overlap: # This for loop average two data points weighted by their relative errors
if verbose:
print("The sideband", sb)
print("Old value", full[sb][4] * 1000)
print("Add value", new_dict[sb][4] * 1000)
error = np.sqrt(full[sb][3] ** (-2) + new_dict[sb][3] ** (-2)) ** (-1)
avg = (full[sb][2] / (full[sb][3] ** 2) + new_dict[sb][2] / (new_dict[sb][3] ** 2)) / (
full[sb][3] ** (-2) + new_dict[sb][3] ** (-2))
full[sb][2] = avg
full[sb][3] = error
lw_error = np.sqrt(full[sb][5] ** (-2) + new_dict[sb][5] ** (-2)) ** (-1)
lw_avg = (full[sb][4] / (full[sb][5] ** 2) + new_dict[sb][4] / (new_dict[sb][5] ** 2)) / (
full[sb][5] ** (-2) + new_dict[sb][5] ** (-2))
full[sb][4] = lw_avg
full[sb][5] = lw_error # This may not be the exactly right way to calculate the error
if verbose:
print("New value", lw_avg * 1000)
except:
new_starter = 0 # I think this makes things work when there's no overlap
if verbose:
print("appending new elements. new_starter={}".format(new_starter))
# This loop will add the sidebands which were only seen in the second step
for sb in [x for x in list(new_dict.keys()) if ((x >= new_starter) or (x in missing))]:
full[sb] = new_dict[sb]
if need_ratio:
full[sb][2] = ratio * full[sb][2]
full[sb][3] = full[sb][2] * np.sqrt((error / ratio) ** 2 + (ratio * full[sb][3] / full[sb][2]) ** 2)
#print '\n2164\nfull[2]', full[0][2]
if verbose:
print("I made this dictionary", sorted(full.keys()))
return full
def save_parameter_sweep_no_sb(spectrum_list, file_name, folder_str, param_name, unit,
verbose=False):
"""
This function will take a fully processed list of spectrum objects and
slice Spectrum.sb_fits appropriately to get an output like:
"Parameter" | SB1 freq | err | SB1 amp | error | SB1 linewidth | error | SB2...| SBn...|
param1 | . |
param2 | . |
.
.
.
Currently I'm thinking fuck the offset y0
After constructing this large matrix, it will save it somewhere.
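    Example (illustrative; the file and folder names are hypothetical):
        save_parameter_sweep_no_sb(spectrum_list, "power_sweep", "Sweeps",
                                   "nir_power", "mW")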
"""
spectrum_list.sort(key=lambda x: x.parameters[param_name])
included_spectra = dict()
param_array = None
sb_included = []
for spec in spectrum_list:
sb_included = sorted(list(set(sb_included + list(spec.full_dict.keys()))))
included_spectra[spec.fname.split('/')[-1]] = spec.parameters[param_name]
        # If these are from summed spectra, then only the first file name
# from that sum will show up here, which should be fine?
if verbose:
# print "full name:", spectrum_list[0].fname
print("included names:", included_spectra)
print("sb_included:", sb_included)
for spec in spectrum_list:
temp_dict = {} # This is different from full_dict in that the list has the
# sideband order as the zeroth element.
if verbose:
print("the sb_results:", spec.sb_results)
if spec.sb_results.ndim == 1: continue
for index in range(len(spec.sb_results[:, 0])):
if verbose:
print("my array slice:", spec.sb_results[index, :])
temp_dict[int(round(spec.sb_results[index, 0]))] = np.array(
spec.sb_results[index, 1:])
if verbose:
print(temp_dict)
for sb in sb_included:
blank = np.zeros(6)
# print "checking sideband order:", sb
# print "blank", blank
if sb not in temp_dict:
# print "\nNeed to add sideband order:", sb
temp_dict[sb] = blank
try: # Why is this try-except here?
spec_data = np.array([float(spec.parameters[param_name])])
except:
spec_data = np.array([float(spec.parameters[param_name][:2])])
for key in sorted(temp_dict.keys()):
# print "I am going to hstack this:", temp_dict[key]
spec_data = np.hstack((spec_data, temp_dict[key]))
try:
param_array = np.vstack((param_array, spec_data))
except:
param_array = np.array(spec_data)
if verbose:
print("The shape of the param_array is:", param_array.shape)
# print "The param_array itself is:", param_array
'''
param_array_norm = np.array(param_array).T # python iterates over rows
for elem in [x for x in xrange(len(param_array_norm)) if (x-1)%7 == 3]:
temp_max = np.max(param_array_norm[elem])
param_array_norm[elem] = param_array_norm[elem] / temp_max
param_array_norm[elem + 1] = param_array_norm[elem + 1] / temp_max
'''
snipped_array = param_array[:, 0]
norm_array = param_array[:, 0]
if verbose:
print("Snipped_array is", snipped_array)
for ii in range(len(param_array.T)):
if (ii - 1) % 6 == 0:
if verbose:
print("param_array shape", param_array[:, ii])
snipped_array = np.vstack((snipped_array, param_array[:, ii]))
norm_array = np.vstack((norm_array, param_array[:, ii]))
elif (ii - 1) % 6 == 2:
snipped_array = np.vstack((snipped_array, param_array[:, ii]))
temp_max = np.max(param_array[:, ii])
norm_array = np.vstack((norm_array, param_array[:, ii] / temp_max))
elif (ii - 1) % 6 == 3:
snipped_array = np.vstack((snipped_array, param_array[:, ii]))
norm_array = np.vstack((norm_array, param_array[:, ii] / temp_max))
snipped_array = snipped_array.T
norm_array = norm_array.T
try:
os.mkdir(folder_str)
except OSError as e:
if e.errno == errno.EEXIST:
pass
else:
raise
norm_name = file_name + '_norm.txt'
snip_name = file_name + '_snip.txt'
file_name = file_name + '.txt'
try:
included_spectra_str = json.dumps(included_spectra, sort_keys=True, indent=4,
separators=(',', ': '))
except:
print("Source: save_parameter_sweep\nJSON FAILED")
return
included_spectra_str = included_spectra_str.replace('\n', '\n#')
included_spectra_str += '\n#' * (99 - included_spectra_str.count('\n'))
origin_import1 = param_name
origin_import2 = unit
origin_import3 = ""
for order in sb_included:
origin_import1 += "Frequency,error,Sideband strength,error,Linewidth,error"
origin_import2 += ",eV,,arb. u.,,meV,"
origin_import3 += ",{0},,{0},,{0},".format(order)
origin_total = origin_import1 + "\n" + origin_import2 + "\n" + origin_import3
origin_import1 = param_name
origin_import2 = unit
origin_import3 = ""
for order in sb_included:
origin_import1 += ",Frequency,Sideband strength,error"
origin_import2 += ",eV,arb. u.,"
origin_import3 += ",{0},{0},".format(order)
origin_snip = origin_import1 + "\n" + origin_import2 + "\n" + origin_import3
header_total = '#' + included_spectra_str + '\n' + origin_total
header_snip = '#' + included_spectra_str + '\n' + origin_snip
# print "Spec header: ", spec_header
if verbose:
print("the param_array is:", param_array)
np.savetxt(os.path.join(folder_str, file_name), param_array, delimiter=',',
header=header_total, comments='', fmt='%0.6e')
np.savetxt(os.path.join(folder_str, snip_name), snipped_array, delimiter=',',
header=header_snip, comments='', fmt='%0.6e')
np.savetxt(os.path.join(folder_str, norm_name), norm_array, delimiter=',',
header=header_snip, comments='', fmt='%0.6e')
if verbose:
print("Saved the file.\nDirectory: {}".format(
os.path.join(folder_str, file_name)))
def save_parameter_sweep(spectrum_list, file_name, folder_str, param_name, unit,
wanted_indices = [1, 3, 4], skip_empties = False, verbose=False,
header_dict = {}, only_even=False):
"""
This function will take a fully processed list of spectrum objects and
slice Spectrum.sb_fits appropriately to get an output like:
"Parameter" | SB1 freq | err | SB1 amp | error | SB1 linewidth | error | SB2...| SBn...|
param1 | . |
param2 | . |
.
.
.
Currently I'm thinking fuck the offset y0
After constructing this large matrix, it will save it somewhere.
    This function has been updated to take a list of indices to slice for the return
    values
skip_empties: If False, will add a row of zeroes for the parameter even if no sidebands
are found. If True, will not add a line for that parameter
only_even: don't include odd orders in the saved sweep
[sb number, Freq (eV), Freq error (eV), Gauss area (arb.), Area error, Gauss linewidth (eV), Linewidth error (eV)]
[ 0 , 1 , 2, , 3 , 4 , 5 , 6 ]
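    Example (a hedged sketch; assumes each spectrum carries
    parameters["field_strength"] = {"mean": ..., "std": ...}, and the file and
    folder names are hypothetical):
        save_parameter_sweep(spec_list, "field_sweep", "Sweeps",
                             ["field_strength", "mean"], "kV/cm",
                             wanted_indices=[1, 3, 4], only_even=True)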
"""
if isinstance(param_name, list):
# if you pass two things because the param you want
# is in a dict (e.g. field strength has mean/std)
# do it that way
param_name_list = list(param_name) # keep reference to old one
paramGetter = lambda x: x.parameters[param_name_list[0]][param_name_list[1]]
# Keep the name for labeling things later on
param_name = param_name[0]
else:
paramGetter = lambda x: x.parameters[param_name]
# Sort all of the spectra based on the desired key
spectrum_list.sort(key=paramGetter)
# keep track of which file name corresponds to which parameter which gets put in
included_spectra = dict()
# The big array which will be stacked up to keep all of the sideband details vs desired parameter
param_array = None
# list of which sidebands are seen throughout.
sb_included = []
# how many parameters (area, strength, linewidth, pos, etc.) are there?
    # Here in case the software changes and more things are kept in
# sb results. Needed to handle how to slice the arrays
try:
num_params = spectrum_list[0].sb_results.shape[1]
except IndexError:
# There's a file with only 1 sb and it happens to be first
# in the list.
num_params = spectrum_list[0].sb_results.shape[0]
except AttributeError:
# The first file has no sidebands, so just hardcode it, as stated below.
num_params=0
# Rarely, there's an issue where I'm doing some testing and there's a set
# where the first file has no sidebands in it, so the above thing returns 0
# It seems really silly to do a bunch of testing to try and correct for that, so
# I'm going to hardcode the number of parameters.
if num_params == 0:
num_params = 7
# loop through all of them once to figure out which sidebands are seen in all spectra
for spec in spectrum_list:
try:
# use sets to keep track of only unique sidebands
sb_included = sorted(list(set(sb_included + list(spec.full_dict.keys()))))
except AttributeError:
print("No full dict?", spec.fname)
print(spec.sb_list)
        # If these are from summed spectra, then only the first file name
# from that sum will show up here, which should be fine?
included_spectra[spec.fname.split('/')[-1]] = paramGetter(spec)
if only_even:
sb_included = [ii for ii in sb_included if not ii%2]
if verbose:
print("included names:", included_spectra)
print("sb_included:", sb_included)
for spec in spectrum_list:
        # Flag to track whether there are no sidebands or not. Used to skip
# issues when trying to index on empty arrays
noSidebands = False
if verbose:
print("the sb_results:", spec.sb_results)
# if no sidebands were found, skip this one
try:
# TODO: (08/14/18) the .ndim==1 isn't the correct check, since it fails
# when looking at the laser line. Need to test this with a real
# empty data set, vs data set with 1 sb
#
#
# (08/28/18) I'm not sure what the "not spec" is trying to handle
# spec.sb_results is None occurs when _no_ sidebands were fit
# spec.sb_results.ndim == 1 happens when only one sideband is found
if not spec or spec.sb_results is None or spec.sb_results.ndim == 1:
if spec.sb_results is None:
                    # Flag that no sidebands were found
noSidebands = True
elif spec.sb_results[0] == 0:
                    # Cast it to 2d to allow slicing later on. Not sure why this is
# only done if the laser line is the one found.
spec.sb_results = np.atleast_2d(spec.sb_results)
elif skip_empties:
continue
else:
noSidebands = True
except (AttributeError, TypeError):
# continue
raise
# Make an sb_results of all zeroes where we'll fill
# in the sideband info we found
new_spec = np.zeros((len(sb_included), num_params))
if not noSidebands:
sb_results = spec.sb_results.copy()
saw_sbs = sb_results[:, 0]
found_sb = sorted(list(set(sb_included) & set(saw_sbs)))
found_idx = [sb_included.index(ii) for ii in found_sb]
try:
new_spec[:, 0] = sb_included
except:
print("new_spec", new_spec)
raise
try:
if only_even:
new_spec[found_idx, :] = sb_results[sb_results[:,0]%2==0]
else:
new_spec[found_idx, :] = sb_results
except ValueError:
print(spec.fname)
print("included:", sb_included)
print("found:", found_sb, found_idx)
print(new_spec.shape, sb_results.shape)
print(sb_results)
print(new_spec)
raise
spec_data = np.insert(new_spec.flatten(), 0, float(paramGetter(spec)))
try:
param_array = np.row_stack((param_array, spec_data))
except:
param_array = np.array(spec_data)
if param_array.ndim == 1: # if you only pass one spectra
param_array = param_array[None, :] # recast it to 2D for slicing
# the indices we want from the param array from the passed argument
snip = wanted_indices
N = len(sb_included)
# run it out across all of the points across the param_array
snipped_indices = [0] + list(
1+np.array(snip * N) + num_params * np.array(sorted(list(range(N)) * len(snip))))
snipped_array = param_array[:, snipped_indices]
norm_array = snipped_array.copy()
# normalize the area if it's requested
if 3 in snip:
num_snip = len(snip)
strength_idx = snip.index(3)
if 4 in snip:
#normalize error first if it was requested
idx = snip.index(4)
norm_array[:, 1 + idx + np.arange(N) * num_snip] /= norm_array[:,1 + strength_idx + np.arange(N) * num_snip].max(axis=0)
strength_idx = snip.index(3)
norm_array[:, 1+strength_idx+np.arange(N)*num_snip]/=norm_array[:, 1+strength_idx+np.arange(N)*num_snip].max(axis=0)
try:
os.mkdir(folder_str)
except TypeError:
        pass # if you pass None as folder_str (for using BytesIO)
except OSError as e:
if e.errno == errno.EEXIST:
pass
else:
raise
included_spectra.update(header_dict)
try:
included_spectra_str = json.dumps(included_spectra, sort_keys=True, indent=4,
separators=(',', ': '))
except:
print("Source: save_parameter_sweep\nJSON FAILED")
return
included_spectra_str = included_spectra_str.replace('\n', '\n#')
included_spectra_str += '\n#' * (99 - included_spectra_str.count('\n'))
# this will make the header chunk for the full, un-sliced data set
# TODO: fix naming so you aren't looping twice
### 1/9/18 This isn't needed, right? Why isn't it deleted?
origin_import1 = param_name
origin_import2 = unit
origin_import3 = ""
for order in sb_included:
origin_import1 += ",sideband,Frequency,error,Sideband strength,error,Linewidth,error"
origin_import2 += ",order,eV,eV,arb. u.,arb.u.,meV,meV"
origin_import3 += ",,{0},,{0},,{0},".format(order)
origin_total = origin_import1 + "\n" + origin_import2 + "\n" + origin_import3
# This little chunk will make a chunk block of header strings for the sliced
# data set which can be looped over
origin_import1 = param_name
origin_import2 = unit
origin_import3 = ""
wanted_titles = ["Sideband", "Frequency", "error", "Sideband strength","error","Linewidth","error"]
wanted_units = ["order", "eV", "eV", "arb. u.", "arb. u.", "eV", "eV"]
wanted_comments = ["", "{0}", "", "{0}", "", "{0}", ""]
wanted_titles = ",".join([wanted_titles[ii] for ii in wanted_indices])
wanted_units = ",".join([wanted_units[ii] for ii in wanted_indices])
wanted_comments = ",".join([wanted_comments[ii] for ii in wanted_indices])
for order in sb_included:
origin_import1 += ","+wanted_titles
origin_import2 += ","+wanted_units
origin_import3 += ","+wanted_comments.format(order)
origin_snip = origin_import1 + "\n" + origin_import2 + "\n" + origin_import3
header_total = '#' + included_spectra_str + '\n' + origin_total
header_snip = '#' + included_spectra_str + '\n' + origin_snip
# print "Spec header: ", spec_header
if verbose:
print("the param_array is:", param_array)
if isinstance(file_name, list):
if isinstance(file_name[0], io.BytesIO):
np.savetxt(file_name[0], param_array, delimiter=',',
header=header_total, comments='', fmt='%0.6e')
np.savetxt(file_name[1], snipped_array, delimiter=',',
header=header_snip, comments='', fmt='%0.6e')
np.savetxt(file_name[2], norm_array, delimiter=',',
header=header_snip, comments='', fmt='%0.6e')
# Need to reset the file position if you want to read them immediately
# Is it better to do that here, or assume you'll do it later?
# I'm gonna assume here, because I can't currently think of a time when I'd want
# to be at the end of the file
[ii.seek(0) for ii in file_name]
if verbose:
print("Saved the file to bytes objects")
else:
if file_name:
norm_name = file_name + '_norm.txt'
snip_name = file_name + '_snip.txt'
file_name = file_name + '.txt'
np.savetxt(os.path.join(folder_str, file_name), param_array, delimiter=',',
header=header_total, comments='', fmt='%0.6e')
np.savetxt(os.path.join(folder_str, snip_name), snipped_array, delimiter=',',
header=header_snip, comments='', fmt='%0.6e')
np.savetxt(os.path.join(folder_str, norm_name), norm_array, delimiter=',',
header=header_snip, comments='', fmt='%0.6e')
if verbose:
print("Saved the file.\nDirectory: {}".format(os.path.join(folder_str, file_name)))
else:
if verbose:
print("Didn't save")
return sb_included, param_array, snipped_array, norm_array
def save_parameter_sweep_vs_sideband(spectrum_list, file_name,
folder_str, param_name, unit, verbose=False,
wanted_indices = [1, 3, 4]):
"""
Similar to save_parameter_sweep, but the data[:,0] column is sideband number instead of
series, and each set of columns correspond to a series step. Pretty much compiles
all of the fit parameters from the files that are already saved and puts it into
one file to keep from polluting the Origin folder
:param spectrum_list:
:param file_name:
:param folder_str:
:param param_name:
:param unit:
:param verbose:
sb number is automatically prepended, so do not include in slicing list
[sb number, Freq (eV), Freq error (eV), Gauss area (arb.), Area error, Gauss linewidth (eV), Linewidth error (eV)]
[ 0 , 1 , 2, , 3 , 4 , 5 , 6 ]
:return:
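    Example (illustrative; the names are hypothetical):
        save_parameter_sweep_vs_sideband(spec_list, "sweep_vs_sb", "Sweeps",
                                         "spec_step", "arb. u.")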
"""
spectrum_list.sort(key=lambda x: x.parameters[param_name])
included_spectra = dict()
param_array = None
sb_included = []
# what parameters were included (for headers)
params = sorted([x.parameters[param_name] for x in spectrum_list])
for spec in spectrum_list:
sb_included = sorted(list(set(sb_included + list(spec.full_dict.keys()))))
included_spectra[spec.fname.split('/')[-1]] = spec.parameters[param_name]
        # If these are from summed spectra, then only the first file name
# from that sum will show up here, which should be fine?
if verbose:
# print "full name:", spectrum_list[0].fname
print("included names:", included_spectra)
print("sb_included:", sb_included)
param_array = np.array(sb_included)
for spec in spectrum_list:
temp_dict = spec.full_dict.copy()
#prevent breaking if no sidebands in spectrum
if not temp_dict:
if verbose:
print("No sidebands here? {}, {}".format(spec.parameters["series"],
spec.parameters["spec_step"]))
continue
if verbose:
print(temp_dict)
# matrix for holding all of the sb information
# for a given spectrum
spec_matrix = None
for sb in sb_included:
blank = np.zeros(6)
# print "checking sideband order:", sb
# print "blank", blank
sb_data = temp_dict.get(sb, blank)
try:
spec_matrix = np.row_stack((spec_matrix, sb_data))
except:
spec_matrix = sb_data
param_array = np.column_stack((param_array, spec_matrix))
# the indices we want from the param array
# 1- freq, 3-area, 4-area error
snip = wanted_indices
N = len(spectrum_list)
# run it out across all of the points across the param_array
snipped_indices = [0] + list( np.array(snip*N) + 6*np.array(sorted(list(range(N))*len(snip))) )
snipped_array = param_array[:, snipped_indices]
try:
os.mkdir(folder_str)
except OSError as e:
if e.errno == errno.EEXIST:
pass
else:
raise
snip_name = file_name + '_snip.txt'
file_name = file_name + '.txt'
try:
included_spectra_str = json.dumps(included_spectra, sort_keys=True, indent=4, separators=(',', ': '))
except:
print("Source: save_parameter_sweep\nJSON FAILED")
return
included_spectra_str = included_spectra_str.replace('\n', '\n#')
included_spectra_str += '\n#' * (99 - included_spectra_str.count('\n'))
origin_import1 = "Sideband"
origin_import2 = "Order"
origin_import3 = "SB"
for param in params:
origin_import1 += ",Frequency,error,Sideband strength,error,Linewidth,error"
origin_import2 += ",eV,,arb. u.,,meV,"
origin_import3 += ",{0},,{0},,{0},".format(param)
origin_total = origin_import1 + "\n" + origin_import2 + "\n" + origin_import3
# This little chunk will make a chunk block of header strings for the sliced
# data set which can be looped over
origin_import1 = "Sideband"
origin_import2 = "Order"
origin_import3 = "SB"
wanted_titles = ["Sideband", "Frequency", "error", "Sideband strength", "error",
"Linewidth", "error"]
wanted_units = ["order", "eV", "eV", "arb. u.", "arb. u.", "eV", "eV"]
wanted_comments = ["", "{0}", "", "{0}", "", "{0}", ""]
wanted_titles = ",".join([wanted_titles[ii] for ii in wanted_indices])
wanted_units = ",".join([wanted_units[ii] for ii in wanted_indices])
wanted_comments = ",".join([wanted_comments[ii] for ii in wanted_indices])
for param in params:
origin_import1 += "," + wanted_titles
origin_import2 += "," + wanted_units
origin_import3 += "," + wanted_comments.format(param)
origin_snip = origin_import1 + "\n" + origin_import2 + "\n" + origin_import3
header_total = '#' + included_spectra_str + '\n' + origin_total
header_snip = '#' + included_spectra_str + '\n' + origin_snip
# print "Spec header: ", spec_header
if verbose:
print("the param_array is:", param_array)
if file_name: # allow passing false (or empty string) to prevent saving
np.savetxt(os.path.join(folder_str, file_name), param_array, delimiter=',',
header=header_total, comments='', fmt='%0.6e')
np.savetxt(os.path.join(folder_str, snip_name), snipped_array, delimiter=',',
header=header_snip, comments='', fmt='%0.6e')
if verbose:
print("Saved the file.\nDirectory: {}".format(os.path.join(folder_str, file_name)))
return None
def stitchData(dataList, plot=False):
"""
Attempt to stitch together absorbance data. Will translate the second data set
to minimize leastsq between the two data sets.
:param dataList: Iterable of the data sets to be fit. Currently
it only takes the first two elements of the list, but should be fairly
    straightforward to recursively handle a list>2. Shifts the second
data set to overlap the first
elements of dataList can be either np.arrays or Absorbance class,
where it will take the proc_data itself
:param plot: bool whether or not you want the fit iterations to be plotted
(for debugging)
:return: a, a (2,) np.array of the shift
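    Example (an illustrative sketch with synthetic overlapping traces):
        x1 = np.linspace(0, 10, 100)
        x2 = np.linspace(8, 18, 100)
        d1 = np.column_stack((x1, np.exp(-x1 / 5)))
        d2 = np.column_stack((x2, np.exp(-x2 / 5) + 0.05))  # offset copy
        shift = stitchData([d1, d2])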
"""
    # Data coercion, make sure we know what we're working with
first = dataList[0]
if isinstance(first, Absorbance):
first = first.proc_data
second = dataList[1]
if isinstance(second, Absorbance):
second = second.proc_data
if plot:
# Keep a reference to whatever plot is open at call-time
# Useful if the calling script has plots before and after, as
# omitting this will cause future plots to be added to figures here
firstFig = plt.gcf()
plt.figure("Stitcher")
# Plot the raw input data
plt.plot(*first.T)
plt.plot(*second.T)
# Algorithm is set up such that the "second" data set spans the
# higher domain than first. Need to enforce this, and remember it
# so the correct shift is applied
flipped = False
if max(first[:, 0]) > max(second[:, 0]):
flipped = True
first, second = second, first
a, _, _, msg, err = spo.leastsq(fitter, [0.0001, 0.01 * max(first[:, 1])], args=(second, first), full_output=1)
# print "a", a
if plot:
# Revert back to the original figure, as per top comments
plt.figure(firstFig.number)
# Need to invert the shift if we flipped which
# model we're supposed to move
if flipped: a *= -1
return a
def integrateData(data, t1, t2, ave=False):
"""
Integrate a discrete data set for a
given time period. Sums the data between
the given bounds and divides by dt. Optional
argument to divide by T = t2-t1 for calculating
averages.
data = 2D array. data[:,0] = t, data[:,1] = y
t1 = start of integration
t2 = end of integration
if data is a NxM, with M>=3, it will take the
third column to be the errors of the points,
and return the error as the quadrature sum
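    Example (illustrative):
        t = np.linspace(0, 2, 201)
        data = np.column_stack((t, np.sin(t)))
        tot = integrateData(data, 0.0, 1.0)            # ~ 1 - cos(1) ~ 0.46
        avg = integrateData(data, 0.0, 1.0, ave=True)  # average over [0, 1]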
"""
t = data[:, 0]
y = data[:, 1]
    if data.shape[1] >= 3:
errors = data[:, 2]
else:
errors = np.ones_like(y) * np.nan
gt = set(np.where(t > t1)[0])
lt = set(np.where(t < t2)[0])
# find the intersection of the sets
vals = list(gt & lt)
# Calculate the average
tot = np.sum(y[vals])
error = np.sqrt(np.sum(errors[vals] ** 2))
# Multiply by sampling
tot *= (t[1] - t[0])
error *= (t[1] - t[0])
if ave:
# Normalize by total width if you want an average
tot /= (t2 - t1)
        error /= (t2 - t1)
if not np.isnan(error):
return tot, error
return tot
def fourier_prep(x_vals, y_vals, num=None):
"""
    This function will take arrays of unevenly spaced x- and y-values and make
    them evenly spaced for use in fft-related things.
And remove nans!
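    Example (illustrative):
        x = np.sort(np.random.uniform(0, 10, 100))  # unevenly spaced
        y = np.sin(x)
        y[3] = np.nan
        even_x, even_y = fourier_prep(x, y, num=256)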
"""
y_vals = handle_nans(y_vals)
spline = spi.interp1d(x_vals, y_vals,
kind='linear') # for some reason kind='quadratic' doesn't work? returns all nans
if num is None:
num = len(x_vals)
even_x = np.linspace(x_vals[0], x_vals[-1], num=num)
even_y = spline(even_x)
# even_y = handle_nans(even_y)
return even_x, even_y
def handle_nans(y_vals):
"""
This function removes nans and replaces them with linearly interpolated
values. It requires that the array maps from equally spaced x-values.
Taken from Stack Overflow: "Interpolate NaN values in a numpy array"
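    Example (illustrative):
        y = np.array([1.0, np.nan, 3.0])
        handle_nans(y)  # -> array([1., 2., 3.]); also modifies y in place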
"""
nan_idx = np.isnan(y_vals)
my_lambda = lambda x: x.nonzero()[0] # Returns the indices where Trues reside
y_vals[nan_idx] = np.interp(my_lambda(nan_idx), my_lambda(~nan_idx), y_vals[~nan_idx])
return y_vals
def calc_laser_frequencies(spec, nir_units="eV", thz_units="eV",
bad_points=-2, inspect_plots=False):
"""
Calculate the NIR and FEL frequency for a spectrum
:param spec: HSGCCD object to fit
:type spec: HighSidebandCCD
:param nir_units: str of desired units.
Options: wavenumber, eV, meV, THz, GHz, nm
:param thz_units: str of desired units.
Options: wavenumber, eV, meV, THz, GHz, nm
:param bad_points: How many bad points which shouldn't be used
to calculate the frequencies (generally because the last
few points are noisy and unreliable)
:return: <NIR freq>, <THz freq>
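    Example (illustrative; spec stands in for a processed HighSidebandCCD):
        nir, thz = calc_laser_frequencies(spec, nir_units="nm",
                                          thz_units="wavenumber")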
"""
if not hasattr(spec, "sb_results"):
spec.guess_sidebands()
spec.fit_sidebands()
sidebands = spec.sb_results[:, 0]
locations = spec.sb_results[:, 1]
errors = spec.sb_results[:, 2]
try:
p = np.polyfit(sidebands[1:bad_points],
# This is 1 because the peak picker function was calling the 10th order the 9th
locations[1:bad_points], deg=1)
except TypeError:
# if there aren't enough sidebands to fit, give -1
p = [-1, -1]
NIRfreq = p[1]
THzfreq = p[0]
if inspect_plots:
plt.figure("Frequency Fit")
plt.errorbar(sidebands, locations, errors, marker='o')
plt.errorbar(sidebands[:bad_points], locations[:bad_points],
errors[:bad_points], marker='o')
plt.plot(sidebands, np.polyval(p, sidebands))
converter = {
"eV": lambda x: x,
"meV": lambda x: 1000. * x,
"wavenumber": lambda x: 8065.6 * x,
"THz": lambda x: 241.80060 * x,
"GHz": lambda x: 241.80060 * 1e3 * x,
"nm": lambda x: 1239.83 / x
}
freqNIR = converter.get(nir_units, converter["eV"])(NIRfreq)
freqTHz = converter.get(thz_units, converter["eV"])(THzfreq)
return freqNIR, freqTHz
def get_data_and_header(fname, returnOrigin = False):
"""
Given a file to a raw data file, returns the data
and the json decoded header.
Can choose to return the origin header as well
:param fname: Filename to open
:return: data, header (dict)
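    Example (illustrative; the path is hypothetical):
        data, header = get_data_and_header("Spectra/hsg_0001.txt")
        data, header, oh = get_data_and_header("Spectra/hsg_0001.txt",
                                               returnOrigin=True)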
"""
with open(fname) as fh:
line = fh.readline()
header_string = ''
while line[0]=='#':
header_string += line[1:]
line = fh.readline()
# image files don't have an origin header
if not "Images" in fname:
oh = line
# last readline in loop removes first line in Origin Header
# strip the remaining two
oh += fh.readline()
oh += fh.readline()[:-1] #remove final \n
# data = np.genfromtxt(fh, delimiter=',')
data = np.genfromtxt(fname, delimiter=',')
header = json.loads(header_string)
if returnOrigin:
return data, header, oh
return data, header
def convertTime(timeStr):
"""
The data file headers have the timestamp of data collection. Sometimes you want to
convert that to numbers for data's sake, but I constantly forget the functions
to convert it from the time-stamp string. So here you go
:param timeStr: the time as a string from the data file
:return: int of the time since the epoch
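    Example (illustrative; header["date"] is a hypothetical key whose value
    must match the "%x %X%p" format the files use):
        seconds_since_epoch = convertTime(header["date"])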
"""
import time
return time.mktime(time.strptime(timeStr, "%x %X%p"))
# photonConverter[A][B](x):
# convert x from A to B.
photon_converter = {
"nm": {"nm": lambda x: x, "eV": lambda x:1239.84/x, "wavenumber": lambda x: 10000000./x},
"eV": {"nm": lambda x: 1239.84/x, "eV": lambda x: x, "wavenumber":lambda x: 8065.56 * x},
"wavenumber": {"nm": lambda x: 10000000./x, "eV": lambda x: x/8065.56, "wavenumber": lambda x: x}
}
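# Example (illustrative): convert 800 nm to eV, then to wavenumber
#   photon_converter["nm"]["eV"](800)           # ~1.55 eV
#   photon_converter["eV"]["wavenumber"](1.55)  # ~12502 cm^-1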
####################
# Smoothing functions
####################
def savitzky_golay(y, window_size, order, deriv=0, rate=1):
r"""Smooth (and optionally differentiate) data with a Savitzky-Golay filter.
The Savitzky-Golay filter removes high frequency noise from data.
It has the advantage of preserving the original shape and
features of the signal better than other types of filtering
approaches, such as moving averages techniques.
Parameters
----------
y : array_like, shape (N,)
the values of the time history of the signal.
window_size : int
the length of the window. Must be an odd integer number.
order : int
the order of the polynomial used in the filtering.
Must be less then `window_size` - 1.
deriv: int
the order of the derivative to compute (default = 0 means only smoothing)
Returns
-------
ys : ndarray, shape (N)
the smoothed signal (or it's n-th derivative).
Notes
-----
The Savitzky-Golay is a type of low-pass filter, particularly
suited for smoothing noisy data. The main idea behind this
approach is to make for each point a least-square fit with a
polynomial of high order over a odd-sized window centered at
the point.
Examples
--------
t = np.linspace(-4, 4, 500)
y = np.exp( -t**2 ) + np.random.normal(0, 0.05, t.shape)
ysg = savitzky_golay(y, window_size=31, order=4)
import matplotlib.pyplot as plt
plt.plot(t, y, label='Noisy signal')
plt.plot(t, np.exp(-t**2), 'k', lw=1.5, label='Original signal')
plt.plot(t, ysg, 'r', label='Filtered signal')
plt.legend()
plt.show()
References
----------
.. [1] A. Savitzky, M. J. E. Golay, Smoothing and Differentiation of
Data by Simplified Least Squares Procedures. Analytical
Chemistry, 1964, 36 (8), pp 1627-1639.
.. [2] Numerical Recipes 3rd Edition: The Art of Scientific Computing
W.H. Press, S.A. Teukolsky, W.T. Vetterling, B.P. Flannery
Cambridge University Press ISBN-13: 9780521880688
source:
http://scipy.github.io/old-wiki/pages/Cookbook/SavitzkyGolay
"""
import numpy as np
from math import factorial
try:
        window_size = np.abs(int(window_size))
        order = np.abs(int(order))
except ValueError as msg:
raise ValueError("window_size and order have to be of type int")
if window_size % 2 != 1 or window_size < 1:
raise TypeError("window_size size must be a positive odd number")
if window_size < order + 2:
raise TypeError("window_size is too small for the polynomials order")
order_range = list(range(order + 1))
half_window = (window_size - 1) // 2
# precompute coefficients
b = np.mat([[k ** i for i in order_range] for k in range(-half_window, half_window + 1)])
m = np.linalg.pinv(b).A[deriv] * rate ** deriv * factorial(deriv)
# pad the signal at the extremes with
# values taken from the signal itself
firstvals = y[0] - np.abs(y[1:half_window + 1][::-1] - y[0])
lastvals = y[-1] + np.abs(y[-half_window - 1:-1][::-1] - y[-1])
y = np.concatenate((firstvals, y, lastvals))
return np.convolve(m[::-1], y, mode='valid')
def fft_filter(data, cutoffFrequency=1520, inspectPlots=False, tryFitting=False, freqSigma=50, ftol=1e-4,
isInteractive=False):
"""
Performs an FFT, then fits a peak in frequency around the
input with the input width.
If only data is given, it will cut off all frequencies above the default value.
inspectPlots = True will plot the FFT and the filtering at each step, as well as the results
tryFitting = True will try to fit the peak in frequency space centered at the cutoffFrequency
and with a width of freqSigma, using the background function above. Will replace
the peak with the background function. Feature not very well tested
isInteractive: Will pop up interactive windows to move the cutoff frequency and view the
    FFT in real time. Requires pyqtgraph and PyQt5 installed (PyQt5 is standard with
anaconda/winpython, but pyqtgraph is not)
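    Example (illustrative; data is an Nx2 array of (x, y) points):
        smoothed = fft_filter(data, cutoffFrequency=1520, inspectPlots=False)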
"""
# Make a copy so we can return the same thing
retData = np.array(data)
x = np.array(retData[:, 0])
y = np.array(retData[:, -1])
    # Lets you play with zero padding.
zeroPadding = len(x)
N = len(x)
if isInteractive:
try:
import pyqtgraph as pg
from PyQt5 import QtCore, QtWidgets
except:
raise ImportError("Cannot do interactive plotting without pyqtgraph installed")
        # Need to make some basic classes for signals and slots to make things simple
k = fft.fftfreq(zeroPadding, x[1] - x[0])
Y = fft.fft(y, n=zeroPadding)
# Make the windows
fftWin = FFTWin(k, np.abs(Y))
realWin = RealWin(np.array(retData), fftWin)
realWin.show()
# Need to pause the program until the frequency is selected
        # Done with this QEventLoop.
loop = QtCore.QEventLoop()
realWin.sigClosed.connect(loop.exit)
loop.exec_()
# Return with the desired output value
return fft_filter(retData, fftWin.line.value())
if inspectPlots:
plt.figure("Real Space")
plt.plot(x, y, label="Input Data")
    # Replicate Origin directly
# http://www.originlab.com/doc/Origin-Help/Smooth-Algorithm
# "rotate" the data set so it ends at 0,
# enforcing a periodicity in the data. Otherwise
# oscillatory artifacts result at the ends
onePerc = int(0.01 * N)
x1 = np.mean(x[:onePerc])
x2 = np.mean(x[-onePerc:])
y1 = np.mean(y[:onePerc])
y2 = np.mean(y[-onePerc:])
m = (y1 - y2) / (x1 - x2)
b = y1 - m * x1
flattenLine = m * x + b
y -= flattenLine
if inspectPlots:
plt.plot(x, y, label="Rotated Data")
# Perform the FFT and find the appropriate frequency spacing
k = fft.fftfreq(zeroPadding, x[1] - x[0])
Y = fft.fft(y, n=zeroPadding)
if inspectPlots:
plt.figure("Frequency Space")
plt.semilogy(k, np.abs(Y), label="Raw FFT")
if tryFitting:
try:
# take +/- 4 sigma points around peak to fit to
sl = np.abs(k - cutoffFrequency).argmin() + np.array([-1, 1]) * 10 * freqSigma / np.abs(k[0] - k[1])
sl = slice(*[int(j) for j in sl])
p0 = [cutoffFrequency,
                  np.abs(Y)[sl].max() * freqSigma,  # estimate the height based on the max in the set
freqSigma,
0.14, 2e3, 1.1] # magic test numbers, they fit the background well
if inspectPlots:
plt.semilogy(k[sl], gaussWithBackground(k[sl], *p0), label="Peak with initial values")
p, _ = curve_fit(gaussWithBackground, k[sl], np.abs(Y)[sl], p0=p0, ftol=ftol)
if inspectPlots:
plt.semilogy(k[sl], gaussWithBackground(k[sl], *p), label="Fitted Peak")
# Want to remove data within 5 sigma ( arb value... )
st = int(p[0] - 5 * p[2])
en = int(p[0] + 5 * p[2])
# Find get the indices to remove.
refitRangeIdx = np.argwhere((k > st) & (k < en))
refitRangeIdxNeg = np.argwhere((k < -st) & (k > -en))
            # Replace the data with the background
# Note: abuses the symmetry of the FFT of a real function
# to get the negative side of the data
Y[refitRangeIdx] = background(k[refitRangeIdx], *p[-2:])
Y[refitRangeIdxNeg] = background(k[refitRangeIdx], *p[-2:])[::-1]
        except Exception:
print("ERROR: Trouble fitting the peak in frequency space.\n\t Defaulting to cutting off")
# Assume cutoffFrequency was the peak, not the actual cutoff
# Leaving it alone means half the peak would remain and the data
# wouldn't really be smoothed
cutoffFrequency -= 5 * freqSigma
# Reset this so the next part gets called
tryFitting = False
# "if not" instead of "else" because if the above
# fitting fails, we can default to the sharp cutoff
if not tryFitting:
# Define where to remove the data
st = cutoffFrequency
en = int(max(k)) + 1
# Find the indices to remove the data
refitRangeIdx = np.argwhere((k > st) & (k < en))
refitRangeIdxNeg = np.argwhere((k < -st) & (k > -en))
# Kill it all after the cutoff
Y[refitRangeIdx] = 0
Y[refitRangeIdxNeg] = 0
smoothIdx = np.argwhere((-st < k) & (k < st))
smoothr = -1. / cutoffFrequency ** 2 * k[smoothIdx] ** 2 + 1
Y[smoothIdx] *= smoothr
if inspectPlots:
plt.plot(k, np.abs(Y), label="FFT with removed parts")
a = plt.legend()
a.draggable(True)
# invert the FFT
y = fft.ifft(Y, n=zeroPadding)
# unshift the data
y += flattenLine
# using fft, not rfft, so data may have some
# complex parts. But we can assume they'll be negligible and
# remove them
# ( Safer to use np.real, not np.abs? )
# Need the [:len] to remove zero-padded stuff
y = np.abs(y)[:len(x)]
if inspectPlots:
plt.figure("Real Space")
print(x.size, y.size)
plt.plot(x, y, label="Smoothed Data")
a = plt.legend()
a.draggable(True)
retData[:, 0] = x
retData[:, -1] = y
return retData
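
# --- Editor's usage sketch (hypothetical data, not part of the original file) ---
# Applies fft_filter() above with the default sharp cutoff (tryFitting=False);
# the signal and the cutoff value are illustrative assumptions only.
def _demo_fft_filter():
    x = np.linspace(0, 1, 1024)
    # low-frequency signal plus a high-frequency component to be removed
    y = np.sin(2 * np.pi * 5 * x) + 0.1 * np.sin(2 * np.pi * 300 * x)
    data = np.column_stack((x, y))
    return fft_filter(data, cutoffFrequency=100, inspectPlots=False)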
def low_pass_filter(x_vals, y_vals, cutoff, inspectPlots=True):
"""
    Replicate Origin (OriginLab) smoothing directly
http://www.originlab.com/doc/Origin-Help/Smooth-Algorithm
"rotate" the data set so it ends at 0,
enforcing a periodicity in the data. Otherwise
oscillatory artifacts result at the ends
This uses a 50th order Butterworth filter.
"""
x_vals, y_vals = fourier_prep(x_vals, y_vals)
if inspectPlots:
plt.figure("Real Space")
plt.plot(x_vals, y_vals, label="Non-nan Data")
zeroPadding = len(x_vals)
# print "zero padding", zeroPadding # This needs to be this way because truncation is bad and actually zero padding
N = len(x_vals)
onePerc = int(0.01 * N)
x1 = np.mean(x_vals[:onePerc])
x2 = np.mean(x_vals[-onePerc:])
y1 = np.mean(y_vals[:onePerc])
y2 = np.mean(y_vals[-onePerc:])
m = (y1 - y2) / (x1 - x2)
b = y1 - m * x1
flattenLine = m * x_vals + b
y_vals -= flattenLine
if inspectPlots:
plt.figure("Real Space")
plt.plot(x_vals, y_vals, label="Rotated Data")
# even_data = np.column_stack((x_vals, y_vals))
# Perform the FFT and find the appropriate frequency spacing
x_fourier = fft.fftfreq(zeroPadding, x_vals[1] - x_vals[0])
y_fourier = fft.fft(y_vals) # , n=zeroPadding)
if inspectPlots:
plt.figure("Frequency Space")
plt.semilogy(x_fourier, np.abs(y_fourier), label="Raw FFT")
# Define where to remove the data
band_start = cutoff
band_end = int(max(abs(x_fourier))) + 1
'''
# Find the indices to remove the data
refitRangeIdx = np.argwhere((x_fourier > band_start) & (x_fourier <= band_end))
refitRangeIdxNeg = np.argwhere((x_fourier < -band_start) & (x_fourier >= -band_end))
#print "x_fourier", x_fourier[795:804]
#print "max(x_fourier)", max(x_fourier)
#print "refitRangeIdxNeg", refitRangeIdxNeg[:-400]
# Kill it all after the cutoff
y_fourier[refitRangeIdx] = 0
y_fourier[refitRangeIdxNeg] = 0
# This section does a square filter on the remaining code.
smoothIdx = np.argwhere((-band_start < x_fourier) & (x_fourier < band_start))
smoothr = -1 / band_start**2 * x_fourier[smoothIdx]**2 + 1
y_fourier[smoothIdx] *= smoothr
'''
# print abs(y_fourier[-10:])
butterworth = np.sqrt(1 / (1 + (x_fourier / cutoff) ** 100))
y_fourier *= butterworth
if inspectPlots:
plt.plot(x_fourier, np.abs(y_fourier), label="FFT with removed parts")
a = plt.legend()
a.draggable(True)
# print "y_fourier", len(y_fourier)
# invert the FFT
y_vals = fft.ifft(y_fourier, n=zeroPadding)
# using fft, not rfft, so data may have some
# complex parts. But we can assume they'll be negligible and
# remove them
# ( Safer to use np.real, not np.abs? )
# Need the [:len] to remove zero-padded stuff
y_vals = y_vals[:len(x_vals)]
# unshift the data
y_vals += flattenLine
y_vals = np.abs(y_vals)
if inspectPlots:
plt.figure("Real Space")
# print x_vals.size, y_vals.size
plt.plot(x_vals, y_vals, linewidth=3, label="Smoothed Data")
a = plt.legend()
a.draggable(True)
return np.column_stack((x_vals, y_vals))
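
# --- Editor's note and sketch (hypothetical data, not part of the original) ---
# The multiplication above implements a Butterworth magnitude response,
# sqrt(1 / (1 + (f / cutoff) ** (2 * n))); the exponent of 100 used above
# corresponds to order n = 50. The sketch assumes fourier_prep() from earlier
# in this module; data and cutoff are illustrative only.
def _demo_low_pass():
    x = np.linspace(0, 1, 512)
    y = np.sin(2 * np.pi * 3 * x) + 0.2 * np.sin(2 * np.pi * 150 * x)
    return low_pass_filter(x, y, cutoff=20, inspectPlots=False)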
def high_pass_filter(x_vals, y_vals, cutoff, inspectPlots=True):
"""
    Replicate Origin (OriginLab) smoothing directly
http://www.originlab.com/doc/Origin-Help/Smooth-Algorithm
"rotate" the data set so it ends at 0,
enforcing a periodicity in the data. Otherwise
oscillatory artifacts result at the ends
    This uses a steep Butterworth-style filter (exponent 50 on the
    frequency ratio, i.e. order 25).
"""
x_vals, y_vals = fourier_prep(x_vals, y_vals)
if inspectPlots:
plt.figure("Real Space")
plt.plot(x_vals, y_vals, label="Non-nan Data")
zeroPadding = len(x_vals)
print("zero padding", zeroPadding) # This needs to be this way because truncation is bad and actually zero padding
N = len(x_vals)
onePerc = int(0.01 * N)
x1 = np.mean(x_vals[:onePerc])
x2 = np.mean(x_vals[-onePerc:])
y1 = np.mean(y_vals[:onePerc])
y2 = np.mean(y_vals[-onePerc:])
m = (y1 - y2) / (x1 - x2)
b = y1 - m * x1
flattenLine = m * x_vals + b
y_vals -= flattenLine
if inspectPlots:
plt.figure("Real Space")
plt.plot(x_vals, y_vals, label="Rotated Data")
# even_data = np.column_stack((x_vals, y_vals))
# Perform the FFT and find the appropriate frequency spacing
x_fourier = fft.fftfreq(zeroPadding, x_vals[1] - x_vals[0])
y_fourier = fft.fft(y_vals) # , n=zeroPadding)
if inspectPlots:
plt.figure("Frequency Space")
plt.semilogy(x_fourier, np.abs(y_fourier), label="Raw FFT")
# Define where to remove the data
band_start = cutoff
band_end = int(max(abs(x_fourier))) + 1
'''
# Find the indices to remove the data
refitRangeIdx = np.argwhere((x_fourier > band_start) & (x_fourier <= band_end))
refitRangeIdxNeg = np.argwhere((x_fourier < -band_start) & (x_fourier >= -band_end))
#print "x_fourier", x_fourier[795:804]
#print "max(x_fourier)", max(x_fourier)
#print "refitRangeIdxNeg", refitRangeIdxNeg[:-400]
# Kill it all after the cutoff
y_fourier[refitRangeIdx] = 0
y_fourier[refitRangeIdxNeg] = 0
# This section does a square filter on the remaining code.
smoothIdx = np.argwhere((-band_start < x_fourier) & (x_fourier < band_start))
smoothr = -1 / band_start**2 * x_fourier[smoothIdx]**2 + 1
y_fourier[smoothIdx] *= smoothr
'''
print(abs(y_fourier[-10:]))
butterworth = 1 - np.sqrt(1 / (1 + (x_fourier / cutoff) ** 50))
y_fourier *= butterworth
if inspectPlots:
plt.plot(x_fourier, np.abs(y_fourier), label="FFT with removed parts")
a = plt.legend()
a.draggable(True)
print("y_fourier", len(y_fourier))
# invert the FFT
y_vals = fft.ifft(y_fourier, n=zeroPadding)
# using fft, not rfft, so data may have some
# complex parts. But we can assume they'll be negligible and
# remove them
# ( Safer to use np.real, not np.abs? )
# Need the [:len] to remove zero-padded stuff
y_vals = y_vals[:len(x_vals)]
# unshift the data
y_vals += flattenLine
y_vals = np.abs(y_vals)
if inspectPlots:
plt.figure("Real Space")
print(x_vals.size, y_vals.size)
plt.plot(x_vals, y_vals, label="Smoothed Data")
a = plt.legend()
a.draggable(True)
return np.column_stack((x_vals, y_vals))
def band_pass_filter(x_vals, y_vals, cutoff, inspectPlots=True):
"""
    Replicate Origin (OriginLab) smoothing directly
http://www.originlab.com/doc/Origin-Help/Smooth-Algorithm
"rotate" the data set so it ends at 0,
enforcing a periodicity in the data. Otherwise
oscillatory artifacts result at the ends
    This uses a steep Butterworth-style filter (exponent 50 on each
    frequency ratio, i.e. order 25 per corner).
"""
x_vals, y_vals = fourier_prep(x_vals, y_vals)
if inspectPlots:
plt.figure("Real Space")
plt.plot(x_vals, y_vals, label="Non-nan Data")
zeroPadding = len(x_vals)
print("zero padding", zeroPadding) # This needs to be this way because truncation is bad and actually zero padding
N = len(x_vals)
onePerc = int(0.01 * N)
x1 = np.mean(x_vals[:onePerc])
x2 = np.mean(x_vals[-onePerc:])
y1 = np.mean(y_vals[:onePerc])
y2 = np.mean(y_vals[-onePerc:])
m = (y1 - y2) / (x1 - x2)
b = y1 - m * x1
flattenLine = m * x_vals + b
y_vals -= flattenLine
if inspectPlots:
plt.figure("Real Space")
plt.plot(x_vals, y_vals, label="Rotated Data")
# even_data = np.column_stack((x_vals, y_vals))
# Perform the FFT and find the appropriate frequency spacing
x_fourier = fft.fftfreq(zeroPadding, x_vals[1] - x_vals[0])
y_fourier = fft.fft(y_vals) # , n=zeroPadding)
if inspectPlots:
plt.figure("Frequency Space")
plt.semilogy(x_fourier, np.abs(y_fourier), label="Raw FFT")
# Define where to remove the data
band_start = cutoff
band_end = int(max(abs(x_fourier))) + 1
'''
# Find the indices to remove the data
refitRangeIdx = np.argwhere((x_fourier > band_start) & (x_fourier <= band_end))
refitRangeIdxNeg = np.argwhere((x_fourier < -band_start) & (x_fourier >= -band_end))
#print "x_fourier", x_fourier[795:804]
#print "max(x_fourier)", max(x_fourier)
#print "refitRangeIdxNeg", refitRangeIdxNeg[:-400]
# Kill it all after the cutoff
y_fourier[refitRangeIdx] = 0
y_fourier[refitRangeIdxNeg] = 0
# This section does a square filter on the remaining code.
smoothIdx = np.argwhere((-band_start < x_fourier) & (x_fourier < band_start))
smoothr = -1 / band_start**2 * x_fourier[smoothIdx]**2 + 1
y_fourier[smoothIdx] *= smoothr
'''
print(abs(y_fourier[-10:]))
butterworth = 1 - np.sqrt(1 / (1 + (x_fourier / cutoff[0]) ** 50))
butterworth *= np.sqrt(1 / (1 + (x_fourier / cutoff[1]) ** 50))
y_fourier *= butterworth
if inspectPlots:
plt.plot(x_fourier, np.abs(y_fourier), label="FFT with removed parts")
a = plt.legend()
a.draggable(True)
print("y_fourier", len(y_fourier))
# invert the FFT
y_vals = fft.ifft(y_fourier, n=zeroPadding)
# using fft, not rfft, so data may have some
# complex parts. But we can assume they'll be negligible and
# remove them
# ( Safer to use np.real, not np.abs? )
# Need the [:len] to remove zero-padded stuff
y_vals = y_vals[:len(x_vals)]
# unshift the data
y_vals += flattenLine
y_vals = np.abs(y_vals)
if inspectPlots:
plt.figure("Real Space")
print(x_vals.size, y_vals.size)
plt.plot(x_vals, y_vals, label="Smoothed Data")
a = plt.legend()
a.draggable(True)
return np.column_stack((x_vals, y_vals))
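
# --- Editor's usage sketch (hypothetical data, not part of the original) ---
# band_pass_filter() takes a two-element cutoff: index 0 is the high-pass
# corner, index 1 the low-pass corner; high_pass_filter() is the
# single-corner analog. Values below are illustrative only.
def _demo_band_pass():
    x = np.linspace(0, 1, 512)
    y = (np.sin(2 * np.pi * 3 * x) + np.sin(2 * np.pi * 40 * x)
         + np.sin(2 * np.pi * 200 * x))
    # keeps roughly the 20-80 band of the three components above
    return band_pass_filter(x, y, cutoff=(20, 80), inspectPlots=False)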
####################
# Complete functions
####################
def proc_n_plotPMT(folder_path, plot=False, confirm_fits=False, save=None, verbose=False, **kwargs):
"""
This function will take a pmt object, process it completely.
:rtype: list of HighSidebandPMT
"""
pmt_data = pmt_sorter(folder_path, plot_individual=plot)
index = 0
for spectrum in pmt_data:
spectrum.integrate_sidebands(verbose=verbose, **kwargs)
spectrum.laser_line(verbose=verbose, **kwargs) # This function is broken
# because process sidebands can't handle the laser line
# print spectrum.full_dict
if plot:
plt.figure('PMT data')
for sb, elem in list(spectrum.sb_dict.items()):
plt.errorbar(elem[:, 0], elem[:, 1], elem[:, 2],
marker='o', label="{} {}".format(spectrum.parameters["series"],sb))
plt.figure('Sideband strengths')
plt.yscale("log")
plt.errorbar(spectrum.sb_results[:, 1], spectrum.sb_results[:, 3], spectrum.sb_results[:, 4],
label=spectrum.parameters['series'], marker='o')
if plot and confirm_fits:
plt.figure('PMT confirm fits')
for elem in list(spectrum.sb_dict.values()):
plt.errorbar(elem[:, 0], elem[:, 1], elem[:, 2], marker='o')
plt.errorbar(spectrum.sb_results[:, 1], spectrum.sb_results[:, 3], spectrum.sb_results[:, 4],
label=spectrum.parameters['series'], marker='o')
plt.ylim([-0.005, 0.025])
if type(save) is tuple:
spectrum.save_processing(save[0], save[1], index=index)
index += 1
elif isinstance(save, str):
            dirr = os.path.dirname(save) if os.path.dirname(save) else '.'  # if you just pass a filename to save
spectrum.save_processing(os.path.basename(save), dirr,
index=index)
index += 1
if plot:
plt.legend()
return pmt_data
def proc_n_plotCCD(folder_path, offset=None, plot=False, confirm_fits=False,
save=None, keep_empties = False, verbose=False, **kwargs):
"""
This function will take a list of ccd files and process it completely.
save_name is a tuple (file_base, folder_path)
keep_empties: If True, keep the HighSidebandCCD object in the list if no sidebands
are found. Else, cut it off.
The cutoff of 8 is too high, but I don't know what to change it to
:rtype: list of HighSidebandCCD
"""
if isinstance(folder_path, list):
file_list = folder_path
else:
# if verbose:
# print "Looking in:", os.path.join(folder_path, '*seq_spectrum.txt')
# file_list = glob.glob(os.path.join(folder_path, '*seq_spectrum.txt'))
file_list = natural_glob(folder_path, '*seq_spectrum.txt')
# if verbose:
# print "found these files:", "\n".join([os.path.basename(ii) for ii in file_list])
raw_list = []
for fname in file_list:
raw_list.append(HighSidebandCCD(fname, spectrometer_offset=offset))
index = 0
for spectrum in raw_list:
try:
spectrum.guess_sidebands(verbose=verbose, plot=plot)
except RuntimeError:
print("\n\n\nNo sidebands??\n\n")
# No sidebands, say it's empty
if not keep_empties:
raw_list.pop(raw_list.index(spectrum))
continue
try:
spectrum.fit_sidebands(plot=plot, verbose=verbose)
except RuntimeError:
print("\n\n\nNo sidebands??\n\n")
# No sidebands, say it's empty
if not keep_empties:
raw_list.pop(raw_list.index(spectrum))
continue
if "calculated NIR freq (cm-1)" not in list(spectrum.parameters.keys()):
spectrum.infer_frequencies()
if plot:
plt.figure('CCD data')
plt.errorbar(spectrum.proc_data[:, 0], spectrum.proc_data[:, 1], spectrum.proc_data[:, 2],
label=spectrum.parameters['series'])
plt.legend()
# plt.yscale('log')
plt.figure('Sideband strengths')
plt.errorbar(spectrum.sb_results[:, 1], spectrum.sb_results[:, 3], spectrum.sb_results[:, 4],
label=spectrum.parameters['series'], marker='o')
plt.legend()
plt.yscale('log')
if plot and confirm_fits:
plt.figure('CCD confirm fits')
plt.plot(spectrum.proc_data[:, 0], spectrum.proc_data[:, 1],# spectrum.proc_data[:, 2],
label=spectrum.parameters['series'])
plt.plot(spectrum.sb_results[:, 1], spectrum.sb_results[:, 3] / spectrum.sb_results[:, 5],# spectrum.sb_results[:, 4],
label=spectrum.parameters['series'], marker='o')
plt.legend()
plt.ylim([-0.1, 1])
if type(save) is tuple:
spectrum.save_processing(save[0], save[1],
marker=spectrum.parameters["series"].replace(
r"/", "p"),
index=index)
index += 1
elif isinstance(save, str):
# print "DEBUG: trying to save CCD with ", os.path.dirname(save),'_at_', os.path.basename(save)
spectrum.save_processing(os.path.basename(save), os.path.dirname(save),
marker=spectrum.parameters["series"].replace(
r"/", "p"),
index=index)
index += 1
return raw_list
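
# --- Editor's usage sketch (hypothetical path, not part of the original) ---
# Processes every '*seq_spectrum.txt' file found under the given folder; the
# folder name is a placeholder, not from the original code.
def _demo_proc_ccd(folder='path/to/ccd_data'):
    # point 'folder' at a directory of CCD spectra; no saving in this sketch
    return proc_n_plotCCD(folder, plot=False, save=None)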
def create_full_spectra(folder_path, skipLaser = True, *args, **kwargs):
"""
Given the folder path of raw data (where the PMT data is held in the subfolder "PMT"),
scale all the data to create a raw comb spectra.
:param folder_path:
:param args:
:param kwargs:
:return:
"""
output = np.empty((0,2))
# have proc_n_plot do all the integrating for the sbs
pmt = proc_n_plotPMT(os.path.join(folder_path, "PMT"))
ccd_file_list = glob.glob(os.path.join(folder_path, '*seq_spectrum.txt'))
ccd_list = [HighSidebandCCD(fname) for fname in ccd_file_list]
for pmtsb in sorted(pmt[0].sb_dict.keys()):
if skipLaser and pmtsb == 0: continue
data = pmt[0].sb_dict[pmtsb]
try:
print(pmtsb, pmt[0].full_dict[pmtsb])
        except Exception:
continue
output = np.row_stack((output, np.abs(data[:,[0,1]])))
output = np.row_stack((output, [np.nan, np.nan]))
# insert the pmt so I can iterate over scaling consecutive pairs
ccd_list.insert(0, pmt[0])
# make sure all things get scaled down by the factors before them
runningRatio = 1
for idx, ccd in enumerate(ccd_list[1:]):
ccd.guess_sidebands()
ccd.fit_sidebands()
ratio = [1, 1]
stitch_hsg_dicts(ccd_list[idx], ccd, need_ratio = True, ratios=ratio)
print("new ratio", ratio)
runningRatio *= ratio[1]
        ccd.proc_data[:, 1] *= runningRatio
output = np.row_stack((output, np.abs(ccd.proc_data[:,[0,1]])))
output = np.row_stack((output, [np.nan, np.nan]))
offsetEnergy = (output[:,0] - pmt[0].full_dict[0][0])*1e3
print(offsetEnergy.shape, output.shape)
output = np.column_stack((output[:,0], offsetEnergy.T, output[:,1]))
return output
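
# --- Editor's usage sketch (hypothetical paths, not part of the original) ---
# Assumes CCD '*seq_spectrum.txt' files at the top level of the folder and
# PMT data in a "PMT" subfolder, as the docstring above describes.
if __name__ == '__main__':
    full = create_full_spectra('path/to/raw_data')
    np.savetxt('full_spectrum.txt', full)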
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
319,
19480,
7653,
678,
1511,
25,
1433,
25,
1495,
1853,
198,
198,
31,
9800,
25,
289,
43558,
198,
198,
12679,
21319,
2672,
11,
778,
333,
1240,
9871,
1... | 2.171336 | 70,143 |
import torch
import torch.nn.functional as F
from torch_geometric.nn.conv import GATConv, MessagePassing
from torch_geometric.nn.norm import BatchNorm
from pd_mesh_net.nn.conv import PrimalConv, GATConvNoSelfLoops
class DualPrimalConv(torch.nn.Module):
r"""Implements a modified version of the convolution operation defined on
dual-primal graph convolutional neural networks from the
`"Dual-Primal Graph Convolutional Networks"
<https://arxiv.org/abs/1806.00770>`_ paper, which can work with the
"medial graph"-"simplex mesh" pair defined on a triangular mesh.
Uses the graph-attention networks defined in torch_geometric.nn.conv.GatConv
as a basic unit.
Args:
in_channels_primal, in_channels_dual (int): Size of each input sample
from the primal/dual graph respectively.
out_channels_primal, out_channels_dual (int): Size of each output sample
for the primal/dual graph respectively.
single_dual_nodes (bool): If True, dual graphs are assumed to have
single nodes; otherwise, they are assumed to have double nodes. Cf.
:obj:`pd_mesh_net.utils.GraphCreator`.
undirected_dual_edges (bool): If True, every directed edge in the dual
graphs is assumed to have an opposite directed edge; otherwise,
directed edges in the dual graphs are assumed not to have an
opposite directed edge. Cf. :obj:`pd_mesh_net.utils.GraphCreator`.
heads (int, optional): Number of attention heads associated to the
primal and dual graphs. (default: :obj:`1`)
concat_primal, concat_dual (bool, optional): If set to :obj:`False`, the
attention heads associated respectively to the primal and to the
dual graph are averaged instead of concatenated.
(default: :obj:`True`)
negative_slope_primal, negative_slope_dual (float, optional): LeakyReLU
angle of the negative slope, respectively for the layer associated
to the primal graph and for the layer associated to the dual graph.
(default: :obj:`0.2`)
dropout_primal, dropout_dual (float, optional): Dropout probability of
the normalized attention coefficients which exposes each node,
respectively of the primal and of the dual graph, to a
stochastically sampled neighborhood during training.
(default: :obj:`0`)
bias_primal, bias_dual (bool, optional): If set to :obj:`False`, the
layer associated respectively to the primal and to the dual graph
will not learn an additive bias. (default: :obj:`True`)
add_self_loops_to_dual_graph (bool, optional): If set to :obj:`True`, a
regular graph-attention convolutional layer is instantiated for the
dual graph, thus self-loops are added to the dual graph. If set to
:obj:`False` instead, a modified version of the graph-attention
convolutional layer is instantiated for the dual graph, with no
addition of self-loops to the latter. (default: :obj:`False`)
Attributes:
input_parameters (dict): Stores the value associated to each of the
above input arguments when the instance is created.
primal_attention_coefficients (torch.Tensor of shape
:obj:`[num_primal_edges, num_attention_heads]`, where
`num_primal_edges` is the number of edges in the primal graph
and `num_attention_heads` is the number of attention heads): The
i-th element stores the attention coefficient associated to the i-th
edge in the edge-index matrix of the primal graph.
_dual_layer (torch.geometric.nn.conv.GATConv or
pd_mesh_net.nn.conv.GATConvNoSelfLoops): Dual-graph convolutional
layer.
_primal_layer (pd_mesh_net.nn.conv.PrimalConv): Primal-graph
convolutional layer.
"""
@property
@property
def forward(self, x_primal, x_dual, edge_index_primal, edge_index_dual,
primal_edge_to_dual_node_idx):
r"""Performs the convolution operation on the dual-primal network, by
first performing a GATConv convolution on the dual graph and then
carrying out a modified GATConv convolution on the primal graph, in
which the attention coefficients are computed based on the node features
of the dual graph.
Args:
x_primal (torch.Tensor of shape
:obj:`[num_primal_nodes, in_channels]`, where `num_primal_nodes`
is the number of input nodes of the primal graph and
`in_channels` is the dimensionality of the input node features
of the primal graph): Input node features of the primal graph.
x_dual (torch.Tensor of shape
:obj:`[num_dual_nodes, in_channels_dual]`, where
`num_dual_nodes` is the number of input nodes of the associated
dual graph and `in_channels_dual` is the dimensionality of the
input node features of the associated dual graph): Input node
features of the dual graph. The output of the convolution on
these features in the dual layer will be used to compute the
attention coefficients of the primal graph.
edge_index_primal (torch.Tensor of shape :obj:`[2, num_edges]`]):
List of the edges of the primal graph.
edge_index_dual (torch.Tensor of shape :obj:`[2, num_edges]`]):
List of the edges of the dual graph.
primal_edge_to_dual_node_idx (dict): Dictionary that associates a
tuple, encoding an edge e in the primal graph, to the index of
the node in the dual graph that corresponds to the edge e.
Returns:
x_primal (torch.Tensor of shape
:obj:`[num_primal_nodes, out_channels_primal]`, where
`num_primal_nodes` is the number of nodes of the primal graph
and `out_channels_primal` is the dimensionality of the output
node features of the primal graph): Output node features of the
primal graph.
x_dual (torch.Tensor of shape
:obj:`[num_dual_nodes, out_channels_dual]`, where
`num_dual_nodes` is the number of nodes of the dual graph and
`out_channels_dual` is the dimensionality of the output node
features of the dual graph): Output node features of the dual
graph.
"""
# Convolution on the dual graph.
x_dual = F.relu(self._dual_layer(x_dual, edge_index_dual))
# Convolution on the primal graph.
(x_primal_before_relu,
primal_attention_coefficients) = self._primal_layer(
x_primal, x_dual, edge_index_primal, primal_edge_to_dual_node_idx)
x_primal = F.relu(x_primal_before_relu)
self.__primal_attention_coefficients = primal_attention_coefficients
return x_primal, x_dual
class DualPrimalResConv(torch.nn.Module):
r"""Dual-primal mesh-convolution layer with skip connection. A first
DualPrimalConv layer is followed by a chain of batch-normalization and
DualPrimalConv layers; the output of the first DualPrimalConv layer is added
to the output of the chain. Each batch-normalization layer, as well as the
final output of the layer, has ReLU activation function. Based on the
structure of `MResConv` from MeshCNN
(https://github.com/ranahanocka/MeshCNN/).
Args:
in_channels_primal, in_channels_dual (int): Number of input channels in
the primal/dual convolutional layer respectively.
out_channels_primal, out_channels_dual (int): Number of output channels
in the primal/dual convolutional layer respectively.
heads (int): Number of attention heads associated to each of the primal
and dual convolutional layers.
concat_primal, concat_dual (bool): If set to :obj:`False`, the attention
heads associated respectively to the primal and to the dual
convolutional layers are averaged instead of concatenated.
(default: :obj:`True`)
negative_slope_primal, negative_slope_dual (float): LeakyReLU angle of
the negative slope, respectively for the layers associated to the
primal graph and for the layers associated to the dual graph.
(default: :obj:`0.2`)
dropout_primal, dropout_dual (float): Dropout probability of the
normalized attention coefficients which exposes each node,
respectively of the primal and of the dual convolutional layers, to
a stochastically sampled neighborhood during training.
(default: :obj:`0`)
bias_primal, bias_dual (bool): If set to :obj:`False`, the layers
associated respectively to the primal and to the dual graph will not
learn an additive bias. (default: :obj:`False`)
single_dual_nodes (bool): If True, dual graphs are assumed to have
single nodes; otherwise, they are assumed to have double nodes. Cf.
:obj:`pd_mesh_net.utils.GraphCreator`.
undirected_dual_edges (bool): If True, every directed edge in the dual
graphs is assumed to have an opposite directed edge; otherwise,
directed edges in the dual graphs are assumed not to have an
opposite directed edge. Cf. :obj:`pd_mesh_net.utils.GraphCreator`.
add_self_loops_to_dual_graph (bool): If set to :obj:`True`, regular
graph-attention convolutional layers are instantiated as dual
convolutional layers, thus self-loops are added to the dual graph.
If set to :obj:`False` instead, a modified version of the
graph-attention convolutional layer is instantiated for the dual
graph, with no addition of self-loops to the latter.
num_skips (int, optional): Number of consecutive batch-normalization and
DualPrimalConv layers before the skip connection.
(default: :obj:`1`)
Attributes:
conv0 (pd_mesh_net.nn.conv.DualPrimalConv): First dual-primal
mesh-convolution layer.
primal_attention_coefficients (torch.Tensor of shape
:obj:`[num_primal_edges, num_attention_heads]`, where
`num_primal_edges` is the number of edges in the primal graph
and `num_attention_heads` is the number of attention heads): The
i-th element stores the attention coefficient associated to the i-th
edge in the edge-index matrix of the primal graph in the last
DualPrimalConv layer.
---
bn{i}_primal/dual (torch_geometric.nn.BatchNorm), `i` in
`{1, ..., self.num_skips}`: `i`-th batch-normalization layer,
respectively for the primal and for the dual features. Follows
`self.conv{i - 1}` and has `self.out_channels_primal`/
`self.out_channels_dual` output channels.
conv{i} (pd_mesh_net.nn.conv.DualPrimalConv), `i` in
`{1, ..., self.num_skips}`: `i`-th dual-primal mesh-convolution
layer. Follows `self.bn_primal/dual{i - 1}` and has
`self.out_channels_primal` and `self.out_channels_dual` output
channels for the primal and for the dual features respectively.
"""
@property
def forward(self, x_primal, x_dual, edge_index_primal, edge_index_dual,
primal_edge_to_dual_node_idx):
r"""Forward pass, implements the actual skip connections.
Args:
x_primal (torch.Tensor of shape
:obj:`[num_primal_nodes, in_channels]`, where `num_primal_nodes`
is the number of input nodes of the primal graph and
`in_channels` is the dimensionality of the input node features
of the primal graph): Input node features of the primal graph.
x_dual (torch.Tensor of shape
:obj:`[num_dual_nodes, in_channels_dual]`, where
`num_dual_nodes` is the number of input nodes of the associated
dual graph and `in_channels_dual` is the dimensionality of the
input node features of the associated dual graph): Input node
features of the dual graph. The output of the convolution on
these features in the dual layer will be used to compute the
attention coefficients of the primal graph.
edge_index_primal (torch.Tensor of shape :obj:`[2, num_edges]`]):
List of the edges of the primal graph.
edge_index_dual (torch.Tensor of shape :obj:`[2, num_edges]`]):
List of the edges of the dual graph.
primal_edge_to_dual_node_idx (dict): Dictionary that associates a
tuple, encoding an edge e in the primal graph, to the index of
the node in the dual graph that corresponds to the edge e.
Returns:
x_primal (torch.Tensor of shape
:obj:`[num_primal_nodes, out_channels_primal]`, where
`num_primal_nodes` is the number of nodes of the primal graph
and `out_channels_primal` is the dimensionality of the output
node features of the primal graph): Output node features of the
primal graph.
x_dual (torch.Tensor of shape
:obj:`[num_dual_nodes, out_channels_dual]`, where
`num_dual_nodes` is the number of nodes of the dual graph and
`out_channels_dual` is the dimensionality of the output node
features of the dual graph): Output node features of the dual
graph.
"""
# Apply the first convolution.
x_primal, x_dual = self.conv0(
x_primal=x_primal,
x_dual=x_dual,
edge_index_primal=edge_index_primal,
edge_index_dual=edge_index_dual,
primal_edge_to_dual_node_idx=primal_edge_to_dual_node_idx)
x1_primal = x_primal
x1_dual = x_dual
# Apply the chain of batch-normalization (followed by ReLU) and
# dual-primal mesh-convolution layers to the output of the first
# convolution.
for skip_idx in range(self.__num_skips):
x_primal = getattr(self, f'bn{skip_idx+1}_primal')(F.relu(x_primal))
x_dual = getattr(self, f'bn{skip_idx+1}_dual')(F.relu(x_dual))
x_primal, x_dual = getattr(self, f'conv{skip_idx+1}')(
x_primal=x_primal,
x_dual=x_dual,
edge_index_primal=edge_index_primal,
edge_index_dual=edge_index_dual,
primal_edge_to_dual_node_idx=primal_edge_to_dual_node_idx)
if (self.__num_skips > 0):
# Add the output of the first convolution to the output of the
# chain, and apply ReLU.
x_primal += x1_primal
x_dual += x1_dual
x_primal = F.relu(x_primal)
x_dual = F.relu(x_dual)
return x_primal, x_dual | [
11748,
28034,
198,
11748,
28034,
13,
20471,
13,
45124,
355,
376,
198,
6738,
28034,
62,
469,
16996,
13,
20471,
13,
42946,
1330,
402,
1404,
3103,
85,
11,
16000,
14478,
278,
198,
6738,
28034,
62,
469,
16996,
13,
20471,
13,
27237,
1330,
3... | 2.350568 | 6,518 |
import imp
import os.path as osp
| [
11748,
848,
198,
11748,
28686,
13,
6978,
355,
267,
2777,
628,
198
] | 2.916667 | 12 |
from python_reference.sparsemax_regression import SparsemaxRegression
| [
6738,
21015,
62,
35790,
13,
82,
29572,
9806,
62,
2301,
2234,
1330,
1338,
17208,
9806,
8081,
2234,
198
] | 3.888889 | 18 |
#!/usr/bin/env python
import pytest
from tidyplusPy import md
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
11748,
12972,
9288,
198,
6738,
43044,
9541,
20519,
1330,
45243,
198
] | 3 | 21 |
from django import template
register = template.Library()
@register.filter
| [
6738,
42625,
14208,
1330,
11055,
198,
198,
30238,
796,
11055,
13,
23377,
3419,
198,
198,
31,
30238,
13,
24455,
198
] | 3.85 | 20 |
from __future__ import unicode_literals
from datetime import datetime
from django.db.models import Q
from django.utils.translation import ugettext as _
from rest_framework.exceptions import PermissionDenied
from rest_framework.response import Response
from rest_framework import status
from rest_framework import viewsets
from api.decorators import link
from api.exceptions import APIException
from rest_framework.decorators import api_view
from api.viewsets import CloudBoltViewSet, ImportExportViewsetMixin, action_return, dict_return
from api.v2.viewsets import ResourceHandlerViewSet, SetEnabledMixin
from resourcehandlers.models import ResourceHandler
from resources.models import Resource
from utilities.logger import ThreadLogger
from resourcehandlers.serializers import ResourceHandlerSerializer
from api.v2.pagination import ResourceHandlerPagination
from extensions.views import admin_extension
from django.shortcuts import render
from utilities.permissions import cbadmin_required
logger = ThreadLogger(__name__)
@admin_extension(title="API Extension")
@api_view(['POST'])
#Sample Payloads
#{
# "template": "templatename",
# "user-name": "myuser",
# "password": "mytemplatepassword"
#}
#
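# Editor's note: the body of the decorated view is missing from this excerpt.
# The stub below is a hypothetical placeholder so the decorators above attach
# to a callable; its name and behavior are assumptions, not original code.
def run_template_api(request):
    # hypothetical: echo the posted payload (see sample payloads above)
    return Response(request.data, status=status.HTTP_200_OK)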
| [
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
6738,
42625,
14208,
13,
9945,
13,
27530,
1330,
1195,
198,
6738,
42625,
14208,
13,
26791,
13,
41519,
1330,
334,
1136,
5239,
355,
48... | 3.629851 | 335 |
# =========================================================================================
# Copyright 2015 Community Information Online Consortium (CIOC) and KCL Software Solutions
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =========================================================================================
#stdlib
import logging
#3rd party
from pyramid.httpexceptions import HTTPFound
from pyramid.view import view_config
from pyramid.security import NO_PERMISSION_REQUIRED
from formencode import Schema, All, Pipe
#this app
from featuredb.lib import security
from featuredb.views.base import ViewBase
from featuredb.views import validators
log = logging.getLogger('featuredb.views.register')
_skip_fields = {'ConfirmPassword', 'TomorrowsDate', 'Password'}
_fields = [x for x in RegistrationSchema.fields.keys() if x not in _skip_fields]
_password_hash_fields = ['PasswordHashRepeat', 'PasswordHashSalt', 'PasswordHash']
| [
2,
38093,
4770,
2559,
198,
2,
220,
15069,
1853,
8108,
6188,
7467,
42727,
357,
25690,
4503,
8,
290,
509,
5097,
10442,
23555,
198,
2,
198,
2,
220,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
19... | 4.027701 | 361 |
# -*- coding: utf-8 -*-
import sys
sys.path.append('sources')
import torch
import random
import unittest
import numpy as np
from torch.utils.data import Dataset
from torchvision import transforms
from predict import predict
from network import Model
from score import AP, meanAP
if __name__ == '__main__':
unittest.main()
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
25064,
198,
17597,
13,
6978,
13,
33295,
10786,
82,
2203,
11537,
198,
11748,
28034,
198,
11748,
4738,
198,
11748,
555,
715,
395,
198,
11748,
299,
32152,
355,
45941,
... | 3.22549 | 102 |
import numpy as np
def constant_function(x, constant=0):
"""
Returns a constant value `a` such that f(x) = a.
Parameters
============
x: the input value.
constant: the constant value.
"""
return constant
def dirac_delta_function(x, steps=[(1, 1)]):
"""
Returns a Dirac delta function such that
f(x) = y_0 if x = x_0,
y_1 if x = x_1,
...
else 0
Parameters
============
x: the input value.
steps: a list of deltas.
"""
    for x_n, y_n in steps:
        if x == x_n:
            return y_n
    # no step matched x, so the delta function is zero here
    return 0
def random_stochastic_function(x, delta):
"""
Creates a random stochastic function that adds a value between
[-delta, delta]
Parameters
============
x: the input value.
delta: defines the range
"""
return (np.random.random_sample() * 2 * delta) - delta
def random_gaussian_function(x, sigma):
"""
Samples from a Gaussian distribution.
Parameters
============
x: the input value.
    sigma: the standard deviation of the Gaussian distribution.
"""
return np.random.normal(0, sigma)
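
# --- Editor's usage sketch (hypothetical values, not part of the original) ---
if __name__ == '__main__':
    print(constant_function(3.0, constant=2))           # always 2
    print(dirac_delta_function(1, steps=[(1, 5)]))      # 5 at x == 1, else 0
    print(random_stochastic_function(0.0, delta=0.5))   # uniform in [-0.5, 0.5)
    print(random_gaussian_function(0.0, sigma=1.0))     # one N(0, 1) sample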
| [
11748,
299,
32152,
355,
45941,
201,
198,
201,
198,
201,
198,
4299,
6937,
62,
8818,
7,
87,
11,
6937,
28,
15,
2599,
201,
198,
220,
220,
220,
37227,
201,
198,
220,
220,
220,
16409,
257,
6937,
1988,
4600,
64,
63,
884,
326,
277,
7,
8... | 2.202166 | 554 |
import math
import numpy as np
import basis.robot_math as rm
import grasping.annotation.utils as gu
from scipy.spatial import cKDTree
def plan_pushing(hnd_s,
objcm,
cone_angle=math.radians(30),
icosphere_level=2,
local_rotation_interval=math.radians(22.5),
max_samples=100,
min_dist_between_sampled_contact_points=.005,
contact_offset=.002,
toggle_debug=False):
"""
:param hnd_s:
:param objcm:
:param cone_angle:
:param icosphere_level:
:param local_rotation_interval:
:param max_samples:
:param min_dist_between_sampled_contact_points:
:param contact_offset:
:return:
"""
contact_points, contact_normals = objcm.sample_surface(nsample=max_samples,
radius=min_dist_between_sampled_contact_points / 2,
toggle_option='normals')
push_info_list = []
import modeling.geometric_model as gm
for i, cpn in enumerate(zip(contact_points, contact_normals)):
print(f"{i} of {len(contact_points)} done!")
push_info_list += gu.define_pushing(hnd_s,
objcm,
gl_surface_pos=cpn[0] + cpn[1] * contact_offset,
gl_surface_normal=cpn[1],
cone_angle=cone_angle,
icosphere_level=icosphere_level,
local_rotation_interval=local_rotation_interval,
toggle_debug=toggle_debug)
return push_info_list
if __name__ == '__main__':
import os
import basis
import robot_sim.end_effectors.gripper.robotiq85_gelsight.robotiq85_gelsight_pusher as rtqp
import modeling.collision_model as cm
import visualization.panda.world as wd
base = wd.World(cam_pos=[.5, .5, .3], lookat_pos=[0, 0, 0])
gripper_s = rtqp.Robotiq85GelsightPusher()
objpath = os.path.join(basis.__path__[0], 'objects', 'block.stl')
objcm = cm.CollisionModel(objpath)
objcm.attach_to(base)
objcm.show_localframe()
push_info_list = plan_pushing(gripper_s, objcm, cone_angle=math.radians(60),
local_rotation_interval=math.radians(45), toggle_debug=False)
for push_info in push_info_list:
gl_push_pos, gl_push_rotmat, hnd_pos, hnd_rotmat = push_info
gic = gripper_s.copy()
gic.fix_to(hnd_pos, hnd_rotmat)
gic.gen_meshmodel().attach_to(base)
base.run()
| [
11748,
10688,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
4308,
13,
305,
13645,
62,
11018,
355,
42721,
198,
11748,
44787,
13,
1236,
14221,
13,
26791,
355,
915,
198,
6738,
629,
541,
88,
13,
2777,
34961,
1330,
269,
42,
24544,
631,
6... | 1.846774 | 1,488 |
import time
from .udp_class import Room, b
| [
11748,
640,
198,
6738,
764,
463,
79,
62,
4871,
1330,
10096,
11,
275,
628
] | 3.142857 | 14 |
from .prison.prison import env, manual_control, parallel_env, raw_env
| [
6738,
764,
35156,
13,
35156,
1330,
17365,
11,
10107,
62,
13716,
11,
10730,
62,
24330,
11,
8246,
62,
24330,
198
] | 3.5 | 20 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
4981,
11,
15720,
602,
628
] | 2.891892 | 37 |
import pytest
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
from model import Base
# import all entities so they are known to the ORM
from trading.order import Order
from trading.portfolio import Portfolio
from trading.trade import Trade
from trading.wallet import Wallet
@pytest.fixture(autouse=True)
| [
11748,
12972,
9288,
198,
198,
6738,
44161,
282,
26599,
1330,
2251,
62,
18392,
198,
6738,
44161,
282,
26599,
13,
2302,
13,
32446,
283,
876,
1330,
2377,
283,
876,
62,
8692,
198,
6738,
44161,
282,
26599,
13,
579,
1330,
6246,
10297,
198,
... | 3.769231 | 104 |
from bitcoinrpc.authproxy import AuthServiceProxy
import csv
access = AuthServiceProxy("http://rpcusername:rpcpassword@127.0.0.1:8332")
number = 50
csvfile = open('addresses.csv','a')
writer = csv.writer(csvfile)
# request new addresses until 'number' of them have been written out
while number != 0:
    try:
        newaddress = access.getnewaddress()
        writer.writerow([newaddress])
        print(number)
        number -= 1
    except Exception:
        # RPC hiccup: retry; note a persistent failure would loop forever
        pass
csvfile.close()
| [
6738,
8550,
81,
14751,
13,
18439,
36436,
1330,
26828,
16177,
44148,
198,
11748,
269,
21370,
198,
198,
15526,
796,
26828,
16177,
44148,
7203,
4023,
1378,
81,
79,
9042,
13292,
25,
81,
14751,
28712,
31,
16799,
13,
15,
13,
15,
13,
16,
25,... | 2.46988 | 166 |
"""Example app config."""
from django.apps import AppConfig
class ExampleAppConfig(AppConfig):
"""Example app config."""
name = 'example'
default_auto_field = 'django.db.models.BigAutoField'
| [
37811,
16281,
598,
4566,
526,
15931,
198,
198,
6738,
42625,
14208,
13,
18211,
1330,
2034,
16934,
628,
198,
4871,
17934,
4677,
16934,
7,
4677,
16934,
2599,
198,
220,
220,
220,
37227,
16281,
598,
4566,
526,
15931,
198,
220,
220,
220,
1438... | 3.029412 | 68 |
# Generated by Django 2.0.2 on 2018-03-25 19:04
from django.db import migrations
| [
2,
2980,
515,
416,
37770,
362,
13,
15,
13,
17,
319,
2864,
12,
3070,
12,
1495,
678,
25,
3023,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
628
] | 2.766667 | 30 |
#
# client.py
#
# Timothy Graefe, Javamata LLC, Nov 2021
#
import logging
# Django imports
from django.conf import settings
from django.db.utils import DatabaseError
from django.db.backends.base.client import BaseDatabaseClient
from django.core.exceptions import ImproperlyConfigured
# Arango Python driver (python-arango) imports.
from arango import ArangoClient
from arango.exceptions import DocumentCountError
debug_client = True
logger = logging.getLogger('django.db.backends.arangodb')
#
# Create a Database class to be used as a wrapper to the ArangoDB instance.
#
# collections = self.adb.collections()
# idx = [ x for x in range(len(collections)) if collections[x]['name'] == name ]
# if len(idx) == 0:
# return None
# return collections[idx[0]]
| [
2,
198,
2,
5456,
13,
9078,
198,
2,
198,
2,
22283,
7037,
22521,
11,
49247,
321,
1045,
11419,
11,
5267,
33448,
198,
2,
198,
198,
11748,
18931,
198,
198,
2,
37770,
17944,
198,
6738,
42625,
14208,
13,
10414,
220,
220,
220,
220,
220,
2... | 2.481586 | 353 |
"""
Evennia menu system.
Contribution - Griatch 2011
This module offers the ability for admins to let their game be fully
or partly menu-driven. Menu choices can be numbered or use arbitrary
keys. There are also some formatting options, such a putting options
in one or more columns.
The menu system consists of a MenuTree object populated by MenuNode
objects. Nodes are linked together with automatically created commands
so the player may select and traverse the menu. Each node can display
text and show options, but also execute arbitrary code to act on the
system and the calling object when they are selected.
There is also a simple Yes/No function supplied. This will create a
one-off Yes/No question and executes a given code depending on which
choice was made.
To test, add the CmdMenuTest command at the bottom of this module to the
default cmdset.
"""
from types import MethodType
from evennia import syscmdkeys
from evennia import Command, CmdSet, utils
from evennia import default_cmds, logger
# imported only to make it available during execution of code blocks
import evennia
CMD_NOMATCH = syscmdkeys.CMD_NOMATCH
CMD_NOINPUT = syscmdkeys.CMD_NOINPUT
#
# Commands used by the Menu system
#
class CmdMenuNode(Command):
"""
Parent for menu selection commands.
"""
key = "selection"
aliases = []
locks = "cmd:all()"
help_category = "Menu"
menutree = None
callback = None
# deprecated
code = None
def func(self):
"Execute a selection"
if self.callback:
try:
self.callback()
            except Exception as e:
self.caller.msg("%s\n{rThere was an error with this selection.{n" % e)
elif self.code:
evennia.logger.log_depmsg("menusystem.code is deprecated. Use menusystem.func.")
try:
exec(self.code)
            except Exception as e:
self.caller.msg("%s\n{rThere was an error with this selection.{n" % e)
else:
self.caller.msg("{rThis option is not available.{n")
class CmdMenuLook(default_cmds.CmdLook):
"""
ooc look
Usage:
look
This is a Menu version of the look command. It will normally show
the options available, otherwise works like the normal look
command.
"""
key = "look"
aliases = ["l", "ls"]
locks = "cmd:all()"
    help_category = "General"
def func(self):
"implement the menu look command"
if self.caller.db._menu_data:
# if we have menu data, try to use that.
lookstring = self.caller.db._menu_data.get("look", None)
if lookstring:
self.caller.msg(lookstring)
return
# otherwise we use normal look
super(CmdMenuLook, self).func()
class CmdMenuHelp(default_cmds.CmdHelp):
"""
help
Usage:
help
Get help specific to the menu, if available. If not,
works like the normal help command.
"""
key = "help"
aliases = "h"
locks = "cmd:all()"
help_category = "Menu"
def func(self):
"implement the menu help command"
if self.caller.db._menu_data:
# if we have menu data, try to use that.
lookstring = self.caller.db._menu_data.get("help", None)
if lookstring:
self.caller.msg(lookstring)
return
# otherwise we use normal help
super(CmdMenuHelp, self).func()
class MenuCmdSet(CmdSet):
"""
Cmdset for the menu. Will replace all other commands.
This always has a few basic commands available.
Note that you must always supply a way to exit the
cmdset manually!
"""
key = "menucmdset"
priority = 1
mergetype = "Replace"
# secure the menu against local cmdsets (but leave channels)
no_objs = True
no_exits = True
no_channels = False
def at_cmdset_creation(self):
"populate cmdset"
pass
#
# Menu Node system
#
class MenuTree(object):
"""
The menu tree object holds the full menu structure consisting of
MenuNodes. Each node is identified by a unique key. The tree
allows for traversal of nodes as well as entering and exiting the
tree as needed. For safety, being in a menu will not survive a
server reboot.
A menutree has two special node keys given by 'startnode' and
'endnode' arguments. The startnode is where the user will start
upon first entering the menu. The endnode need not actually
exist, the moment it is linked to and that link is used, the menu
will be exited and cleanups run. The default keys for these are
'START' and 'END' respectively.
"""
def __init__(self, caller, nodes=None,
startnode="START", endnode="END", exec_end="look"):
"""
We specify startnode/endnode so that the system knows where to
enter and where to exit the menu tree. If nodes is given, it
should be a list of valid node objects to add to the tree.
exec_end - if not None, will execute the given command string
directly after the menu system has been exited.
"""
self.tree = {}
self.startnode = startnode
self.endnode = endnode
self.exec_end = exec_end
self.caller = caller
if nodes and utils.is_iter(nodes):
for node in nodes:
self.add(node)
def start(self):
"""
Initialize the menu
"""
self.goto(self.startnode)
def add(self, menunode):
"""
Add a menu node object to the tree. Each node itself keeps
track of which nodes it is connected to.
"""
self.tree[menunode.key] = menunode
def goto(self, key):
"""
Go to a key in the tree. This sets up the cmdsets on the
caller so that they match the choices in that node.
"""
if key == self.endnode:
# if we was given the END node key, we clean up immediately.
self.caller.cmdset.delete("menucmdset")
del self.caller.db._menu_data
if self.exec_end is not None:
self.caller.execute_cmd(self.exec_end)
return
# not exiting, look for a valid code.
node = self.tree.get(key, None)
# make caller available on node
node.caller = self.caller
if node:
# call on-node callback
if node.callback:
try:
node.callback()
except Exception:
logger.log_trace()
self.caller.msg("{rNode callback could not be executed for node %s. Continuing anyway.{n" % key)
if node.code:
# Execute eventual code active on this node. self.caller is available at this point.
evennia.logger.log_depmsg("menusystem.code is deprecated. Use menusystem.callback.")
try:
exec(node.code)
except Exception:
self.caller.msg("{rCode could not be executed for node %s. Continuing anyway.{n" % key)
# initialize - this creates new cmdset
node.init(self)
# clean old menu cmdset and replace with the new one
self.caller.cmdset.delete("menucmdset")
self.caller.cmdset.add(node.cmdset)
# set the menu flag data for the default commands
self.caller.db._menu_data = {"help": node.helptext,
"look": str(node.text)}
# display the node
self.caller.msg(node.text)
else:
self.caller.msg("{rMenu node '%s' does not exist - maybe it's not created yet..{n" % key)
class MenuNode(object):
"""
This represents a node in a menu tree. The node will display its
textual content and offer menu links to other nodes (the relevant
commands are created automatically)
"""
def __init__(self, key, text="", links=None, linktexts=None,
keywords=None, cols=1, helptext=None,
selectcmds=None, callback=None, code="", nodefaultcmds=False, separator=""):
"""
key - the unique identifier of this node.
text - is the text that will be displayed at top when viewing this
node.
links - a list of keys for unique menunodes this is connected to.
                The actual keys will not be printed - keywords will be used
(or a number)
linktexts - an optional list of texts to describe the links. Must
match link list if defined. Entries can be None to not
generate any extra text for a particular link.
keywords - an optional list of unique keys for choosing links. Must
match links list. If not given, index numbers will be used.
                Also individual list entries can be None and will be replaced
by indices. If CMD_NOMATCH or CMD_NOENTRY, no text will be
generated to indicate the option exists.
cols - how many columns to use for displaying options.
helptext - if defined, this is shown when using the help command
instead of the normal help index.
selectcmds- a list of custom cmdclasses for handling each option.
Must match links list, but some entries may be set to None
to use default menu cmds. The given command's key will be
used for the menu list entry unless it's CMD_NOMATCH or
CMD_NOENTRY, in which case no text will be generated. These
commands have access to self.menutree and so can be used to
select nodes.
code - functional code. Deprecated. This will be executed just before this
               node is loaded (i.e. as soon as it has been selected from
another node). self.caller is available to call from this
code block, as well as the evennia flat API.
callback - function callback. This will be called as callback(currentnode) just
               before this node is loaded (i.e. as soon as it has been
               selected from another node). currentnode.caller is available.
nodefaultcmds - if true, don't offer the default help and look commands
in the node
separator - this string will be put on the line between menu nodes.
"""
self.key = key
self.cmdset = None
self.links = links
self.linktexts = linktexts
self.keywords = keywords
self.cols = cols
self.selectcmds = selectcmds
self.code = code
self.callback = MethodType(callback, self, MenuNode) if callback else None
self.nodefaultcmds = nodefaultcmds
self.separator = separator
Nlinks = len(self.links)
if code:
evennia.logger.log_depmsg("menusystem.code is deprecated. Use menusystem.callback.")
# validate the input
if not self.links:
self.links = []
if not self.linktexts or (len(self.linktexts) != Nlinks):
self.linktexts = [None for i in range(Nlinks)]
if not self.keywords or (len(self.keywords) != Nlinks):
self.keywords = [None for i in range(Nlinks)]
if not selectcmds or (len(self.selectcmds) != Nlinks):
self.selectcmds = [None for i in range(Nlinks)]
# Format default text for the menu-help command
if not helptext:
helptext = "Select one of the valid options ("
for i in range(Nlinks):
if self.keywords[i]:
if self.keywords[i] not in (CMD_NOMATCH, CMD_NOINPUT):
helptext += "%s, " % self.keywords[i]
else:
helptext += "%s, " % (i + 1)
helptext = helptext.rstrip(", ") + ")"
self.helptext = helptext
# Format text display
string = ""
if text:
string += "%s\n" % text
# format the choices into as many columns as specified
choices = []
for ilink, link in enumerate(self.links):
choice = ""
if self.keywords[ilink]:
if self.keywords[ilink] not in (CMD_NOMATCH, CMD_NOINPUT):
choice += "{g{lc%s{lt%s{le{n" % (self.keywords[ilink], self.keywords[ilink])
else:
choice += "{g {lc%i{lt%i{le{n" % ((ilink + 1), (ilink + 1))
if self.linktexts[ilink]:
choice += " - %s" % self.linktexts[ilink]
choices.append(choice)
cols = [[] for i in range(min(len(choices), cols))]
while True:
for i in range(len(cols)):
if not choices:
cols[i].append("")
else:
cols[i].append(choices.pop(0))
if not choices:
break
ftable = utils.format_table(cols)
for row in ftable:
string += "\n" + "".join(row)
# store text
self.text = self.separator + "\n" + string.rstrip()
def init(self, menutree):
"""
Called by menu tree. Initializes the commands needed by
the menutree structure.
"""
# Create the relevant cmdset
self.cmdset = MenuCmdSet()
if not self.nodefaultcmds:
# add default menu commands
self.cmdset.add(CmdMenuLook())
self.cmdset.add(CmdMenuHelp())
for i, link in enumerate(self.links):
if self.selectcmds[i]:
cmd = self.selectcmds[i]()
else:
# this is the operable command, it moves us to the next node.
cmd = CmdMenuNode()
cmd.key = str(i + 1)
cmd.link = link
cmd.callback = MethodType(_callback, cmd, CmdMenuNode)
# also custom commands get access to the menutree.
cmd.menutree = menutree
if self.keywords[i] and cmd.key not in (CMD_NOMATCH, CMD_NOINPUT):
cmd.aliases = [self.keywords[i]]
self.cmdset.add(cmd)
def __str__(self):
"Returns the string representation."
return self.text
#
# A simple yes/no question. Call this from a command to give object
# a cmdset where they may say yes or no to a question. Does not
# make use the node system since there is only one level of choice.
#
def prompt_yesno(caller, question="", yesfunc=None, nofunc=None, yescode="", nocode="", default="N"):
"""
This sets up a simple yes/no questionnaire. Question will be
asked, followed by a Y/[N] prompt where the [x] signifies the
default selection. Note that this isn't making use of the menu
node system.
yesfunc - function callback to be called as yesfunc(self) when choosing yes (self.caller is available)
    nofunc - function callback to be called as nofunc(self) when choosing no (self.caller is available)
yescode - deprecated, executable code
    nocode - deprecated, executable code for the "no" choice
"""
# creating and defining commands
cmdyes = CmdMenuNode(key="yes", aliases=["y"])
if yesfunc:
cmdyes.yesfunc = yesfunc
cmdyes.callback = MethodType(_yesfunc, cmdyes, CmdMenuNode)
cmdno = CmdMenuNode(key="no", aliases=["n"])
if nofunc:
cmdno.nofunc = nofunc
cmdno.callback = MethodType(_nofunc, cmdno, CmdMenuNode)
errorcmd = CmdMenuNode(key=CMD_NOMATCH)
errorcmd.callback = MethodType(_errorcmd, errorcmd, CmdMenuNode)
defaultcmd = CmdMenuNode(key=CMD_NOINPUT)
defaultcmd.callback = MethodType(_defaultcmd, defaultcmd, CmdMenuNode)
# code exec is deprecated:
if yescode:
evennia.logger.log_depmsg("yesnosystem.code is deprecated. Use yesnosystem.callback.")
cmdyes.code = yescode + "\nself.caller.cmdset.delete('menucmdset')\ndel self.caller.db._menu_data"
if nocode:
evennia.logger.log_depmsg("yesnosystem.code is deprecated. Use yesnosystem.callback.")
cmdno.code = nocode + "\nself.caller.cmdset.delete('menucmdset')\ndel self.caller.db._menu_data"
# creating cmdset (this will already have look/help commands)
yesnocmdset = MenuCmdSet()
yesnocmdset.add(cmdyes)
yesnocmdset.add(cmdno)
yesnocmdset.add(errorcmd)
yesnocmdset.add(defaultcmd)
yesnocmdset.add(CmdMenuLook())
yesnocmdset.add(CmdMenuHelp())
    # assigning menu data flags to caller.
caller.db._menu_data = {"help": "Please select Yes or No.",
"look": "Please select Yes or No."}
# assign cmdset and ask question
caller.cmdset.add(yesnocmdset)
if default == "Y":
prompt = "{lcY{lt[Y]{le/{lcN{ltN{le"
else:
prompt = "{lcY{ltY{le/{lcN{lt[N]{le"
prompt = "%s %s: " % (question, prompt)
caller.msg(prompt)
#
# A simple choice question. Call this from a command to give object
# a cmdset where they need to make a choice. Does not
# make use the node system since there is only one level of choice.
#
def prompt_choice(caller, question="", prompts=None, choicefunc=None, force_choose=False):
"""
This sets up a simple choice questionnaire. Question will be
asked, followed by a series of prompts. Note that this isn't
making use of the menu node system.
caller - the object calling and being offered the choice
question - text describing the offered choice
prompts - list of choices
choicefunc - functions callback to be called as func(self) when
make choice (self.caller is available) The function's definition
should be like func(self, menu_node), and menu_node.key is user's
choice.
force_choose - force user to make a choice or not
"""
# creating and defining commands
count = 0
choices = ""
commands = []
for choice in utils.make_iter(prompts):
count += 1
choices += "\n{lc%d{lt[%d]{le %s" % (count, count, choice)
cmdfunc = CmdMenuNode(key="%d" % count)
if choicefunc:
cmdfunc.choicefunc = choicefunc
cmdfunc.callback = MethodType(_choicefunc, cmdfunc, CmdMenuNode)
commands.append(cmdfunc)
if not force_choose:
choices += "\n{lc{lt[No choice]{le"
prompt = question + choices + "\nPlease choose one."
errorcmd = CmdMenuNode(key=CMD_NOMATCH)
    if force_choose:
        pass  # assumed no-op: the original branch body is missing from this copy
    else:
if choicefunc:
errorcmd.choicefunc = choicefunc
errorcmd.callback = MethodType(_errorcmd, errorcmd, CmdMenuNode)
defaultcmd = CmdMenuNode(key=CMD_NOINPUT)
    if force_choose:
        pass  # assumed no-op: the original branch body is missing from this copy
    else:
if choicefunc:
defaultcmd.choicefunc = choicefunc
defaultcmd.callback = MethodType(_defaultcmd, defaultcmd, CmdMenuNode)
# creating cmdset (this will already have look/help commands)
choicecmdset = MenuCmdSet()
for cmdfunc in commands: choicecmdset.add(cmdfunc)
choicecmdset.add(errorcmd)
choicecmdset.add(defaultcmd)
choicecmdset.add(CmdMenuLook())
choicecmdset.add(CmdMenuHelp())
# assigning menu data flags to caller.
caller.db._menu_data = {"help": "Please select.",
"look": prompt}
# assign cmdset and ask question
caller.cmdset.add(choicecmdset)
caller.msg(prompt)
#
# Menu command test
#
class CmdMenuTest(Command):
"""
testing menu module
Usage:
menu
menu yesno
This will test the menu system. The normal operation will produce
a small menu tree you can move around in. The 'yesno' option will
instead show a one-time yes/no question.
"""
key = "menu"
locks = "cmd:all()"
help_category = "Menu"
def func(self):
"Testing the menu system"
if self.args.strip() == "yesno":
"Testing the yesno question"
prompt_yesno(self.caller, question="Please answer yes or no - Are you the master of this mud or not?",
yesfunc=lambda self: self.caller.msg('{gGood for you!{n'),
nofunc=lambda self: self.caller.msg('{GNow you are just being modest ...{n'),
default="N")
else:
# testing the full menu-tree system
node0 = MenuNode("START", text="Start node. Select one of the links below. Here the links are ordered in one column.",
links=["node1", "node2", "END"], linktexts=["Goto first node", "Goto second node", "Quit"])
node1 = MenuNode("node1", text="First node. This node shows letters instead of numbers for the choices.",
links=["END", "START"], linktexts=["Quit", "Back to start"], keywords=["q","b"])
node2 = MenuNode("node2", text="Second node. This node lists choices in two columns.",
links=["node3", "START"], linktexts=["Set an attribute", "Back to start"], cols=2)
node3 = MenuNode("node3", text="Attribute 'menutest' set on you. You can examine it (only works if you are allowed to use the examine command) or remove it. You can also quit and examine it manually.",
links=["node4", "node5", "node2", "END"], linktexts=["Remove attribute", "Examine attribute",
"Back to second node", "Quit menu"], cols=2,
callback=lambda self: self.caller.attributes.add("menutest",'Testing!'))
node4 = MenuNode("node4", text="Attribute 'menutest' removed again.",
links=["node2"], linktexts=["Back to second node."], cols=2,
callback=lambda self: self.caller.attributes.remove("menutest"))
node5 = MenuNode("node5", links=["node4", "node2"], linktexts=["Remove attribute", "Back to second node."], cols=2,
callback=lambda self: self.caller.msg('%s/%s = %s' % (self.caller.key, 'menutest', self.caller.db.menutest)))
menu = MenuTree(self.caller, nodes=(node0, node1, node2, node3, node4, node5))
menu.start()
| [
37811,
198,
6104,
18142,
6859,
1080,
13,
198,
198,
4264,
3890,
532,
20914,
963,
2813,
198,
198,
1212,
8265,
4394,
262,
2694,
329,
44563,
284,
1309,
511,
983,
307,
3938,
198,
273,
11476,
6859,
12,
15808,
13,
21860,
7747,
460,
307,
2584... | 2.334308 | 9,575 |
#!/usr/bin/python3
# ******************************************************************************
# Copyright (c) Huawei Technologies Co., Ltd. 2021-2021. All rights reserved.
# licensed under the Mulan PSL v2.
# You can use this software according to the terms and conditions of the Mulan PSL v2.
# You may obtain a copy of Mulan PSL v2 at:
# http://license.coscl.org.cn/MulanPSL2
# THIS SOFTWARE IS PROVIDED ON AN 'AS IS' BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR
# PURPOSE.
# See the Mulan PSL v2 for more details.
# ******************************************************************************/
import unittest
from unittest import mock
from aops_utils.restful.response import MyResponse
from adoctor_diag_scheduler.function.helper import get_time_slices, get_trees_content, \
get_tree_from_database, get_valid_hosts
class TestTimeSlice(unittest.TestCase):
"""
Test split time range
"""
class TestGetTrees(unittest.TestCase):
"""
Test get valid trees from database
"""
@mock.patch("adoctor_diag_scheduler.function.helper.get_tree_from_database")
@mock.patch.object(MyResponse, "get_response")
@mock.patch.object(MyResponse, "get_response")
@mock.patch.object(MyResponse, "get_response")
@mock.patch.object(MyResponse, "get_response")
class TestGetHosts(unittest.TestCase):
"""
Test get valid hosts from database
"""
@mock.patch.object(MyResponse, "get_response")
@mock.patch.object(MyResponse, "get_response")
@mock.patch.object(MyResponse, "get_response")
@mock.patch.object(MyResponse, "get_response")
| [
2,
48443,
14629,
14,
8800,
14,
29412,
18,
198,
2,
41906,
17174,
46068,
1174,
198,
2,
15069,
357,
66,
8,
43208,
21852,
1766,
1539,
12052,
13,
33448,
12,
1238,
2481,
13,
1439,
2489,
10395,
13,
198,
2,
11971,
739,
262,
17996,
272,
6599... | 3.008757 | 571 |
import json
from .base import BaseTestArticles
from rest_framework.views import status
from ..models import Article
from ...authentication.models import User
from ....apps.profiles.models import Profile
| [
11748,
33918,
198,
6738,
764,
8692,
1330,
7308,
14402,
8001,
2983,
198,
6738,
1334,
62,
30604,
13,
33571,
1330,
3722,
198,
6738,
11485,
27530,
1330,
10172,
198,
6738,
2644,
41299,
3299,
13,
27530,
1330,
11787,
198,
6738,
19424,
18211,
13,... | 4.22449 | 49 |
blacklist=[
    'finger her',
'N I G G E R',
'#removethemole',
'super hottie',
'thicc',
't h i c c',
'u are hot',
'you are hot',
'your hot',
'ur hot',
'u are sexy',
'you are sexy',
'your sexy',
'ur sexy',
'why is this in just chatting',
'change category',
'u a virgin',
'wrong category',
'si1mp',
's1mp',
'simp',
'si(.*)mp',
'T H I C C',
'only fans',
'onlyfans',
'thicc(.*)Botez',
'hot(.*) Botez',
'hot(.*) Andrea',
'hot(.*) Alex',
'thicc(.*) Alex',
'thicc(.*) andrea',
'Botez(.*) thicc',
'Botez(.*) hot',
'Andrea(.*) hot',
'Alex(.*) hot',
'Alex(.*) thicc',
'andrea(.*) thicc',
'she(.*) thicc',
'sexy(.*) Alex',
'sexy(.*) andrea',
'Botez(.*) sexy',
'sexy(.*) Botez',
'Andrea(.*) sexy',
'Alex(.*) sexy',
'andrea(.*) sexy',
'ur so hawt',
'u so hawt',
'rape',
'your butthole',
'my butthole',
'give me a kiss',
'gimme a kiss',
'blow me a kiss',
'whore',
'pussy',
'cunt',
'suck dick',
'lick(.*) feet',
'finger you',
'suck my',
'whore',
'simp',
'lick(.*) toes',
'suck(.*) toes',
'your vagina',
'your vag',
'show vag',
'vagene',
'show bobs',
'vagoo',
'your booty',
'ur so hot',
'u so hot',
'slut',
]
graylist=[
'thicc',
'look(.*)tired',
'sexy',
]
whitelist=[
'LUL',
'KEKW',
#'siema',
#'strimp',
]
| [
13424,
4868,
41888,
198,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
198,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
705,
35461,
607,
6,
198,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
705,
45,
314,
402,... | 1.482493 | 1,428 |
from lxml import etree
from signxml import XMLSigner, XMLVerifier
from pprint import pprint
data_to_sign = "<Test/>"
cert = open("../idp/azure_ad_app_signing_cert.pem").read()
key = open("../idp/azure_ad_app_signing_key.pem").read()
root = etree.fromstring(data_to_sign)
signed_root = XMLSigner().sign(root, key=key, cert=cert)
verified_data = XMLVerifier().verify(signed_root).signed_xml
signed_data = etree.tostring(signed_root, encoding='utf8', method='xml', pretty_print=True)
print("Signed Data:")
print(signed_data.decode('utf-8'))
| [
6738,
300,
19875,
1330,
2123,
631,
198,
6738,
1051,
19875,
1330,
23735,
11712,
263,
11,
23735,
13414,
7483,
198,
6738,
279,
4798,
1330,
279,
4798,
198,
198,
7890,
62,
1462,
62,
12683,
796,
33490,
14402,
15913,
1,
198,
22583,
796,
1280,
... | 2.708543 | 199 |
"""
Dummy node, two pointers, swap nodes
"""
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next | [
37811,
198,
35,
13513,
10139,
11,
734,
32007,
11,
16075,
13760,
198,
37811,
198,
198,
2,
30396,
329,
1702,
306,
12,
25614,
1351,
13,
198,
2,
1398,
7343,
19667,
7,
15252,
2599,
198,
2,
220,
220,
220,
220,
825,
11593,
15003,
834,
7,
... | 2.457831 | 83 |
import os
from test_base import TestBase
from onadata.apps.logger.models import XForm
| [
11748,
28686,
198,
198,
6738,
1332,
62,
8692,
1330,
6208,
14881,
198,
6738,
319,
14706,
13,
18211,
13,
6404,
1362,
13,
27530,
1330,
1395,
8479,
628
] | 3.384615 | 26 |
import re
import csv
from typing import TextIO
from .core_gsheets import get_values
from .common import filter_personal_reasons
| [
11748,
302,
198,
11748,
269,
21370,
198,
198,
6738,
19720,
1330,
8255,
9399,
198,
198,
6738,
764,
7295,
62,
70,
42011,
1330,
651,
62,
27160,
198,
6738,
764,
11321,
1330,
8106,
62,
22682,
62,
260,
2812,
628
] | 3.540541 | 37 |
"""
Add a new x_-prefixed algorithm via the software module.
Register a new contributor's GitHub and Docker Hub accounts with the software module.
"""
from cytomine import Cytomine
from cytomine.models import SoftwareUserRepository
Cytomine.connect("192.168.52.120", "00d8474f-7fd8-4c50-bf8e-79973dcf7bc0", "2e088a54-26ab-4f8e-9bd5-d861479ecbfe")
SoftwareUserRepository(provider="Github", username="zhang-free", docker_username="zhangpenghui", prefix="S_").save()
| [
37811,
198,
43776,
162,
101,
94,
161,
251,
245,
162,
115,
119,
27950,
254,
23877,
108,
21410,
2124,
62,
28156,
222,
13783,
112,
21410,
163,
106,
245,
37345,
243,
198,
43776,
162,
101,
94,
161,
251,
245,
162,
115,
119,
27950,
254,
16... | 2 | 190 |
# Generated by Django 3.0.8 on 2020-07-04 16:15
from django.db import migrations
| [
2,
2980,
515,
416,
37770,
513,
13,
15,
13,
23,
319,
12131,
12,
2998,
12,
3023,
1467,
25,
1314,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
628
] | 2.766667 | 30 |
from pypy.interpreter.error import OperationError
from pypy.interpreter.gateway import unwrap_spec
@unwrap_spec(lo=int, hi=int)
def bisect_left(space, w_a, w_x, lo=0, hi=-1):
"""Return the index where to insert item x in list a, assuming a is sorted.
The return value i is such that all e in a[:i] have e < x, and all e in
a[i:] have e >= x. So if x already appears in the list, i points just
before the leftmost x already there.
Optional args lo (default 0) and hi (default len(a)) bound the
slice of a to be searched."""
if lo < 0:
raise OperationError(space.w_ValueError,
space.wrap("lo must be non-negative"))
if hi == -1:
hi = space.len_w(w_a)
while lo < hi:
mid = (lo + hi) >> 1
w_litem = space.getitem(w_a, space.wrap(mid))
if space.is_true(space.lt(w_litem, w_x)):
lo = mid + 1
else:
hi = mid
return space.wrap(lo)
@unwrap_spec(lo=int, hi=int)
def bisect_right(space, w_a, w_x, lo=0, hi=-1):
"""Return the index where to insert item x in list a, assuming a is sorted.
The return value i is such that all e in a[:i] have e <= x, and all e in
a[i:] have e > x. So if x already appears in the list, i points just
beyond the rightmost x already there
Optional args lo (default 0) and hi (default len(a)) bound the
slice of a to be searched."""
if lo < 0:
raise OperationError(space.w_ValueError,
space.wrap("lo must be non-negative"))
if hi == -1:
hi = space.len_w(w_a)
while lo < hi:
mid = (lo + hi) >> 1
w_litem = space.getitem(w_a, space.wrap(mid))
if space.is_true(space.lt(w_x, w_litem)):
hi = mid
else:
lo = mid + 1
return space.wrap(lo)
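
# App-level equivalent of the interp-level helpers above (illustrative):
#   bisect_left([10, 20, 20, 30], 20)  -> 1   (insertion point before the leftmost 20)
#   bisect_right([10, 20, 20, 30], 20) -> 3   (insertion point after the rightmost 20)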
| [
6738,
279,
4464,
88,
13,
3849,
3866,
353,
13,
18224,
1330,
14680,
12331,
198,
6738,
279,
4464,
88,
13,
3849,
3866,
353,
13,
10494,
1014,
1330,
7379,
2416,
62,
16684,
628,
198,
31,
403,
37150,
62,
16684,
7,
5439,
28,
600,
11,
23105,
... | 2.258469 | 797 |
"""
Voxel-Based Morphometry on Oasis dataset with Space-Net prior
=============================================================
Predicting age from gray-matter concentration maps from OASIS
dataset. Note that age is a continuous variable, we use the regressor
here, and not the classification object.
See also the SpaceNet documentation: :ref:`space_net`.
"""
# Authors: DOHMATOB Elvis
# FRITSCH Virgile
###########################################################################
# Load the Oasis VBM dataset
# ---------------------------
import numpy as np
from nilearn import datasets
n_subjects = 200 # increase this number if you have more RAM on your box
dataset_files = datasets.fetch_oasis_vbm(n_subjects=n_subjects)
age = dataset_files.ext_vars['age'].astype(float)
age = np.array(age)
gm_imgs = np.array(dataset_files.gray_matter_maps)
# Split data into training set and test set
from sklearn.utils import check_random_state
from sklearn.model_selection import train_test_split
rng = check_random_state(42)
gm_imgs_train, gm_imgs_test, age_train, age_test = train_test_split(
gm_imgs, age, train_size=.6, random_state=rng)
# Sort test data for better visualization (trend, etc.)
perm = np.argsort(age_test)[::-1]
age_test = age_test[perm]
gm_imgs_test = gm_imgs_test[perm]
###########################################################################
# Fit the SpaceNet and predict with it
# -------------------------------------
from nilearn.decoding import SpaceNetRegressor
# To save time (because these are anat images with many voxels), we include
# only the 5-percent voxels most correlated with the age variable to fit.
# Also, we set memory_level=2 so that more of the intermediate computations
# are cached. Also, you may pass and n_jobs=<some_high_value> to the
# SpaceNetRegressor class, to take advantage of a multi-core system.
#
# Also, here we use a graph-net penalty but more beautiful results can be
# obtained using the TV-l1 penalty, at the expense of longer runtimes.
decoder = SpaceNetRegressor(memory="nilearn_cache", penalty="graph-net",
screening_percentile=5., memory_level=2)
decoder.fit(gm_imgs_train, age_train) # fit
coef_img = decoder.coef_img_
y_pred = decoder.predict(gm_imgs_test).ravel() # predict
mae = np.mean(np.abs(age_test - y_pred))
print('Mean absolute error (MAE) on the predicted age: %.2f' % mae)
###########################################################################
# Visualize the resulting maps
from nilearn.plotting import plot_stat_map, show
# weights map
background_img = gm_imgs[0]
plot_stat_map(coef_img, background_img, title="graph-net weights",
display_mode="z", cut_coords=1)
###########################################################################
# Visualize the quality of predictions
# -------------------------------------
import matplotlib.pyplot as plt
plt.figure()
plt.suptitle("graph-net: Mean Absolute Error %.2f years" % mae)
linewidth = 3
ax1 = plt.subplot('211')
ax1.plot(age_test, label="True age", linewidth=linewidth)
ax1.plot(y_pred, '--', c="g", label="Predicted age", linewidth=linewidth)
ax1.set_ylabel("age")
plt.legend(loc="best")
ax2 = plt.subplot("212")
ax2.plot(age_test - y_pred, label="True age - predicted age",
linewidth=linewidth)
ax2.set_xlabel("subject")
plt.legend(loc="best")
show()
| [
37811,
198,
53,
1140,
417,
12,
15001,
41170,
15748,
319,
440,
17765,
27039,
351,
4687,
12,
7934,
3161,
198,
10052,
4770,
25609,
28,
198,
198,
47,
17407,
278,
2479,
422,
12768,
12,
47635,
10368,
8739,
422,
440,
1921,
1797,
198,
19608,
... | 3.133953 | 1,075 |
from django import forms
from django.utils.translation import ugettext_lazy as _
from api.dc.template.views import dc_template
from gui.forms import SerializerForm
class TemplateForm(SerializerForm):
"""
Create or remove DC<->VmTemplate link by calling dc_template.
"""
_api_call = dc_template
name = forms.ChoiceField(label=_('Template'), required=True,
widget=forms.Select(attrs={'class': 'input-select2 narrow disable_created2'}))
| [
6738,
42625,
14208,
1330,
5107,
198,
6738,
42625,
14208,
13,
26791,
13,
41519,
1330,
334,
1136,
5239,
62,
75,
12582,
355,
4808,
198,
198,
6738,
40391,
13,
17896,
13,
28243,
13,
33571,
1330,
30736,
62,
28243,
198,
6738,
11774,
13,
23914,... | 2.815029 | 173 |
import unittest
import networkx as nx
import dstream
| [
11748,
555,
715,
395,
198,
11748,
3127,
87,
355,
299,
87,
198,
11748,
288,
5532,
198,
220,
220,
220,
220,
220,
220,
220,
220,
198,
220,
220,
220,
220,
220,
220,
220,
220,
198,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
19... | 1.528571 | 70 |
# coding: utf-8
import re
import os
import json
import logging
from collections import namedtuple
from os.path import join, abspath
from ansible import constants as C
from ansible.playbook.play import Play
from ansible.errors import AnsibleError
from ansible.utils.path import unfrackpath
from ansible.parsing.dataloader import DataLoader
from ansible.plugins.callback import CallbackBase
from ansible.vars.manager import VariableManager
from ansible.inventory.manager import InventoryManager
from ansible.executor.task_queue_manager import TaskQueueManager
from ansible.executor.playbook_executor import PlaybookExecutor
from tiops import exceptions
from tiops import utils
from tiops.tui import term
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class ANSRunner(object):
"""
This is a General object for parallel execute modules.
"""
def __initializeData(self):
""" 初始化ansible """
C.DEFAULT_FILTER_PLUGIN_PATH.append(
'{}/tiops/ansibleapi/plugins/filter'.format(os.environ['TIUP_COMPONENT_INSTALL_DIR']))
C.HOST_KEY_CHECKING = False
C.ANSIBLE_SSH_ARGS = '-C -o ControlMaster=auto -o ControlPersist=1d'
C.PIPELINING = True
C.CACHE_PLUGIN = 'jsonfile'
C.CACHE_PLUGIN_CONNECTION = '~/.ansible/ansible_fact_cache'
C.CACHE_PLUGIN_TIMEOUT = 86400
C.DEFAULT_GATHER_TIMEOUT = 120
Options = namedtuple('Options',
['connection',
'module_path',
'forks',
'timeout',
'remote_user',
'ask_pass',
'private_key_file',
'ssh_common_args',
'ssh_extra_args',
'sftp_extra_args',
'scp_extra_args',
'become',
'become_method',
'become_user',
'ask_value_pass',
'verbosity',
'check',
'listhosts',
'listtasks',
'listtags',
'syntax',
'diff'])
self.options = Options(connection='smart',
module_path=None,
forks=self.forks,
timeout=60,
remote_user=self.user,
ask_pass=False,
private_key_file=self.private_key,
ssh_common_args=None,
ssh_extra_args=None,
sftp_extra_args=None,
scp_extra_args=None,
become=None,
become_method='sudo',
become_user='root',
ask_value_pass=False,
verbosity=None,
check=False,
listhosts=False,
listtasks=False,
listtags=False,
syntax=False,
diff=True)
# generate an Ansible inventory object from the topology
self.password = dict(conn_pass=self.password)
self.loader = DataLoader()
self.ips = self.list_ip_check()
if self.ips:
            self.inventory = InventoryManager(loader=self.loader,
                                              sources=self.ips)
self.variable_manager = VariableManager(
loader=DataLoader(), inventory=self.inventory)
else:
if self.topology:
                self.inventory = InventoryManager(loader=self.loader,
                                                  sources=self.inventory)
inventory(self.topology)
def run_model(self, module_name, module_args, become=False, register=None, with_items=None, group='*',
extra_vars=None, node=None):
"""
        run a module via Ansible ad-hoc.
module_name: ansible module_name
module_args: ansible module args
"""
if self.topology:
service_names = {'node_exporter': ['monitored_servers', 'node_exporter_port'],
'blackbox_exporter': ['monitored_servers', 'blackbox_exporter_port'],
'prometheus': ['monitoring_server', 'prometheus_port'],
'pushgateway': ['monitoring_server', 'pushgateway_port']}
if extra_vars in service_names and service_names[extra_vars][0] in self.inventory.get_groups_dict():
for host in self.inventory.get_groups_dict()[service_names[extra_vars][0]]:
hostname = self.inventory.get_host(hostname=host)
service_name = '{}-{}'.format(extra_vars, self.variable_manager.get_vars(
host=hostname)[service_names[extra_vars][1]])
self.variable_manager.set_host_variable(
host=hostname, varname='service_name', value=service_name)
if self.cluster_name and extra_vars:
self.variable_manager.extra_vars = {
'cluster_name': self.cluster_name, 'service': extra_vars}
else:
self.variable_manager.extra_vars = {
'cluster_name': self.cluster_name}
if register and with_items:
task = [dict(action=dict(module=module_name,
args=module_args),
become=become,
register=register,
with_items=with_items)]
elif register is None and with_items:
task = [dict(action=dict(module=module_name,
args=module_args),
become=become,
with_items=with_items)]
elif register and with_items is None:
task = [dict(action=dict(module=module_name,
args=module_args),
become=become,
register=register)]
else:
task = [dict(action=dict(module=module_name,
args=module_args),
become=become)]
if node:
node_list = node.split(',')
if len(node_list) == 1:
node_str = '{},'.format(node)
else:
node_str = ','.join(node_list)
play_source = dict(
name="Ansible Play",
hosts=self.ips if self.ips else (node_str if node else group),
gather_facts='no',
tasks=task
)
play = Play().load(play_source, variable_manager=self.variable_manager, loader=self.loader)
tqm = None
self.callback = ModelResultsCollector()
import traceback
try:
tqm = TaskQueueManager(
inventory=self.inventory,
variable_manager=self.variable_manager,
loader=self.loader,
options=self.options,
passwords=self.password,
stdout_callback="minimal",
)
tqm._stdout_callback = self.callback
tqm.run(play)
except Exception as e:
term.warn(str(e))
term.debug(traceback.print_exc())
finally:
if tqm is not None:
tqm.cleanup()
result = self.get_model_result()
failed = {}
unreachable = {}
offline_list = []
if self.topology:
for grp in ['drainer_servers', 'pump_servers', 'tikv_servers']:
if not self.topology.has_key(grp) or not self.topology[grp]:
continue
for _node in self.topology[grp]:
if _node['offline']:
offline_list.append(_node['uuid'])
if result['success']:
for _uuid, _info in result['success'].iteritems():
_ip = _info['ansible_host']
if _info.has_key('stderr') and _info['stderr']:
try:
failed[_uuid][_ip].append(_info['stderr'])
except:
if not failed.has_key(_uuid):
failed[_uuid] = {}
failed[_uuid][_ip] = [_info['stderr']]
if result['failed']:
for _uuid, _info in result['failed'].iteritems():
_ip = _info['ansible_host']
if _info.has_key('stderr') and _info['stderr']:
try:
failed[_uuid][_ip].append(_info['stderr'])
except:
if not failed.has_key(_uuid):
failed[_uuid] = {}
failed[_uuid][_ip] = [_info['stderr']]
if _info.has_key('stdout') and _info['stdout']:
try:
failed[_uuid][_ip].append(_info['stdout'])
except:
if not failed.has_key(_uuid):
failed[_uuid] = {}
failed[_uuid][_ip] = [_info['stdout']]
if _info.has_key('msg') and \
_info['msg'] and \
"'full_data_dir' is undefined" not in _info['msg'] and \
not re.search(r'Could not find.*firewalld', _info['msg']):
if _uuid in offline_list and re.search(r'the.*port.*is not up', _info['msg']):
continue
try:
failed[_uuid][_ip].append(_info['msg'])
except:
if not failed.has_key(_uuid):
failed[_uuid] = {}
failed[_uuid][_ip] = [_info['msg']]
if result['unreachable']:
for _uuid, _info in result['unreachable'].iteritems():
_ip = _info['ansible_host']
if _info.has_key('stderr') and _info['stderr']:
try:
unreachable[_uuid][_ip].append(_info['stderr'])
except:
if not unreachable.has_key(_uuid):
unreachable[_uuid] = {}
unreachable[_uuid][_ip] = [_info['stderr']]
if _info.has_key('stdout') and _info['stdout']:
try:
unreachable[_uuid][_ip].append(_info['stdout'])
except:
if not unreachable.has_key(_uuid):
unreachable[_uuid] = {}
unreachable[_uuid][_ip] = [_info['stdout']]
if _info.has_key('msg') and _info['msg']:
try:
unreachable[_uuid][_ip].append(_info['msg'])
except:
if not unreachable.has_key(_uuid):
unreachable[_uuid] = {}
unreachable[_uuid][_ip] = [_info['msg']]
if not failed and not unreachable:
return result
msg = {}
msg['failed'] = failed
msg['unreachable'] = unreachable
raise exceptions.TiOPSRuntimeError(msg, result, tp='ansible')
def run_playbook(self, playbook_path, extra_vars=None):
"""
        Run the given playbook.
"""
try:
self.callback = PlayBookResultsCollector()
if extra_vars:
self.variable_manager.extra_vars = extra_vars
executor = PlaybookExecutor(
playbooks=[playbook_path],
inventory=self.inventory,
variable_manager=self.variable_manager,
loader=self.loader,
options=self.options,
passwords=self.password,
)
executor._tqm._stdout_callback = self.callback
executor.run()
except Exception as e:
term.warn(str(e))
return False
if __name__ == '__main__':
a = "192.168.111.137,127.0.0.1"
rbt = ANSRunner(a)
rbt.run_model('shell', 'uptime')
result = json.dumps(rbt.get_model_result(), indent=4)
print(result)
| [
2,
19617,
25,
3384,
69,
12,
23,
628,
198,
11748,
302,
198,
11748,
28686,
198,
11748,
33918,
198,
11748,
18931,
198,
198,
6738,
17268,
1330,
3706,
83,
29291,
198,
6738,
28686,
13,
6978,
1330,
4654,
11,
2352,
6978,
198,
198,
6738,
9093,... | 1.737385 | 7,372 |
from dagster import (
Array,
Field,
ModeDefinition,
Noneable,
ScalarUnion,
Selector,
Shape,
pipeline,
resource,
solid,
)
from dagster.config.field import resolve_to_config_type
from dagster.core.serdes import deserialize_json_to_dagster_namedtuple, serialize_dagster_namedtuple
from dagster.core.snap.config_types import (
ConfigTypeKind,
build_config_schema_snapshot,
snap_from_config_type,
)
| [
6738,
48924,
1706,
1330,
357,
198,
220,
220,
220,
15690,
11,
198,
220,
220,
220,
7663,
11,
198,
220,
220,
220,
10363,
36621,
11,
198,
220,
220,
220,
6045,
540,
11,
198,
220,
220,
220,
34529,
283,
38176,
11,
198,
220,
220,
220,
968... | 2.530055 | 183 |
from .schedule import Schedule
from .exceptions import *
| [
6738,
764,
15952,
5950,
1330,
19281,
198,
6738,
764,
1069,
11755,
1330,
1635,
198
] | 4.071429 | 14 |
import math
import random
import re
import time
from turtle import numinput
from cv2 import INTER_AREA, INTER_BITS, INTER_CUBIC, INTER_LANCZOS4, INTER_LINEAR, INTER_LINEAR_EXACT, INTER_MAX, imread, imshow, waitKey
import numpy as np
import cv2
import os
from multiprocessing import Process, Manager
data_path = os.getcwd()
dataset_path = data_path+"\\train_data\\"
toShow = cv2.imread(dataset_path+"8.jpg")
cv2.imshow("File",toShow)
cv2.waitKey(0)
| [
11748,
10688,
198,
11748,
4738,
198,
11748,
302,
198,
11748,
640,
198,
6738,
28699,
1330,
997,
15414,
198,
6738,
269,
85,
17,
1330,
23255,
62,
12203,
32,
11,
23255,
62,
26094,
50,
11,
23255,
62,
34,
10526,
2149,
11,
23255,
62,
43,
2... | 2.637427 | 171 |
from django.db import models
# Create your models here.
from sellshop.utils.base_models import BaseModel
from django_countries.fields import CountryField
| [
6738,
42625,
14208,
13,
9945,
1330,
4981,
198,
198,
2,
13610,
534,
4981,
994,
13,
198,
6738,
3677,
24643,
13,
26791,
13,
8692,
62,
27530,
1330,
7308,
17633,
198,
6738,
42625,
14208,
62,
9127,
1678,
13,
25747,
1330,
12946,
15878,
628,
... | 3.659091 | 44 |
import os
import numpy as np
from matplotlib import pyplot
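
# The original exercise file defines plotData elsewhere; a minimal sketch
# (assumed signature and generic labels) is given here so the script below
# runs standalone.
def plotData(X, y):
    """Scatter the two classes: '+' for y == 1, 'o' for y == 0."""
    pos, neg = (y == 1), (y == 0)
    pyplot.plot(X[pos, 0], X[pos, 1], 'k+', ms=8)
    pyplot.plot(X[neg, 0], X[neg, 1], 'yo', ms=8)
    pyplot.xlabel('Feature 1')
    pyplot.ylabel('Feature 2')
    pyplot.legend(['y = 1', 'y = 0'])
    pyplot.show()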
if __name__ == '__main__':
# Load Data
# The first two columns contains the X values and the third column
# contains the label (y).
data = np.loadtxt(os.path.join("/home/thelichking/Desktop/ml-coursera-python-assignments/Exercise2/Data",
"ex2data2.txt"), delimiter=',')
X = data[:, :2]
y = data[:, 2]
plotData(X,y) | [
11748,
28686,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
2603,
29487,
8019,
1330,
12972,
29487,
628,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
1303,
8778,
6060,
198,
220,
220,
220,
1303,... | 2.310526 | 190 |
from rest_framework import serializers | [
6738,
1334,
62,
30604,
1330,
11389,
11341
] | 5.428571 | 7 |
import pandas as pd
import numpy as np
import transform
import classifier
import FoldBuilder
import training_utils
import submission_utils
if __name__ == '__main__':
mfcc_config = [
dict(
numcep = 14,
nfilt= 21,
lowfreq=500,
highfreq=None,
cornerfreq=1500.
),
]
window_config = [
dict(
window_length = 149,
overlap = 0.9,
)
]
rng_config = [dict(random_seed=i) for i in range(40, 40+16)]
clf_config = [
dict(
forest_params = dict(
n_estimators = 100,
n_jobs = 6,
min_samples_leaf = 2,
min_samples_split = 5,
oob_score=True,
)
)
]
configs = training_utils.combine_configs(
mfcc_config=mfcc_config,
window_config=window_config,
clf_config=clf_config,
rng_config=rng_config
)
fold_builder = FoldBuilder.FoldBuilder()
fold = fold_builder.submission_fold()
list_of_wavs = fold_builder.get_wavs()
run_name = "pca_100_16_149"
dump = True
pred_dfs = []
for i, config in list(enumerate(configs)):
print("Building model %d in run %s" % (i, run_name))
model = build_model(run_name, i, config, list_of_wavs, fold)
pred_dfs.append(model.pred_df)
if dump:
training_utils.dump_model(model)
blended_df = submission_utils.normalized_even_blend(pred_dfs)
submission_utils.write_submission_csv(blended_df, 'submission.csv')
| [
11748,
19798,
292,
355,
279,
67,
201,
198,
11748,
299,
32152,
355,
45941,
201,
198,
201,
198,
11748,
6121,
201,
198,
11748,
1398,
7483,
201,
198,
11748,
39957,
32875,
201,
198,
11748,
3047,
62,
26791,
201,
198,
11748,
14498,
62,
26791,
... | 1.743295 | 1,044 |
""" visualises networks of related samples using cytoscape.js
A component of the findNeighbour4 system for bacterial relatedness monitoring
Copyright (C) 2021 David Wyllie david.wyllie@phe.gov.uk
repo: https://github.com/davidhwyllie/findNeighbour4
This program is free software: you can redistribute it and/or modify
it under the terms of the MIT License as published
by the Free Software Foundation. See <https://opensource.org/licenses/MIT>, and the LICENSE file.
"""
import networkx as nx
class snvNetwork:
"""build and output Cytoscape compatible networks"""
def __init__(self, snv_threshold=None):
"""makes a undirected graph of samples;
makes edges if pairwise SNV <= snv_threshold SNV
assigns edge weight of 1/(1+SNV_distance)
Note that in all the below documentation, 'guid' refers to a node identified by a guid,
and 'guids' to multiple such nodes.
"""
self.G = nx.Graph()
self.snv_threshold = snv_threshold
def is_mixed(self, guid):
"""returns True if the is_mixed attribute is set True"""
try:
if self.G.nodes[guid]["is_mixed"] is True:
return True
except KeyError:
# there's no 'is_mixed' attribute
pass
return False
def snv2weight(self, x):
"""returns 1/(1+x)"""
x = float(x)
return 1 / (1 + x)
def raise_error(self, token):
"""raises a ZeroDivisionError, with token as the message.
useful for unit tests of error logging"""
raise ZeroDivisionError(token)
def to_dict(self):
"""converts snv_clustering object to a dictionary."""
return nx.json_graph.node_link_data(self.G)
def set_mixed(self, guid):
"""marks guid as being mixed."""
# set is_mixed attribute
self.G.nodes[guid]["is_mixed"] = True
def add_sample(self, starting_guid, guids=None, neighbours=[], **kwargs):
"""adds a sample, guid, linked to neighbours.
- guid should be a string
- guids are all the samples which are in the cluster. edges outside these will not be displayed
If None, then all edges will be displayed.
- neighbours should be a list of tuples (guid, SNV)
- additional arguments are added as node properties (e.g. surname = 'Smith')
"""
# create a list of guid - neighbour links,
# suitable for importing into networkx,
# from the input data
self.G.add_node(starting_guid, **kwargs)
for item in neighbours:
if not len(item) == 2:
raise TypeError(
"Neighbours must be a list of tuples (guid, snv) but it is {0}".format(
item
)
)
add_edge = False
if guids is None:
add_edge = True
else:
if starting_guid in guids and item[0] in guids:
add_edge = True
if add_edge is True:
self.G.add_edge(
starting_guid, item[0], weight=self.snv2weight(item[1]), snv=item[1]
)
def guids(self):
"""returns a set of all guids in the graph"""
return set(self.G.nodes)
def network2cytoscapejs(self, max_edges=1e5):
"""this function is used to convert networkx to Cytoscape.js JSON format
used by the elements property http://js.cytoscape.org/#notation/elements-json
returns dictionary {'success': (0 or 1), 'message': description of result
'elements':{data usable by cytoscape.js as elements property on construction}}
will not return any network with > max_edges"""
# lookup snv
w = nx.get_edge_attributes(self.G, "weight")
snv = nx.get_edge_attributes(self.G, "snv")
# load all nodes into elements array
elements = []
for node in self.G.nodes(data=True):
dat = {"id": node[0], **node[1]}
elements.append({"group": "nodes", "data": dat})
nNodes = len(elements)
if nNodes == 0:
return {
"elements": elements,
"success": 1,
"message": "No nodes found",
"nNodes": 0,
"nEdges": 0,
}
# load all edges to edges array
edge_id = 0
for edge in self.G.edges():
snv_dist = snv[(edge[0], edge[1])]
if snv_dist <= self.snv_threshold:
edge_id += 1
if edge_id > max_edges:
return {
"elements": {},
"message": "Not rendered; > {0} edges present".format(
max_edges
),
"success": 0,
}
elements.append(
{
"group": "edges",
"data": {
"id": edge_id,
"source": edge[0],
"target": edge[1],
"weight": w[(edge[0], edge[1])],
"snv": snv_dist,
},
}
)
return {
"elements": elements,
"success": 1,
"message": "Graph with {0} nodes and {1} edges <= {2} SNV".format(
nNodes, edge_id, self.snv_threshold
),
"nNodes": nNodes,
"nEdges": edge_id,
}
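
# Illustrative usage (guids and SNV distances are hypothetical):
#   net = snvNetwork(snv_threshold=3)
#   net.add_sample('guid-a', neighbours=[('guid-b', 2)], surname='Smith')
#   net.add_sample('guid-b')
#   payload = net.network2cytoscapejs()   # payload['elements'] feeds cytoscape.js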
| [
37811,
5874,
2696,
7686,
286,
3519,
8405,
1262,
27104,
17500,
1758,
13,
8457,
198,
198,
32,
7515,
286,
262,
1064,
46445,
6084,
19,
1080,
329,
23462,
3519,
1108,
9904,
198,
15269,
357,
34,
8,
33448,
3271,
12958,
297,
494,
21970,
13,
21... | 2.001423 | 2,810 |
# uncompyle6 version 3.2.4
# Python bytecode 2.7 (62211)
# Decompiled from: Python 2.7.15 (v2.7.15:ca079a3ea3, Apr 30 2018, 16:30:26) [MSC v.1500 64 bit (AMD64)]
# Embedded file name: toaiff
from warnings import warnpy3k
warnpy3k('the toaiff module has been removed in Python 3.0', stacklevel=2)
del warnpy3k
import os, tempfile, pipes, sndhdr
__all__ = [
'error', 'toaiff']
table = {}
t = pipes.Template()
t.append('sox -t au - -t aiff -r 8000 -', '--')
table['au'] = t
t = pipes.Template()
t.append('sox -t hcom - -t aiff -r 22050 -', '--')
table['hcom'] = t
t = pipes.Template()
t.append('sox -t voc - -t aiff -r 11025 -', '--')
table['voc'] = t
t = pipes.Template()
t.append('sox -t wav - -t aiff -', '--')
table['wav'] = t
t = pipes.Template()
t.append('sox -t 8svx - -t aiff -r 16000 -', '--')
table['8svx'] = t
t = pipes.Template()
t.append('sox -t sndt - -t aiff -r 16000 -', '--')
table['sndt'] = t
t = pipes.Template()
t.append('sox -t sndr - -t aiff -r 16000 -', '--')
table['sndr'] = t
uncompress = pipes.Template()
uncompress.append('uncompress', '--')
| [
2,
34318,
2349,
21,
2196,
513,
13,
17,
13,
19,
198,
2,
11361,
18022,
8189,
362,
13,
22,
357,
21,
1828,
1157,
8,
198,
2,
4280,
3361,
3902,
422,
25,
11361,
362,
13,
22,
13,
1314,
357,
85,
17,
13,
22,
13,
1314,
25,
6888,
2998,
... | 2.347253 | 455 |
# Copyright (c) 2008-2011 Tim Newsham, Andrey Mirtchovski
# Copyright (c) 2011-2012 Peter V. Saveliev
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
9P protocol implementation as documented in plan9 intro(5) and <fcall.h>.
"""
import os
import stat
import sys
import socket
import select
import traceback
import io
import threading
import struct
from . import utils as c9
if sys.version_info[0] == 3:
unicode = str
IOHDRSZ = 24
PORT = 564
cmdName = {}
Tversion = 100
Rversion = 101
Tauth = 102
Rauth = 103
Tattach = 104
Rattach = 105
Terror = 106
Rerror = 107
Tflush = 108
Rflush = 109
Twalk = 110
Rwalk = 111
Topen = 112
Ropen = 113
Tcreate = 114
Rcreate = 115
Tread = 116
Rread = 117
Twrite = 118
Rwrite = 119
Tclunk = 120
Rclunk = 121
Tremove = 122
Rremove = 123
Tstat = 124
Rstat = 125
Twstat = 126
Rwstat = 127
for i, k in dict(globals()).items():
try:
if (i[0] in ('T', 'R')) and isinstance(k, int):
cmdName[k] = i
except:
pass
version = b'9P2000'
versionu = b'9P2000.u'
Ebadoffset = "bad offset"
Ebotch = "9P protocol botch"
Ecreatenondir = "create in non-directory"
Edupfid = "duplicate fid"
Eduptag = "duplicate tag"
Eisdir = "is a directory"
Enocreate = "create prohibited"
Enoremove = "remove prohibited"
Enostat = "stat prohibited"
Enotfound = "file not found"
Enowstat = "wstat prohibited"
Eperm = "permission denied"
Eunknownfid = "unknown fid"
Ebaddir = "bad directory in wstat"
Ewalknotdir = "walk in non-directory"
Eopen = "file not open"
NOTAG = 0xffff
NOFID = 0xffffffff
# for completeness including all of p9p's defines
OREAD = 0 # open for read
OWRITE = 1 # write
ORDWR = 2 # read and write
OEXEC = 3 # execute, == read but check execute permission
OTRUNC = 16 # or'ed in (except for exec), truncate file first
OCEXEC = 32 # or'ed in, close on exec
ORCLOSE = 64 # or'ed in, remove on close
ODIRECT = 128 # or'ed in, direct access
ONONBLOCK = 256 # or'ed in, non-blocking call
OEXCL = 0x1000 # or'ed in, exclusive use (create only)
OLOCK = 0x2000 # or'ed in, lock after opening
OAPPEND = 0x4000 # or'ed in, append only
AEXIST = 0 # accessible: exists
AEXEC = 1 # execute access
AWRITE = 2 # write access
AREAD = 4 # read access
# Qid.type
QTDIR = 0x80 # type bit for directories
QTAPPEND = 0x40 # type bit for append only files
QTEXCL = 0x20 # type bit for exclusive use files
QTMOUNT = 0x10 # type bit for mounted channel
QTAUTH = 0x08 # type bit for authentication file
QTTMP = 0x04 # type bit for non-backed-up file
QTSYMLINK = 0x02 # type bit for symbolic link
QTFILE = 0x00 # type bits for plain file
# Dir.mode
DMDIR = 0x80000000 # mode bit for directories
DMAPPEND = 0x40000000 # mode bit for append only files
DMEXCL = 0x20000000 # mode bit for exclusive use files
DMMOUNT = 0x10000000 # mode bit for mounted channel
DMAUTH = 0x08000000 # mode bit for authentication file
DMTMP = 0x04000000 # mode bit for non-backed-up file
DMSYMLINK = 0x02000000 # mode bit for symbolic link (Unix, 9P2000.u)
DMDEVICE = 0x00800000 # mode bit for device file (Unix, 9P2000.u)
DMNAMEDPIPE = 0x00200000 # mode bit for named pipe (Unix, 9P2000.u)
DMSOCKET = 0x00100000 # mode bit for socket (Unix, 9P2000.u)
DMSETUID = 0x00080000 # mode bit for setuid (Unix, 9P2000.u)
DMSETGID = 0x00040000 # mode bit for setgid (Unix, 9P2000.u)
DMSTICKY = 0x00010000 # mode bit for sticky bit (Unix, 9P2000.u)
DMREAD = 0x4 # mode bit for read permission
DMWRITE = 0x2 # mode bit for write permission
DMEXEC = 0x1 # mode bit for execute permission
ERRUNDEF = 0xFFFFFFFF
UIDUNDEF = 0xFFFFFFFF
# supported authentication protocols
auths = ['pki', 'sk1']
def otoa(p):
'''Convert from open() to access()-style args'''
ret = 0
np = p & 3
if np == OREAD:
ret = AREAD
elif np == OWRITE:
ret = AWRITE
elif np == ORDWR:
ret = AREAD | AWRITE
elif np == OEXEC:
ret = AEXEC
if(p & OTRUNC):
ret |= AWRITE
return ret
def hasperm(f, uid, p):
'''Verify permissions for access type 'p' to file 'f'. 'p' is of the type
returned by otoa() above, i.e., should contain the A* flags.
f should resemble Dir, i.e., should have f.mode, f.uid, f.gid'''
m = f.mode & 7 # other
if (p & m) == p:
return 1
if f.uid == uid:
m |= (f.mode >> 6) & 7
if (p & m) == p:
return 1
if f.gid == uid:
m |= (f.mode >> 3) & 7
if (p & m) == p:
return 1
return 0
class Sock(object):
"""Per-connection state and appropriate read and write methods
for the Marshaller."""
class Fcall(object):
'''# possible values, from p9p's fcall.h
msize # Tversion, Rversion
version # Tversion, Rversion
oldtag # Tflush
ename # Rerror
qid # Rattach, Ropen, Rcreate
iounit # Ropen, Rcreate
aqid # Rauth
afid # Tauth, Tattach
uname # Tauth, Tattach
aname # Tauth, Tattach
perm # Tcreate
name # Tcreate
mode # Tcreate, Topen
newfid # Twalk
nwname # Twalk
wname # Twalk, array
nwqid # Rwalk
wqid # Rwalk, array
offset # Tread, Twrite
count # Tread, Twrite, Rread
data # Twrite, Rread
nstat # Twstat, Rstat
stat # Twstat, Rstat
# dotu extensions:
errno # Rerror
extension # Tcreate
'''
# type: server type
# dev server subtype
#
# file data:
# qid unique id from server
# mode permissions
# atime last read time
# mtime last write time
# length file length
# name
# uid owner name
# gid group name
# muid last modifier name
#
# 9P2000.u extensions:
# uidnum numeric uid
# gidnum numeric gid
# muidnum numeric muid
# *ext extended info
class Server(object):
"""
A server interface to the protocol.
Subclass this to provide service
"""
chatty = False
readpool = []
writepool = []
activesocks = {}
def shutdown(self, sock):
"""Close down a connection."""
if sock not in self.activesocks:
return
s = self.activesocks[sock]
assert not s.closing # we looped!
s.closing = True
if sock in self.readpool:
self.readpool.remove(sock)
if sock in self.writepool:
self.writepool.remove(sock)
# find first tag not in use
tags = [r.ifcall.tag for r in s.reqs]
tag = [n for n in range(1, 65535) if n not in tags][0]
# flush all outstanding requests
for r in s.reqs:
req = Req(tag)
req.ifcall = Fcall(Tflush, tag=tag, oldtag=r.ifcall.tag)
req.ofcall = Fcall(Rflush, tag=tag)
req.fd = s.fileno()
req.sock = s
self.tflush(req)
# clunk all open fids
fids = list(s.fids.keys())
for fid in fids:
req = Req(tag)
req.ifcall = Fcall(Tclunk, tag=tag, fid=fid)
req.ofcall = Fcall(Rclunk, tag=tag)
req.fd = s.fileno()
req.sock = s
self.tclunk(req)
# flush should have taken care of this
assert sock not in self.deferwrite and sock not in self.deferread
sock.close()
del self.activesocks[sock]
def regreadfd(self, fd, req):
'''Register a file descriptor in the read pool. When a fileserver
wants to delay responding to a message they can register an fd and
have it polled for reading. When it's ready, the corresponding 'req'
will be called'''
self.deferread[fd] = req
self.readpool.append(fd)
def regwritefd(self, fd, req):
'''Register a file descriptor in the write pool.'''
self.deferwrite[fd] = req
self.writepool.append(fd)
def unregreadfd(self, fd):
'''Delete a fd registered with regreadfd().'''
del self.deferread[fd]
self.readpool.remove(fd)
def unregwritefd(self, fd):
'''Delete a fd registered with regwritefd().'''
del self.deferwrite[fd]
self.writepool.remove(fd)
class Client(object):
"""
A client interface to the protocol.
"""
AFID = 10
ROOT = 11
CWD = 12
F = 13
path = '' # for 'getwd' equivalent
# protocol calls; part of 9p
# should be private functions, really
# user accessible calls, the actual implementation of a client
| [
2,
15069,
357,
66,
8,
3648,
12,
9804,
5045,
968,
1477,
321,
11,
843,
4364,
337,
2265,
354,
709,
20545,
198,
2,
15069,
357,
66,
8,
2813,
12,
6999,
5613,
569,
13,
10318,
626,
11203,
198,
2,
198,
2,
2448,
3411,
318,
29376,
7520,
11... | 2.33452 | 4,215 |
# 03/08/2020 - JonatanPaschoal
# declaring functions
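# (minimal placeholder definitions — the original defs are not shown in this file)
def func1():
    print('func1 called')

def func2(first_name, last_name):
    print(f'{first_name} {last_name}')

def cubo(n):
    return n ** 3  # volume of a cube with edge n (inferred from the print below)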
func1()  # calling the function; its arguments go inside the parentheses
# function that receives arguments
func2('Jonatan', 'Paschoal')
# function that returns a value
cubo(3)  # just executes the function (return value discarded)
f = cubo(3)  # assign the function's return value to a variable
print(f)
print('-'*18)  # or
print(f'Volume = {cubo(2)}')
print('-'*18) | [
2,
7643,
14,
2919,
14,
42334,
532,
5966,
39036,
47,
292,
6679,
282,
198,
198,
2,
32446,
283,
25440,
1257,
16175,
127,
113,
274,
198,
198,
20786,
16,
3419,
1303,
49869,
25440,
355,
1257,
16175,
28749,
304,
384,
385,
4578,
418,
920,
2... | 2.140351 | 171 |
import datetime
from typing import Dict, Union
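
# MCZInformation is referenced throughout this file but its definition is not
# included in the fragment; a minimal stand-in (assumption) matching the
# positional usage MCZInformation(frame_id, name, message_type):
class MCZInformation:
    def __init__(self, frame_id: int, name: str, message_type: str) -> None:
        self.frame_id = frame_id
        self.name = name
        self.message_type = message_type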
MAESTRO_INFORMATION = [
MCZInformation(0, "message_type", "message_type"),
MCZInformation(1, "power", "power"),
MCZInformation(2, "front_fan", "int"),
MCZInformation(3, "lower_back_fan", "int"),
MCZInformation(4, "top_back_fan", "int"),
MCZInformation(5, "fume_temperature", "temperature"),
MCZInformation(6, "ambient_temperature", "temperature"),
MCZInformation(7, "puffer_temperature", "temperature"),
MCZInformation(8, "boiler_temperature", "temperature"),
MCZInformation(9, "NTC3_temperature", "temperature"),
MCZInformation(10, "candle_condition", "int"),
MCZInformation(11, "active_set", "int"),
MCZInformation(12, "rpm_fan_fume", "int"),
MCZInformation(13, "rpm_wormwheel_set", "int"),
MCZInformation(14, "rpm_wormwheel_life", "int"),
MCZInformation(15, "3wayvalve", "3way"),
MCZInformation(16, "pump_pwr", "int"),
MCZInformation(17, "brazier", "brazier"),
MCZInformation(18, "profile", "int"),
MCZInformation(19, "modbus_address", "int"),
MCZInformation(20, "active_mode", "int"),
MCZInformation(21, "active_live", "int"),
MCZInformation(22, "auto_control_mode", "int"),
MCZInformation(23, "eco", "int"),
MCZInformation(24, "silent", "int"),
MCZInformation(25, "chrono", "int"),
MCZInformation(26, "room_temperature", "temperature"),
MCZInformation(27, "boiler_temperature", "temperature"),
MCZInformation(28, "motherboard_temperature", "temperature"),
MCZInformation(29, "power_level", "int10"),
MCZInformation(30, "firmware_version", "int"),
MCZInformation(31, "database_id", "int"),
MCZInformation(32, "hour", "date-H"),
MCZInformation(33, "minute", "date-M"),
MCZInformation(34, "day", "date-d"),
MCZInformation(35, "month", "date-m"),
MCZInformation(36, "year", "date-Y"),
MCZInformation(37, "total_operating_hours", "timespan"),
MCZInformation(38, "hours_of_operation_in_power1", "timespan"),
MCZInformation(39, "hours_of_operation_in_power2", "timespan"),
MCZInformation(40, "hours_of_operation_in_power3", "timespan"),
MCZInformation(41, "hours_of_operation_in_power4", "timespan"),
MCZInformation(42, "hours_of_operation_in_power5", "timespan"),
MCZInformation(43, "hours_of_service", "int"),
MCZInformation(44, "minutes_to_switch_off", "int"),
MCZInformation(45, "number_of_ignitions", "int"),
MCZInformation(46, "active_temperature", "int"),
MCZInformation(47, "ferenheit", "onoff"),
MCZInformation(48, "sound_effects", "onoff"),
MCZInformation(49, "sound_effects_state", "onoff"),
MCZInformation(50, "sleep", "onoff"),
MCZInformation(51, "mode", "onoff"),
MCZInformation(52, "wifi_sonde_temperature1", "int"),
MCZInformation(53, "wifi_sonde_temperature2", "int"),
MCZInformation(54, "wifi_sonde_temperature3", "int"),
MCZInformation(55, "unknown", "int"),
MCZInformation(56, "puffer", "int"),
MCZInformation(57, "boiler", "int"),
MCZInformation(58, "health", "int"),
MCZInformation(59, "returntemperature", "temperature"),
MCZInformation(60, "antifreeze", "onoff"),
]
POWER_MODE_DESCRIPTION = {
0: "Stove off",
40: "Shutting down",
6: "Clean Hot Mode",
7: "Load in Hot Mode",
8: "Start 1 to Warm Mode",
10: "Stabilization",
11: "Power 1",
12: "Power 2",
13: "Power 3",
14: "Power 4",
15: "Power 5",
}
def get_mcz_info(frame_id: int) -> MCZInformation:
"""
Return the MCZInformation relative to a RecuperoInfo position.
Params
------
`frame_id` (`int`):
Frame ID position corresponding to a RecuperoInfo position
Returns
------
`mcz_information` (`MCZInformation`):
MCZInformation relative to a RecuperoInfo position.
"""
mcz_information = None
if frame_id >= 0 and frame_id <= 60:
mcz_information = MAESTRO_INFORMATION[frame_id]
else:
mcz_information = MCZInformation(frame_id, f"Unknown {frame_id}", "int")
return mcz_information
def format_seconds(seconds: int) -> str:
"""
Formats seconds into HH:MM:SS
Params
------
`seconds` (`int`):
seconds integer
Returns
------
`HH:MM:SS` (`str`)
"""
return str(datetime.timedelta(seconds=seconds))
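
# e.g. format_seconds(3661) -> '1:01:01'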
def websocket_message_to_dict(message: str) -> Dict[str, Union[float, int, str]]:
"""
Format a Web Socket message into a human-readable dictionary.
Params
------
`message` (`str`):
Web Socket Message
Returns
------
`result` (`dict`):
Formatted result dictionary
"""
result = {}
message = message.split("|")
date = "Y-m-d H:M"
for idx, content in enumerate(message):
info = get_mcz_info(idx)
content = int(content, 16)
if info.message_type == "power":
result["power_mode_description"] = POWER_MODE_DESCRIPTION.get(content)
result[info.name] = 0 if content in [0, 40] else 1
elif info.message_type == "int10":
result[info.name] = content - 10
elif info.message_type == "temperature":
result[info.name] = float(content) / 2
elif info.message_type == "timespan":
result[info.name] = format_seconds(content)
elif info.message_type == "3way":
result[info.name] = "Sani" if content == 1 else "Risc"
elif info.message_type == "brazier":
result[info.name] = "OK" if content == 0 else "CLEAR"
elif info.message_type.startswith("date-"):
date_part = info.message_type[-1]
content = str(content)
content = "0" + content if len(content) == 1 else content
date = date.replace(date_part, str(content))
else:
result[info.name] = content
result["date"] = date
return result
| [
11748,
4818,
8079,
198,
6738,
19720,
1330,
360,
713,
11,
4479,
628,
198,
198,
5673,
1546,
5446,
46,
62,
1268,
35036,
796,
685,
198,
220,
220,
220,
13122,
57,
21918,
7,
15,
11,
366,
20500,
62,
4906,
1600,
366,
20500,
62,
4906,
12340,... | 2.436416 | 2,422 |
#!/usr/bin/env python
# encoding: utf-8
from __future__ import print_function
import argparse
import enum
import importlib
import inspect
import types
from pprint import pprint
class Parameter(object):
""" A placeholder object, used for the moment in the inline tests """
pass
QUALIFIER = '.'
def dotpath_join(base, *addenda):
""" Join dotpath elements together as one, á la os.path.join(…) """
for addendum in addenda:
if not base.endswith(QUALIFIER):
base += QUALIFIER
if addendum.startswith(QUALIFIER):
if len(addendum) == 1:
raise ValueError('operand too short: %s' % addendum)
addendum = addendum[1:]
base += addendum
# N.B. this might be overthinking it --
# maybe we *want* to allow dotpaths
# that happen to start and/or end with dots?
if base.endswith(QUALIFIER):
return base[:-1]
return base
def qualified_import(qualified):
""" Import a qualified thing-name.
e.g. 'instakit.processors.halftone.FloydSteinberg'
"""
if QUALIFIER not in qualified:
raise ValueError("qualified_import() needs a qualified name "
"(got %s)" % qualified)
head = qualified.split(QUALIFIER)[-1]
tail = qualified.replace("%s%s" % (QUALIFIER, head), '')
module = importlib.import_module(tail)
cls = getattr(module, head)
print("Qualified Import: %s" % qualified)
return cls
def qualified_name_tuple(cls):
""" Get the module name and the thing-name for a class.
e.g. ('instakit.processors.halftone', 'FloydSteinberg')
"""
mod_name = getattr(cls, '__module__')
cls_name = getattr(cls, '__qualname__',
getattr(cls, '__name__'))
return mod_name, cls_name
def qualified_name(cls):
""" Get a qualified thing-name for a class.
e.g. 'instakit.processors.halftone.FloydSteinberg'
"""
mod_name, cls_name = qualified_name_tuple(cls)
out = "%s%s%s" % (mod_name, QUALIFIER, cls_name)
print("Qualified Name: %s" % out)
return out
class Nothing(object):
""" Placeholder singleton, signifying nothing """
__slots__ = tuple()
def check_parameter_default(param_default):
""" Filter result values coming from inspect.signature(…) """
if param_default == inspect._empty:
return Nothing
return param_default
def default_arguments(cls):
""" Get a dictionary of the keyword arguments with provided defaults,
as furnished by a given classes’ “__init__” function.
"""
try:
signature = inspect.signature(cls)
except (ValueError, TypeError) as exc:
m, n = qualified_name_tuple(cls)
qn = "%s%sSlow%s" % (m.replace('ext.', ''), QUALIFIER, n) # WTF HAX
NonCythonCls = qualified_import(qn)
if qualified_name(NonCythonCls) != qualified_name(cls):
return default_arguments(NonCythonCls)
else:
raise exc
if len(signature.parameters) < 1:
return {}
return { parameter.name : check_parameter_default(parameter.default) \
for parameter \
in signature.parameters.values() }
def is_enum(cls):
""" Predicate function to ascertain whether a class is an Enum. """
return enum.Enum in cls.__mro__
def enum_choices(cls):
""" Return a list of the names of the given Enum class members. """
return [choice.name for choice in cls]
FILE_ARGUMENT_NAMES = ('path', 'pth', 'file')
def add_argparser(subparsers, cls):
""" Add a subparser -- an instance of “argparse.ArgumentParser” --
with arguments and defaults matching the keyword arguments and
defaults provided by the given class (q.v. “default_arguments(…)”
definition supra.)
"""
qualname = qualified_name(cls)
cls_help = getattr(cls, '__doc__', None) or "help for %s" % qualname
parser = subparsers.add_parser(qualname, help=cls_help)
if is_enum(cls): # Deal with enums
argument_name = cls.__name__.lower()
add_argument_args = dict(choices=enum_choices(cls),
type=str,
help='help for enum %s' % argument_name)
parser.add_argument(argument_name,
**add_argument_args)
else: # Deal with __init__ signature
for argument_name, argument_value in default_arguments(cls).items():
argument_type = type(argument_value)
argument_required = False
add_argument_args = dict(help='help for argument %s' % argument_name)
if argument_value is not Nothing:
add_argument_args.update({ 'default' : argument_value })
else:
add_argument_args.update({ 'type' : argument_name in FILE_ARGUMENT_NAMES \
and argparse.FileType('rb') \
or str })
argument_required = True
if argument_type is bool:
add_argument_args.update({ 'action' : 'store_true' })
elif argument_type is type(None):
add_argument_args.update({ 'type' : str })
elif is_enum(argument_type):
add_argument_args.update({ 'choices' : enum_choices(argument_type),
'type' : str })
argument_template = argument_required and '%s' or '--%s'
parser.add_argument(argument_template % argument_name,
**add_argument_args)
return parser
functype = types.FunctionType
def get_processors_from(module_name):
""" Memoized processor-extraction function """
from instakit.utils.static import asset
if not hasattr(get_processors_from, 'cache'):
get_processors_from.cache = {}
if module_name not in get_processors_from.cache:
processors = []
module = importlib.import_module(module_name)
print("Module: %s (%s)" % (module.__name__,
asset.relative(module.__file__)))
for thing in (getattr(module, name) for name in dir(module)):
if hasattr(thing, 'process'):
print("Found thing: %s" % thing)
if module.__name__ in thing.__module__:
if thing not in processors:
if type(getattr(thing, 'process')) is functype:
processors.append(thing)
get_processors_from.cache[module_name] = tuple(processors)
return get_processors_from.cache[module_name]
if __name__ == '__main__':
test() | [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
21004,
25,
3384,
69,
12,
23,
198,
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
198,
11748,
1822,
29572,
198,
11748,
33829,
198,
11748,
1330,
8019,
198,
11748,
10104,
198,
117... | 2.249072 | 2,963 |
import clientUtils
def play_sound(data):
"""
Parameters
----------
data: dict
Returns
-------
"""
if 'sound_name' in data:
clientUtils.sound(data.get('sound_name'))
return ""
return "Je ne trouve pas le son demandé"
| [
11748,
5456,
18274,
4487,
628,
198,
4299,
711,
62,
23661,
7,
7890,
2599,
198,
220,
220,
220,
37227,
628,
220,
220,
220,
40117,
198,
220,
220,
220,
24200,
438,
198,
220,
220,
220,
1366,
25,
8633,
628,
220,
220,
220,
16409,
198,
220,
... | 2.403509 | 114 |
import hashlib, binascii
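
# `User` is referenced below but not defined in this fragment; a minimal
# attribute-bag stand-in (assumption) keeps the snippet runnable:
class User(object):
    pass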
null_user = User()
null_user.name = "null"
null_user.is_null = True
null_user.id = 0
| [
11748,
12234,
8019,
11,
9874,
292,
979,
72,
198,
198,
8423,
62,
7220,
796,
11787,
3419,
198,
8423,
62,
7220,
13,
3672,
796,
366,
8423,
1,
198,
8423,
62,
7220,
13,
271,
62,
8423,
796,
6407,
198,
8423,
62,
7220,
13,
312,
796,
657,
... | 2.466667 | 45 |
# -*- coding: utf-8 -*-
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
housing_data= pd.read_csv("C:\\Users\\sofia.dejesus\\Documents\\02_book\\kc_house_data.csv")
#Housing_data.head()
#print(housing_data.shape)
housing_data.describe(include=[np.number])
housing_data.head
housing_data.describe()
#Checking for missing values in data
housing_data.isnull().sum()
#Pairplotting for some data
coln = ['price','sqft_living','zipcode', 'sqft_above']
sns.pairplot(housing_data[coln], height = 4);
plt.savefig('pairplotting.png',dpi =300)
plt.show()
from plotnine import ggplot, aes, geom_boxplot
# box-plot the housing data (plotnine's bundled `huron` sample lacks these columns)
(ggplot(housing_data)
 + aes(x='sqft_living', y='sqft_above')
 + geom_boxplot()
)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
384,
397,
1211,
355,
3013,
82,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,... | 2.528428 | 299 |
from prettytable import PrettyTable | [
6738,
2495,
11487,
1330,
20090,
10962
] | 5.833333 | 6 |
import typing
from . import base, fields
class PassportElementError(base.TelegramObject):
"""
This object represents an error in the Telegram Passport element which was submitted that
should be resolved by the user.
https://core.telegram.org/bots/api#passportelementerror
"""
source: base.String = fields.Field()
type: base.String = fields.Field()
message: base.String = fields.Field()
class PassportElementErrorDataField(PassportElementError):
"""
Represents an issue in one of the data fields that was provided by the user.
The error is considered resolved when the field's value changes.
https://core.telegram.org/bots/api#passportelementerrordatafield
"""
field_name: base.String = fields.Field()
data_hash: base.String = fields.Field()
class PassportElementErrorFile(PassportElementError):
"""
Represents an issue with a document scan.
The error is considered resolved when the file with the document scan changes.
https://core.telegram.org/bots/api#passportelementerrorfile
"""
file_hash: base.String = fields.Field()
class PassportElementErrorFiles(PassportElementError):
"""
Represents an issue with a list of scans.
The error is considered resolved when the list of files containing the scans changes.
https://core.telegram.org/bots/api#passportelementerrorfiles
"""
file_hashes: typing.List[base.String] = fields.ListField()
class PassportElementErrorFrontSide(PassportElementError):
"""
Represents an issue with the front side of a document.
The error is considered resolved when the file with the front side of the document changes.
https://core.telegram.org/bots/api#passportelementerrorfrontside
"""
file_hash: base.String = fields.Field()
class PassportElementErrorReverseSide(PassportElementError):
"""
Represents an issue with the reverse side of a document.
The error is considered resolved when the file with reverse side of the document changes.
https://core.telegram.org/bots/api#passportelementerrorreverseside
"""
file_hash: base.String = fields.Field()
class PassportElementErrorSelfie(PassportElementError):
"""
Represents an issue with the selfie with a document.
The error is considered resolved when the file with the selfie changes.
https://core.telegram.org/bots/api#passportelementerrorselfie
"""
file_hash: base.String = fields.Field()
| [
11748,
19720,
198,
198,
6738,
764,
1330,
2779,
11,
7032,
628,
198,
4871,
6251,
634,
20180,
12331,
7,
8692,
13,
6767,
30536,
10267,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
770,
2134,
6870,
281,
4049,
287,
262,
50203,
6251,... | 3.338275 | 742 |
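A hedged usage sketch for the error classes above, assuming aiogram's usual keyword-argument construction for TelegramObject subclasses (all field values here are invented):

error = PassportElementErrorDataField(
    source='data',
    type='passport',
    field_name='document_no',
    data_hash='BASE64_DATA_HASH',  # hypothetical hash value
    message='Document number is invalid',
)
print(error.source, error.field_name)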
from query_parser import QueryParser
from unittest import TestCase
| [
6738,
12405,
62,
48610,
1330,
43301,
46677,
198,
6738,
555,
715,
395,
1330,
6208,
20448,
198
] | 4.1875 | 16 |
import time
start = time.perf_counter()
for i in range(10000):
x = "aaaaa".replace("a", "e")
print(time.perf_counter() - start)
| [
11748,
640,
198,
198,
9688,
796,
640,
13,
525,
69,
62,
24588,
3419,
198,
198,
1640,
1312,
287,
2837,
7,
49388,
2599,
198,
220,
220,
220,
2124,
796,
366,
24794,
64,
1911,
33491,
7203,
64,
1600,
366,
68,
4943,
198,
198,
4798,
7,
243... | 2.5 | 54 |
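The same micro-benchmark can be expressed with the standard-library timeit module, which picks the best timer and handles the loop for you (a minimal equivalent sketch):

import timeit

elapsed = timeit.timeit('"aaaaa".replace("a", "e")', number=10000)
print(elapsed)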
U = [314882150829468584,
427197303358170108,
1022292690726729920,
1698479428772363217,
2006101093849356424]
n, q = map(int, input().split())
A = [i for i in range(n + 1)]
FA = [f(i) for i in A]
for _ in range(q):
l, r = map(int, input().split())
s = sum(FA[l:r + 1])
print(s)
t = s % 5
for i in range(l, r + 1):
A[i] = A[i] * U[t]
FA[i] = f(A[i])
| [
52,
796,
685,
33638,
3459,
2481,
33042,
27696,
3104,
46352,
11,
198,
220,
220,
220,
220,
45345,
24991,
1270,
2091,
3365,
1558,
486,
2919,
11,
198,
220,
220,
220,
220,
838,
1828,
1959,
26276,
2998,
25674,
22579,
1238,
11,
198,
220,
220... | 1.80531 | 226 |
'''
run with: python ten2eleven.py -f numframes test_dummy_old_MDA_code.py
Author: Tyler Reddy
'''
from __future__ import absolute_import
from lib2to3.fixer_base import BaseFix
from lib2to3.fixer_util import Name, Dot
from lib2to3 import pytree
| [
7061,
6,
198,
5143,
351,
25,
21015,
3478,
17,
11129,
574,
13,
9078,
532,
69,
997,
37805,
1332,
62,
67,
13513,
62,
727,
62,
44,
5631,
62,
8189,
13,
9078,
220,
198,
13838,
25,
14886,
2297,
9892,
198,
7061,
6,
198,
6738,
11593,
37443... | 2.775281 | 89 |
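For context, a lib2to3 fixer pairs a PATTERN that matches tree nodes with a transform() that rewrites them. A minimal sketch of that shape (the attribute names are illustrative, not the actual numframes fixer):

from lib2to3.fixer_base import BaseFix
from lib2to3.fixer_util import Name

class FixExampleRename(BaseFix):
    # Match attribute accesses spelled `.numatoms` (hypothetical old API).
    PATTERN = "trailer< '.' attr='numatoms' >"

    def transform(self, node, results):
        attr = results['attr']
        # Replace the old attribute name, preserving surrounding whitespace.
        attr.replace(Name('n_atoms', prefix=attr.prefix))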
"""Copy actions from ProbReg project into ActieReg project
"""
import dml
import _basic.models as my
from django.contrib.auth.models import User
def main(fnaam):
"""Main function
"""
data = [actie[0] for actie in dml.Acties(fnaam, arch="alles").lijst]
for item in data:
actie = dml.Actie(fnaam, item)
about, what = actie.titel.split(": ")
if actie.status == "":
actie.status = " "
nieuw = my.Actie.objects.create(nummer=actie.id,
starter=User.objects.get(pk=1),
about=about,
title=what,
lasteditor=User.objects.get(pk=1),
status=my.Status.objects.get(value=actie.status),
soort=my.Soort.objects.get(value=actie.soort),
behandelaar=User.objects.get(pk=1),
gewijzigd=actie.datum,
arch=actie.arch,
melding=actie.melding,
oorzaak=actie.oorzaak,
oplossing=actie.oplossing,
vervolg=actie.vervolg)
for start, text in actie.events:
my.Event.objects.create(actie=nieuw,
start=start,
starter=User.objects.get(pk=1),
text=text)
if __name__ == "__main__":
main("probreg.xml")
| [
37811,
29881,
4028,
422,
30873,
8081,
1628,
656,
2191,
494,
8081,
1628,
198,
37811,
198,
11748,
288,
4029,
198,
11748,
4808,
35487,
13,
27530,
355,
616,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
27530,
1330,
11787,
628,
198... | 1.544618 | 1,087 |
import tkinter as tk
import matplotlib
import matplotlib.pyplot as plt
from gui.histogram import Histogram
from gui.operations.computer_vision import Vision
from gui.image_frame import ImageFrame
import app_config
matplotlib.use("TkAgg")
class TabPicture:
"""
    Composition of Vision objects that store the images used in operations.
    In future: main control of the tkinter tab displaying images and other data.
"""
gallery = {}
def match(self, what):
'''
Determine if this note matches the filter text.
        Return True if it matches, False otherwise.
Search is case sensitive and matches both name and id.
:param what:
:return:
'''
return what == self.id or what in self.name.get()
@staticmethod
def search(finder):
'''
Find vison object in visions list
:param finder:
:return:
'''
return [TabPicture.gallery[tab] for tab in TabPicture.gallery.keys()
if TabPicture.gallery[tab].match(finder)]
def __contains__(self, item):
"""
Implement Container abstract method to check if object is in our list.
:param item:
:return:
"""
return len(self.search(item)) > 0
def open_image(self, path):
'''
Save copy of opened image for further usage.
:param path: image path
:return:
'''
if len(path) > 0:
self.vision.open_image(path)
else:
            self.main_window.status_message.set("no file given")
def show_hist(self):
"""
        Display the histogram for the given window. Keep the Matplotlib
        behaviour; additional windows are displayed using it.
:return:
"""
        # Clear the graph before loading the next one. How to load several instances for several images?
plt.hist(self.vision.cvImage.image.ravel(), 256, [0, 255])
plt.show()
| [
11748,
256,
74,
3849,
355,
256,
74,
198,
198,
11748,
2603,
29487,
8019,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
198,
6738,
11774,
13,
10034,
21857,
1330,
5590,
21857,
198,
6738,
11774,
13,
3575,
602,
13,
332... | 2.296167 | 861 |
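A short usage sketch for the registry/search API above, assuming tabs have already registered themselves in TabPicture.gallery (the filter text is made up):

hits = TabPicture.search('landscape')  # tabs whose id or name matches
for tab in hits:
    tab.show_hist()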
#
# This file is part of Python Client Library for the LCCS Web Service.
# Copyright (C) 2019-2020 INPE.
#
# Python Client Library for the LCCS Web Service is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
#
"""Python Client Library for the LCCS Web Service."""
from .classes import ClassificationSystemClass
from .link import Link
from .utils import Utils
class ClassificationSystem(dict):
"""Classification System Class."""
def __init__(self, data, validate=False):
"""Initialize instance with dictionary data.
:param data: Dict with class system metadata.
        :param validate: True if the Classification System should be validated using its jsonschema. Default is False.
"""
self._validate = validate
super(ClassificationSystem, self).__init__(data or {})
self['classes'] = dict()
@property
def links(self):
""":return: a list of link in the classification system."""
return [Link(link) for link in self['links']]
@property
def description(self):
""":return: description of classification system."""
return self['description']
@property
def version(self):
""":return: version of classification system."""
return self['version']
@property
def name(self):
""":return: name of classification system."""
return self['name']
@property
def id(self):
""":return: id of classification system."""
return self['id']
@property
def authority_name(self):
""":return: authority_name of classification system."""
return self['authority_name']
def classes(self, class_name=None):
""":return: classes of the classification system."""
return self._get_classes(class_name)
def _get_classes(self, class_name):
""":return: get classes of the classification system."""
if not bool(self['classes']):
self._classes_links = next(Utils._get(link['href']) for link in self['links'] if link['rel'] == 'classes')
for i in self._classes_links:
if i['rel'] == 'child':
class_data = ClassificationSystemClass(Utils._get(i['href'], self._validate), self._validate)
if 'class_parent_id' in class_data:
parent_class_uri = i['href'].rsplit('/', maxsplit=1)[0] + f'/{class_data.class_parent_id}'
class_data['class_parent_name'] = ClassificationSystemClass(Utils._get(parent_class_uri)).name
self['classes'][f"{class_data.name}"] = class_data
if class_name is not None:
return self['classes'][f'{class_name}'] if class_name in self['classes'].keys() else {}
return self['classes'].values()
def _repr_html_(self):
"""HTML repr."""
return Utils.render_html('classification_system.html', classification_system=self)
def __repr__(self):
"""Return the string representation of a classification system object."""
text = f'{self.id}:{self.name}- Version {self.version}'
return text
def __str__(self):
"""Return the string representation of a classification system object."""
return f'<Classification System [{self.id}:{self.name}- Version {self.version}]>'
| [
2,
198,
2,
770,
2393,
318,
636,
286,
11361,
20985,
10074,
329,
262,
406,
4093,
50,
5313,
4809,
13,
198,
2,
15069,
357,
34,
8,
13130,
12,
42334,
3268,
11401,
13,
198,
2,
198,
2,
11361,
20985,
10074,
329,
262,
406,
4093,
50,
5313,
... | 2.663529 | 1,275 |
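A hedged usage sketch for the class above; the metadata is invented, since a real instance would be built from an LCCS service response:

data = {
    'id': 1,
    'name': 'PRODES',
    'version': '1.0',
    'description': 'Example classification system',
    'authority_name': 'INPE',
    'links': [],
}
system = ClassificationSystem(data)
print(system)  # <Classification System [1:PRODES- Version 1.0]>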
# 9012.py
for _ in range(int(input())):
string = input()
while string.find("()") != -1:
index = string.find("()")
string = string[:index] + string[index+2:]
if len(string) == 0:
print("YES")
else:
print("NO") | [
171,
119,
123,
2,
860,
30206,
13,
9078,
198,
1640,
4808,
287,
2837,
7,
600,
7,
15414,
28955,
2599,
198,
220,
220,
220,
4731,
796,
5128,
3419,
198,
220,
220,
220,
981,
4731,
13,
19796,
7203,
3419,
4943,
14512,
532,
16,
25,
198,
220... | 2.106557 | 122 |
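An equivalent single-pass check that tracks nesting depth instead of repeatedly calling find() and re-slicing, O(n) per string rather than O(n^2):

for _ in range(int(input())):
    depth = 0
    for ch in input():
        depth += 1 if ch == '(' else -1
        if depth < 0:
            break  # a ')' appeared before its matching '('
    print("YES" if depth == 0 else "NO")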
"""
Tests for binding pocket detection.
"""
import os
import logging
import unittest
import numpy as np
import deepchem as dc
from deepchem.utils import rdkit_utils
from deepchem.utils import coordinate_box_utils as box_utils
logger = logging.getLogger(__name__)
class TestBindingPocket(unittest.TestCase):
"""
Does sanity checks on binding pocket generation.
"""
def test_convex_init(self):
"""Tests that ConvexHullPocketFinder can be initialized."""
dc.dock.ConvexHullPocketFinder()
def test_get_face_boxes_for_protein(self):
"""Tests that binding pockets are detected."""
current_dir = os.path.dirname(os.path.realpath(__file__))
protein_file = os.path.join(current_dir, "1jld_protein.pdb")
coords = rdkit_utils.load_molecule(protein_file)[0]
boxes = box_utils.get_face_boxes(coords)
assert isinstance(boxes, list)
# Pocket is of form ((x_min, x_max), (y_min, y_max), (z_min, z_max))
for pocket in boxes:
assert isinstance(pocket, box_utils.CoordinateBox)
def test_convex_find_pockets(self):
"""Test that some pockets are filtered out."""
current_dir = os.path.dirname(os.path.realpath(__file__))
protein_file = os.path.join(current_dir, "1jld_protein.pdb")
finder = dc.dock.ConvexHullPocketFinder()
all_pockets = finder.find_all_pockets(protein_file)
pockets = finder.find_pockets(protein_file)
    # Test that every detected pocket is a CoordinateBox
for pocket in pockets:
assert isinstance(pocket, box_utils.CoordinateBox)
assert len(pockets) < len(all_pockets)
def test_extract_active_site(self):
"""Test that computed pockets have strong overlap with true binding pocket."""
current_dir = os.path.dirname(os.path.realpath(__file__))
protein_file = os.path.join(current_dir, "1jld_protein.pdb")
ligand_file = os.path.join(current_dir, "1jld_ligand.sdf")
active_site_box, active_site_coords = \
dc.dock.binding_pocket.extract_active_site(protein_file, ligand_file)
assert isinstance(active_site_box, box_utils.CoordinateBox)
assert isinstance(active_site_coords, np.ndarray)
| [
37811,
198,
51,
3558,
329,
12765,
10000,
13326,
13,
198,
37811,
198,
11748,
28686,
198,
11748,
18931,
198,
11748,
555,
715,
395,
198,
11748,
299,
32152,
355,
45941,
198,
198,
11748,
2769,
15245,
355,
30736,
198,
6738,
2769,
15245,
13,
2... | 2.713188 | 781 |
#!/usr/bin/env python3
""" HIAS Bluetooth IoT Agent Class
HIAS Bluetooth IoT Agents are bridges between HIAS devices that support
Bluetooth/Bluetooth Low Energy protocol and the HIASCDI Context Broker.
MIT License
Copyright (c) 2021 Asociación de Investigacion en Inteligencia Artificial
Para la Leucemia Peter Moss
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
Contributors:
- Adam Milton-Barker
"""
from gevent import monkey
monkey.patch_all()
import json
import os
import psutil
import signal
import sys
import time
import threading
from abc import ABC, abstractmethod
from bluepy import btle
from datetime import datetime, timedelta
from flask import Flask, request, Response
from threading import Thread
from modules.AbstractAgent import AbstractAgent
class Agent(AbstractAgent):
""" HIAS Bluetooth IoT Agent Class
HIAS Bluetooth IoT Agents are bridges between HIAS
devices that support Bluetooth/Bluetooth Low Energy
protocol and the HIASCDI Context Broker.
"""
def get_ble_devices(self):
""" Returns a list of HIAS BLE devices """
bles = self.hiascdi.get_ble_devices()
for ble in bles:
self.bles.append((
ble["bluetoothAddress"]["value"],
ble["bluetoothServiceUUID"]["value"],
ble["bluetoothCharacteristicUUID"]["value"],
ble["networkLocation"]["value"],
ble["networkZone"]["value"],
ble["id"]))
def check_ble_devices(self):
""" Checks for disconnected HIAS BLE devices """
for device in self.ble_tracker:
if self.ble_tracker[device]["last_seen"] != "" and self.ble_tracker[device]["last_seen"] < datetime.now() - timedelta(minutes=5):
self.mqtt.device_status_publish(self.ble_tracker[device]["location"],
self.ble_tracker[device]["zone"],
self.ble_tracker[device]["device"],
"OFFLINE")
self.ble_tracker[device]["last_seen"] = ""
self.helpers.logger.info(
"BLE device " + self.ble_tracker[device]["address"] + " disconnected from iotJumpWay")
threading.Timer(100.0, self.check_ble_devices).start()
def ble_connection(self, addr, service, characteristic):
""" Connects to a HIAS BLE device """
while True:
try:
self.helpers.logger.info(
"Attempting BLE connection to "+addr)
peripheral = btle.Peripheral(addr)
peripheral.setMTU(512)
delegate = BtAgentDelegate()
peripheral.withDelegate(delegate)
serv = peripheral.getServiceByUUID(service)
charac = serv.getCharacteristics(characteristic)[0]
peripheral.writeCharacteristic(charac.valHandle + 1, b"\x01\x00")
self.helpers.logger.info(
"BLE connection to " + addr + " established")
if addr in self.ble_tracker:
self.ble_tracker[addr]["last_seen"] = datetime.now()
self.helpers.logger.info(
addr + " connection timestamp updated")
self.notification_loop(peripheral)
except Exception as e:
self.helpers.logger.info(
"BLE connection to " + addr + " failed")
time.sleep(1.0)
continue
def notification_loop(self, peripheral):
""" Notification loop """
try:
if peripheral.waitForNotifications(2.0):
self.helpers.logger.info(
"Awaiting notifications...")
except Exception as e:
pass
finally:
self.helpers.logger.info(
"Disconnecting from HIAS BLE device")
try:
peripheral.disconnect()
time.sleep(4)
except Exception as e:
self.helpers.logger.info(
"Failed to disconnect from HIAS BLE device")
pass
def parse_data(self, data):
""" Parses the data dictionary """
entity_type = data["EntityType"]
entity = data["Entity"]
data_type = data["Type"]
data_value = data["Value"]
data_message = data["Message"]
return entity_type, entity, data_type, data_value, data_message
def respond(self, response_code, response):
""" Returns the request repsonse """
return Response(response=json.dumps(response, indent=4),
status=response_code,
mimetype="application/json")
app = Flask(__name__)
agent = Agent()
@app.route('/About', methods=['GET'])
def about():
"""
Returns Agent details
Responds to GET requests sent to the North Port About API endpoint.
"""
return agent.respond(200, {
"Identifier": agent.credentials["iotJumpWay"]["entity"],
"Host": agent.credentials["server"]["ip"],
"NorthPort": agent.credentials["server"]["port"],
"CPU": psutil.cpu_percent(),
"Memory": psutil.virtual_memory()[2],
"Diskspace": psutil.disk_usage('/').percent,
"Temperature": psutil.sensors_temperatures()['coretemp'][0].current
})
if __name__ == "__main__":
main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
37811,
36210,
1921,
19263,
38488,
15906,
5016,
198,
198,
25374,
1921,
19263,
38488,
28295,
389,
19432,
1022,
36210,
1921,
4410,
326,
1104,
198,
38676,
16271,
14,
38676,
16271,
7754,
66... | 2.893662 | 1,909 |
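The agent above instantiates BtAgentDelegate, which is not defined in this sample. A minimal bluepy delegate that would satisfy that call might look like this (a sketch, not the project's actual class):

from bluepy import btle

class BtAgentDelegate(btle.DefaultDelegate):
    """Receives notifications from the subscribed BLE characteristic."""

    def handleNotification(self, cHandle, data):
        # The real agent would forward this payload to HIASCDI/iotJumpWay.
        print("Notification from handle %s: %s" % (cHandle, data))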
# ___________________________________________________________________________
#
# Prescient
# Copyright 2020 National Technology & Engineering Solutions of Sandia, LLC
# (NTESS). Under the terms of Contract DE-NA0003525 with NTESS, the U.S.
# Government retains certain rights in this software.
# This software is distributed under the Revised BSD License.
# ___________________________________________________________________________
"""
partitions.py
This module mainly exports a class Partition which will be used to encode
which sources are correlated and therefore should have a copula fit to them.
This will also export a function for parsing a file specifying this partition.
"""
def is_pairwise_disjoint(sets):
"""
This function will determine if a collection of sets is pairwise disjoint.
Args:
sets (List[set]): A collection of sets
"""
all_objects = set()
for collection in sets:
for x in collection:
if x in all_objects:
return False
all_objects.add(x)
return True
def parse_partition_file(filename):
"""
This function will parse out a file which lists the partition of sources.
Two sources are in the same partition if they are expected to be
correlated.
The format of this file should be structured as the following example:
# Partitions File
# Any lines starting with # are ignored
<Group 1>:
-<name_1>
-<name_2>
<Group 2>:
-<name_3>
-<name_4>
Singletons:
-<name_5>
-<name_6>
Note that group names are not parsed out and are rather just used for the
user's own purposes. Also, the Singletons group is not required if there
are no singletons.
Args:
filename (str): The name of the file to parse
Returns:
Partition: The partition of the sources
"""
with open(filename) as f:
eof = False
def get_line():
"""
This function will read in the next line that actually has relevant
data. This is a line that has something other than a comment and
is not blank.
This will eventually cause a StopIteration Error.
Returns:
str: The relevant line
"""
while True:
line = next(f)
text, *_ = line.rstrip().split('#')
text = text.strip()
if text:
return text
def parse_group():
"""
This function will parse out a single group from the text file.
It will advance the file pointer to the line following the last
element of the group
Returns:
List[str]: The list of source names
"""
group = []
nonlocal line
try:
line = get_line()
while line.startswith('-'):
group.append(line[1:])
line = get_line()
except StopIteration:
# This means we've reached the end of the file
nonlocal eof
eof = True
return group
def parse_singletons():
"""
This function will parse out the special singletons field. It will
return a list of singleton lists.
Returns:
List[List[str]]: The list of singletons.
"""
singletons = []
nonlocal line
try:
line = get_line()
while line.startswith('-'):
singletons.append([line[1:]])
line = get_line()
except StopIteration:
# This means we've reached the end of file
nonlocal eof
eof = True
return singletons
sets = []
line = get_line()
while True:
if eof:
break
if line == 'Singletons:':
sets.extend(parse_singletons())
else:
sets.append(parse_group())
return Partition(sets)
class Partition:
"""
This class will produce objects which act as a container for disjoint
sets. This will export methods related to finding the set which contains
a certain object and other related to the mathematical object that is
a partition.
This class overloads __iter__ to iterate through the sets of the partition.
"""
def __init__(self, sets):
"""
This will construct a Partition object given a list of iterables.
It is required that each set be disjoint.
Args:
sets (List[iterable]): A list of disjoint collections that can each
be coerced into a set
"""
self.sets = [set(collection) for collection in sets]
if not is_pairwise_disjoint(self.sets):
raise ValueError("The sets passed in must be pairwise disjoint")
def find_set(self, x):
"""
This will find the set which contains the object x, raising an error
if it is not in any set.
Args:
x: The object you wish to find the containing set for
Returns:
set: The set containing x
"""
for collection in self.sets:
if x in collection:
return collection
else:
raise ValueError("{} is not in any set in the partition".format(x))
def in_partition(self, x):
"""
This will check if an object x is in any set in the partition. It
returns a boolean signifying this fact.
Args:
x: The object you wish to check is in the partition
Returns:
bool: True if x is in the partition, False otherwise
"""
return any([x in collection for collection in self.sets])
def equivalent(self, x, y):
"""
This checks if x and y are equivalent according to the equivalence
relation induced by the partition, i.e., they are the same if they
are in the same set. This raises an error if either are not in the
partition.
Args:
x: The first object in the comparison
y: The second object in the comparison
Returns:
bool: True if they are equivalent
"""
return self.find_set(x) == self.find_set(y)
def add_set(self, set_):
"""
This adds a set to the partition. It first checks that the set is
disjoint from all the other sets in the partition (raising an error
if not) and then adds it to the collection.
Args:
set_: A collection that can be coerced into a set
"""
new_set = set(set_)
# Checking if set_ intersects any set in the partition
if any(new_set & other for other in self.sets):
raise ValueError("{} is not disjoint from all other sets"
.format(set_))
self.sets.append(new_set)
def add_singleton(self, x):
"""
This adds a singleton set {x} to the list of sets. It first checks if
x is in any set before adding raising an error if this is true
Args:
x: The object that you want to insert
"""
self.add_set({x})
def singletons(self):
"""
This function will return all singletons in the partition. This will
return the elements themselves, and not the sets of one elements.
Returns:
List: The list of singletons
"""
# We unpack the set into its first element and rest and the set is
# a singleton if rest has no elements
return [x for (x, *rest) in self.sets if len(rest) == 0]
def to_file(self, filename):
"""
This will write out a partition to a file in the Partitions File
format.
Args:
filename (str): The name of the file to write to
"""
with open(filename, 'w') as f:
for set_ in self.sets:
# Singleton sets have a different format
if len(set_) == 1:
continue
f.write('Group:\n')
for element in set_:
f.write('-{}\n'.format(element))
f.write('\n')
singletons = self.singletons()
if singletons:
f.write('Singletons:\n')
for element in singletons:
f.write('-{}\n'.format(element))
f.write('\n')
| [
2,
220,
220,
27193,
2602,
17569,
198,
2,
198,
2,
220,
1763,
3456,
198,
2,
220,
15069,
12131,
2351,
8987,
1222,
14044,
23555,
286,
3837,
544,
11,
11419,
198,
2,
220,
357,
11251,
7597,
737,
4698,
262,
2846,
286,
17453,
5550,
12,
4535,... | 2.277676 | 3,821 |
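A short usage example for Partition and its equivalence methods (the source names are made up):

p = Partition([['wind_1', 'wind_2'], ['solar_1']])
assert p.equivalent('wind_1', 'wind_2')
assert not p.equivalent('wind_1', 'solar_1')
p.add_singleton('hydro_1')
assert p.singletons() == ['solar_1', 'hydro_1']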
#!/usr/bin/env python
import sys
assert sys.version_info >= (2, 3), "Requires Python v2.3 or above"
from distutils.core import setup, Extension
setup(
name = "pytagger",
version = "0.5",
author = "Alastair Tse",
author_email = "alastair@liquidx.net",
url = "http://www.liquidx.net/pytagger/",
description = "Python ID3 Tag Reader and Writer Module",
long_description = "An ID3v1 and ID3v2 tag reader and writer module in pure Python. Supports all standards including ID3v1, ID3v1.1, ID3v2.2, ID3v2.3 and ID3v2.4",
license = "BSD",
py_modules = ["tagger", "tagger.id3v1", "tagger.id3v2", "tagger.exceptions",
"tagger.constants", "tagger.utility", "tagger.id3v2frame",
"tagger.encoding", "tagger.debug"],
scripts = ["mp3check.py", "apic.py"]
)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
11748,
25064,
198,
30493,
25064,
13,
9641,
18189,
705,
17,
13,
18,
3256,
366,
39618,
11361,
410,
17,
13,
18,
393,
2029,
1,
198,
6738,
1233,
26791,
13,
7295,
1330,
9058,
11,
2799... | 2.501608 | 311 |
import falcon
from falcon import testing
from .dataset_fixtures import *
| [
11748,
24215,
1102,
198,
6738,
24215,
1102,
1330,
4856,
198,
198,
6738,
764,
19608,
292,
316,
62,
69,
25506,
1330,
1635,
628,
198
] | 3.304348 | 23 |
import torch
import torch.nn as nn
import torch.nn.functional as F
from getter import getter
from CURE.CURE import CURELearner
import numpy as np
from matplotlib import pyplot as plt
from pathlib import Path
def lossplot(config: dict, save_path: str = None) -> None:
"""Plots the negative of the loss surface. One axis represents the normal direction; the other is a random direction."""
device = config["device"]
get_dataloader, get_transformer, get_inverse_transformer, get_model = getter(
config["dataset"], config["model_name"])
trainloader = get_dataloader(split="train", batch_size=config["batch_size_train"], shuffle=config["shuffle_train"])
testloader = get_dataloader(split="test", batch_size=config["batch_size_test"], shuffle=False)
model = get_model()
if config["use_checkpoint"]:
checkpoint_path = Path("./data/checkpoints/")
transformer = get_transformer()
net_CURE = CURELearner(model, trainloader, testloader, lambda_0=config["lambda_0"], lambda_1=config["lambda_1"], lambda_2=config["lambda_2"], transformer=transformer, trial=None,
image_min=config["image_min"], image_max=config["image_max"], device=config["device"], path=checkpoint_path / "best_model.data", acc=config["accuracy"])
net_CURE.set_optimizer(optim_alg=config["optimization_algorithm"],
args=config["optimizer_arguments"])
net_CURE.import_state(checkpoint_path / config["checkpoint_file"])
model = net_CURE.net
model = model.to(device)
total_params = sum(p.numel() for p in model.parameters())
print("Total number of parameters: {}".format(total_params))
trainable_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
print("Number of trainable parameters: {}".format(trainable_params))
transformer = get_transformer()
inverse_transformer = get_inverse_transformer()
L = nn.CrossEntropyLoss()
img_shape = (3, 32, 32)
flatten = False
delta = 30
res = 101
n_points = res**2
for batch_idx, (inputs, targets) in enumerate(testloader):
inputs, targets = inputs.to(device), targets.to(device)
inputs.requires_grad_()
outputs = model.eval()(inputs)
loss = L(outputs, targets)
gradient = torch.autograd.grad(loss, inputs, create_graph=True)[0]
gradient = torch.flatten(gradient, start_dim=1)
normal = F.normalize(gradient, dim=1)
v = torch.rand_like(torch.zeros(normal.shape), device=device)
v = F.normalize(v, dim=1)
if not flatten:
normal = normal.reshape(inputs.shape)
v = v.reshape(inputs.shape)
for k, x in enumerate(inputs):
scalars = np.linspace(-delta, delta, res)
grid = torch.empty(res, res)
for i in range(res):
for j in range(res):
x_star = x
if flatten:
x_star = torch.flatten(x, start_dim=0)
x_star = x_star + scalars[i]*normal[k] + scalars[j]*v[k]
x_star = x_star.reshape((1,)+img_shape)
y_star = model.eval()(x_star)
grid[i, j] = L(y_star, targets[k].unsqueeze(0)).detach()
grid = grid.detach().numpy()
scalars = np.outer(scalars, np.ones(res))
masks = [scalars, scalars.T]
plt.figure()
ax = plt.axes(projection='3d')
ax.plot_surface(masks[0], masks[1], grid, cmap='viridis', edgecolor='none')
ax.scatter(0, 0, grid[res // 2, res // 2])
ax.set_xlabel('Surface Normal Direction')
ax.set_ylabel('Random Direction')
if save_path is not None: plt.savefig(save_path + f"loss_{k}")
plt.show()
plt.pause(.001) # Prevents blocking
if k > 3:
exit()
| [
11748,
28034,
198,
11748,
28034,
13,
20471,
355,
299,
77,
198,
11748,
28034,
13,
20471,
13,
45124,
355,
376,
198,
6738,
651,
353,
1330,
651,
353,
198,
6738,
327,
11335,
13,
34,
11335,
1330,
327,
11335,
14961,
1008,
198,
198,
11748,
29... | 2.209775 | 1,821 |
import asyncio
import json
import time
import uuid
from manhattan.utils.chrono import today_tz
from mongoframes import Q
import swm
import tornado.web
__all__ = [
'APIError',
'APIHandler',
'APIErrorHandler',
]
class APIError(tornado.web.HTTPError):
"""
Use `APIError` instead of `HTTPError` to raise an error within API
handlers. The extended exception allows more detailed error information to
be returned to the caller.
"""
# A map of possible error types and their associated HTTP response codes
ERROR_TYPES = {
'error': 500,
'forbidden': 403,
'invalid_request': 400,
'not_found': 404,
'request_limit_exceeded': 429,
'unauthorized': 401
}
def to_json_type(self):
"""Return a JSON safe representation of the error"""
json_error = {'error_type': self.error_type}
if self.hint:
json_error['hint'] = self.hint
if self.arg_errors:
json_error['arg_errors'] = self.arg_errors
return json_error
    # @classmethod  (the decorated method was elided in this sample)
class APIHandler(swm.servers.RequestHandler):
"""
A base handler for API
"""
    # @property  (the decorated properties were elided in this sample)
    # @property
# API handlers should be imported here
from . import assets
| [
11748,
30351,
952,
198,
11748,
33918,
198,
11748,
640,
198,
11748,
334,
27112,
198,
198,
6738,
582,
12904,
13,
26791,
13,
11413,
78,
1330,
1909,
62,
22877,
198,
6738,
285,
506,
1659,
859,
274,
1330,
1195,
198,
11748,
1509,
76,
198,
11... | 2.580777 | 489 |
# -*- coding: utf-8 -*-
# Copyright 2012 Loris Corazza, Sakis Christakidis
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module provides functions embeddable inside you components in order to collect statistics"""
import time
def setLPB(caller, lpb):
"""set the LPB
    This function sets the current LPB in the stats component; you should update it from the scheduler each time the LPB changes in order to get consistent stat values.
:param caller: the calling Component (this is usually *self*)
:param lpb: the current lpb
:type caller: Namespace
:type lpb: int
:returns: nothing
:rtype: None
:Example:
from p2ner.core.statsFunctions import setLPB
setLPB(self, 231)
"""
for s in caller.__stats__:
s.setLPB(lpb)
def counter(caller, _name):
"""increment a counter
    This function creates a stats key if it doesn't exist yet, and increments it by 1 every time it's called.
:param caller: the calling Component (this is usually *self*)
:param _name: the key name
:type caller: Namespace
:type _name: string (component name,stat name,streamid)
:returns: nothing
:rtype: None
:Example:
from p2ner.core.statsFunctions import counter
counter(self, "myCounter")
"""
_name=getClassInfo(caller,_name)
for s in caller.__stats__:
try:
s.incrementKey(_name)
except:
s.addKey(_name, 1)
def setValue(caller, _name, value,incrX=False):
"""set a stats key to a given value
    This function creates a stats key if it doesn't exist yet, and sets it to the given value every time it's called.
:param caller: the calling Component (this is usually *self*)
:param _name: the key name
:param value: the key value
:type caller: Namespace
:type _name: string (component name,stat name,streamid)
"type value: any
:returns: nothing
:rtype: None
:Example:
from p2ner.core.statsFunctions import setValue
setValue(self, "myCounter", 244)
"""
_name=getClassInfo(caller,_name)
for s in caller.__stats__:
try:
s.setKey(_name, value)
except:
s.addKey(_name, value,incrX)
def incrementValuecounter(caller, _name, incr):
"""increment a counter by a given value
    This function creates and increments a counter by a given value every time it's called.
:param caller: the calling Component (this is usually *self*)
:param _name: the counter name
:param incr: the increment value
:type caller: Namespace
:type _name: string (component name,stat name,streamid)
:type incr: int
:returns: nothing
:rtype: None
:Example:
from p2ner.core.statsFunctions import incrementValuecounter
incrementValuecounter(self, "myCounter", 23)
"""
_name=getClassInfo(caller,_name)
for s in caller.__stats__:
try:
s.incrementKey(_name, incr)
except:
s.addKey(_name, incr)
#def ratio(caller, _name, _up, _down):
# """increment a counter
#
# This function creates and increment a counter every time it's called.
#
# :param caller: the calling Component (this is usually *self*)
# :param _name: the counter name
# :type caller: Namespace
# :type lpb: string
# :returns: nothing
# :rtype: None
#
# :Example:
#
# from p2ner.core.statsFunctions import counter
# counter(self, "myCounter")
#
# """
# if caller.__stats__:
# if caller.__stats__.hasKey(_down):
# d = caller.__stats__.getKey(_down)
# n = 0
# if caller.__stats__.hasKey(_up):
# n = caller.__stats__.getKey(_up)
# r = float(n)/d
# try:
# caller.__stats__.setKey(_name, r)
# except:
# caller.__stats__.addKey(_name, r)
#
#def timeratio(caller, _name, _up):
# """increment a counter
#
# This function creates and increment a counter every time it's called.
#
# :param caller: the calling Component (this is usually *self*)
# :param _name: the counter name
# :type caller: Namespace
# :type lpb: string
# :returns: nothing
# :rtype: None
#
# :Example:
#
# from p2ner.core.statsFunctions import counter
# counter(self, "myCounter")
#
# """
# if caller.__stats__:
# if hasattr(caller.__stats__, 't0'):
# n = 0
# d = time.time() - caller.__stats__.t0
# if caller.__stats__.hasKey(_up):
# n = caller.__stats__.getKey(_up)
# r = float(n)/d
# try:
# caller.__stats__.setKey(_name, r)
# except:
# caller.__stats__.addKey(_name, r)
def dumpStats(caller):
"""dumps the current stats dictionary
    This function returns a copy of the stats dictionary with all stats key/values
:param caller: the calling Component (this is usually *self*)
:type caller: Namespace
:returns: { 'key1':value1, 'key2':value2, ...}
:rtype: dict
:Example:
        from p2ner.core.statsFunctions import dumpStats
        dumpStats(self)
"""
ret = {}
    for s in caller.__stats__:
        ret.update(s.dumpKeys())  # merge keys from every stats component
return ret
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
220,
220,
15069,
2321,
26068,
271,
2744,
1031,
4496,
11,
13231,
271,
1951,
461,
29207,
198,
2,
198,
2,
220,
220,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
... | 2.490484 | 2,312 |
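A sketch of how a component would typically call these helpers; it assumes `self` is a p2ner Component whose __stats__ list has been set up by the framework (names are illustrative):

from p2ner.core.statsFunctions import counter, setValue, dumpStats

class MyOverlay(object):
    def on_packet(self, packet):
        counter(self, 'packetsReceived')
        setValue(self, 'lastPacketSize', len(packet))

    def report(self):
        return dumpStats(self)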
from __future__ import absolute_import, print_function, division
import unittest
from pony.orm.core import *
from pony.orm.tests.testutils import *
if __name__ == '__main__':
unittest.main()
| [
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
11,
3601,
62,
8818,
11,
7297,
198,
198,
11748,
555,
715,
395,
198,
198,
6738,
26902,
13,
579,
13,
7295,
1330,
1635,
198,
6738,
26902,
13,
579,
13,
41989,
13,
9288,
26791,
1330,
1635,
1... | 3.046154 | 65 |
"""v21.12.0_feature/#207
Revision ID: 52b9095d9dff
Revises: 05eab8fbc21b
Create Date: 2021-10-08 13:40:57.416791
"""
from alembic import op
import sqlalchemy as sa
from app.database import get_db_schema, engine
from config import MAX_UPLOAD_FILE_SIZE
# revision identifiers, used by Alembic.
revision = '52b9095d9dff'
down_revision = '05eab8fbc21b'
branch_labels = None
depends_on = None
def upgrade():
"""
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('upload_file',
sa.Column('created', sa.DateTime(), nullable=True),
sa.Column('modified', sa.DateTime(), nullable=True),
sa.Column('file_id', sa.String(length=36), nullable=False),
sa.Column('issuer_address', sa.String(length=42), nullable=False),
sa.Column('relation', sa.String(length=50), nullable=True),
sa.Column('file_name', sa.String(length=256), nullable=False),
sa.Column('content', sa.LargeBinary(length=MAX_UPLOAD_FILE_SIZE), nullable=False),
sa.Column('content_size', sa.Integer(), nullable=True),
sa.Column('description', sa.String(length=1000), nullable=True),
sa.PrimaryKeyConstraint('file_id')
, schema=get_db_schema())
# ### end Alembic commands ###
"""
if engine.name == "mysql":
# NOTE:
        # Specify the length, because it selects the column type among BLOB, MEDIUMBLOB, and LONGBLOB.
        # Changing the environment variable value after this migration is not supported.
content_column = sa.Column('content', sa.LargeBinary(length=MAX_UPLOAD_FILE_SIZE), nullable=False)
else:
content_column = sa.Column('content', sa.LargeBinary(), nullable=False)
op.create_table('upload_file',
sa.Column('created', sa.DateTime(), nullable=True),
sa.Column('modified', sa.DateTime(), nullable=True),
sa.Column('file_id', sa.String(length=36), nullable=False),
sa.Column('issuer_address', sa.String(length=42), nullable=False),
sa.Column('relation', sa.String(length=50), nullable=True),
sa.Column('file_name', sa.String(length=256), nullable=False),
content_column,
sa.Column('content_size', sa.Integer(), nullable=True),
sa.Column('description', sa.String(length=1000), nullable=True),
sa.PrimaryKeyConstraint('file_id'),
schema=get_db_schema())
| [
37811,
85,
2481,
13,
1065,
13,
15,
62,
30053,
31113,
22745,
198,
198,
18009,
1166,
4522,
25,
6740,
65,
44675,
20,
67,
24,
67,
487,
198,
18009,
2696,
25,
8870,
68,
397,
23,
69,
15630,
2481,
65,
198,
16447,
7536,
25,
33448,
12,
940,... | 2.373424 | 1,031 |
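For reference, SQLAlchemy's MySQL dialect chooses the BLOB variant from the declared length, which is why the migration must fix the length up front. A small sketch (the 10 MB MAX_UPLOAD_FILE_SIZE figure is illustrative):

import sqlalchemy as sa

MAX_UPLOAD_FILE_SIZE = 10_000_000
# length <= 2**16-1 -> BLOB, <= 2**24-1 -> MEDIUMBLOB, <= 2**32-1 -> LONGBLOB
content_column = sa.Column('content', sa.LargeBinary(length=MAX_UPLOAD_FILE_SIZE))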
import pandas as pd  # import added; `link` is assumed to be defined elsewhere
df = pd.read_csv(link, sep=';') | [
7568,
796,
279,
67,
13,
961,
62,
40664,
7,
8726,
11,
41767,
11639,
26,
11537
] | 2.066667 | 15 |