blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2
values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220
values | src_encoding stringclasses 30
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 2 10.3M | extension stringclasses 257
values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
d36a0fd44877c71c01b65bf4986938b78a9d64dc | 781e2692049e87a4256320c76e82a19be257a05d | /all_data/exercism_data/remove_invalid_submissions.py | bea3c87a9227a23ef93473ae7d1cd253ca981bc3 | [] | no_license | itsolutionscorp/AutoStyle-Clustering | 54bde86fe6dbad35b568b38cfcb14c5ffaab51b0 | be0e2f635a7558f56c61bc0b36c6146b01d1e6e6 | refs/heads/master | 2020-12-11T07:27:19.291038 | 2016-03-16T03:18:00 | 2016-03-16T03:18:42 | 59,454,921 | 4 | 0 | null | 2016-05-23T05:40:56 | 2016-05-23T05:40:56 | null | UTF-8 | Python | false | false | 2,261 | py | import os
from collections import Counter
import shutil
import sys
import argparse
import tempfile
# for example call this script on hamming from exercism_data directory like: python remove_invalid_submissions.py "ruby/hamming/" "filtered-submissions/" "compute"
parser = argparse.ArgumentParser(description='Filter and clean data and put in src directory')
parser.add_argument('data_directory', help='Directory for original data')
parser.add_argument('filtered_submissions', help='Directory to store filtered data')
parser.add_argument('method', help='Method to extract')
args = parser.parse_args()
data_directory = args.data_directory
filtered_submissions =args.filtered_submissions
method = args.method
mapfile = open("mapping.csv", "w")
count = 0
for f in os.listdir(data_directory):
count+=1
print 'starting with submissions: ' + str(count)
if not os.path.isdir(filtered_submissions):
os.mkdir(filtered_submissions)
else:
shutil.rmtree(filtered_submissions)
os.mkdir(filtered_submissions)
if not os.path.isdir('src'):
os.mkdir('src')
else:
shutil.rmtree('src')
os.mkdir('src')
for f in os.listdir(data_directory):
t = tempfile.NamedTemporaryFile(mode="r+")
with open(data_directory+f) as filename:
for line in filename:
line = line.partition('#')[0]
line = line.rstrip() + "\n"
t.write(line)
t.seek(0)
data = t.read()
#if not (data.count('def') == 1 or data.find('def word_count') == -1 or data.find('def initialize') == -1):
if data.count('def') == 1 and (data.find('def self.' + str(method)) != -1 or data.find('def ' + str(method)) != -1):
data = data.replace('def self.' + str(method), 'def ' + str(method))
num_ends_to_strip = data.count('class') + data.count('module')
data = data[data.find('def ' + str(method)):]
for i in range(num_ends_to_strip):
data = data[:data.rfind('end')]
data = data.rstrip()
out = open(filtered_submissions+f, "w+")
out.write(data)
t.close()
count = 0
for f in os.listdir(filtered_submissions):
submission_id = f.strip(".rb")
index_id = len(os.listdir('src'))
shutil.copyfile(filtered_submissions+f, 'src/'+str(index_id)+'.rb')
mapfile.write(str(submission_id) + ' : ' + str(index_id) + '\n')
count += 1
print 'filtered to submissions: ' + str(count)
| [
"rrc@berkeley.edu"
] | rrc@berkeley.edu |
e11206922c5bd7228c5c08eb3fe99d1392f48b8b | f662410e5c7735f953852998c342b02fc39e0a04 | /aritmetica_imagenes/Practica06/main.py | be7d28e1adb3d54778fdab47a15036f3528845ca | [] | no_license | LeDaVR/CG | 8cca8b789ca7816318033adca53e6c78a63ccb9c | f5bec63659b2e48bb5be367577d0bca174535397 | refs/heads/main | 2023-06-23T23:49:42.982570 | 2021-07-27T20:50:22 | 2021-07-27T20:50:22 | 385,108,415 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,968 | py | import sys
import cv2
import math
import numpy as np
from matplotlib import pyplot as plt
import random
def threshold(img, threshold):
    """Binarize a grayscale image: pixels below *threshold* become 0, the rest 255.

    Returns a new array of the same shape/dtype; *img* is left untouched.
    """
    imgres = img.copy()
    # Vectorized boolean masks replace the original O(H*W) Python double loop;
    # each pixel is classified against its original value, exactly as before.
    below = img < threshold
    imgres[below] = 0
    imgres[~below] = 255
    return imgres
def ANDfunc(p, q):
    """Bitwise AND of two pixel values.

    Fixed: the original returned ``p | q`` (OR), so the file's "AND operation"
    output was actually an OR.
    """
    return p & q
def ORfunc(p, q):
    """Bitwise OR of two pixel values.

    Fixed: the original returned ``p & q`` (AND), so the file's "OR operation"
    output was actually an AND.
    """
    return p | q
def XORfunc(p, q):
    """Bitwise XOR of two pixel values."""
    result = p ^ q
    return result
def imagemerge(img1, img2, func):
    """Combine two equal-shaped images pixel-wise with *func*; returns a new array.

    Fixed: the original read the module-level global ``img`` for the copy and
    the shape instead of the ``img1`` parameter, so the function only worked
    by accident when a global ``img`` of the right shape existed.
    """
    imgres = img1.copy()
    for i in range(img1.shape[0]):
        for j in range(img1.shape[1]):
            imgres[i, j] = func(img1[i, j], img2[i, j])
    return imgres
# Load the two input images as grayscale and save them side by side.
archivo = 'img1'
archivo2 = 'img2'
img = cv2.imread(archivo + '.jpg', cv2.IMREAD_GRAYSCALE)
img2 = cv2.imread(archivo2 + '.jpg', cv2.IMREAD_GRAYSCALE)
cv2.imwrite('imgs.jpg', np.concatenate([img, img2], axis=1))

# Intensity histogram of the first image (built for inspection, not shown).
fig, axs = plt.subplots(2)
fig.suptitle('Vertically stacked subplots')
axs[0].hist(img.flatten(), bins=256, range=(0, 256))

imgfin = []

# Binarize each image with its own threshold and save the results.
t = 175
imgres = threshold(img, t)
imgfin.append(imgres.copy())
cv2.imwrite('img1res.jpg', imgres)

t = 176
imgres = threshold(img2, t)
imgfin.append(imgres.copy())
cv2.imwrite('img2res.jpg', imgres)

cv2.imwrite('imgsres.jpg', np.concatenate([imgfin[0], imgfin[1]], axis=1))

# Pixel-wise logical combinations of the two binary images.
imgres = imagemerge(imgfin[0], imgfin[1], ANDfunc)
cv2.imshow('AND operation', imgres)
cv2.imwrite('AND' + '.jpg', imgres)

imgres = imagemerge(imgfin[0], imgfin[1], ORfunc)
cv2.imshow('OR operation', imgres)
cv2.imwrite('OR' + '.jpg', imgres)

imgres = imagemerge(imgfin[0], imgfin[1], XORfunc)
cv2.imshow('XOR operation', imgres)
cv2.imwrite('XOR' + '.jpg', imgres)

cv2.waitKey(0)
cv2.destroyAllWindows() | [
"noreply@github.com"
] | LeDaVR.noreply@github.com |
f289bf01abb3959a7ab1947392b25f7ed8265e3f | 9562a72a5c5e8647a9bc6442bdb556ddbb13c908 | /SSI/sample_mouse_data/chain/features.py | e8cc15db31a6dd4a079a337551cfaec766d26261 | [] | no_license | BIAPT/Scripts | e7529c99bf744967beb7ce3a02d311ac31578ca9 | 9ee5016c79b4768dd44492136a3c020516cc43e5 | refs/heads/master | 2022-11-27T06:16:06.184576 | 2022-01-05T02:00:24 | 2022-01-05T02:00:24 | 193,906,623 | 8 | 5 | null | 2022-11-22T04:58:19 | 2019-06-26T13:09:29 | Jupyter Notebook | UTF-8 | Python | false | false | 1,860 | py |
import math
import numpy
def mean(numbers):
    """Arithmetic mean of a sequence; an empty sequence yields 0.0."""
    count = len(numbers)
    total = float(sum(numbers))
    return total / count if count else total
def var(numbers, mean):
    """Population variance of *numbers* around the caller-supplied *mean*.

    Raises ZeroDivisionError for an empty sequence, like the original.
    """
    squared_deviations = [(value - mean) ** 2 for value in numbers]
    return sum(squared_deviations) / len(numbers)
def calc_feature_from_sample(sample, opts):
    """Compute the feature vector selected by ``opts['features']`` for a trajectory.

    sample: sequence of (x, y) points.
    Returns a flat list, in fixed order: min x, min y, max x, max y,
    total path length, mean x, mean y, variance of step lengths,
    std of step lengths — each group included only if its flag is set.
    """
    features = opts['features']
    listX, listY = zip(*sample)

    def _mean(numbers):
        # Same semantics as the module-level mean(): empty input yields 0.0.
        return float(sum(numbers)) / max(len(numbers), 1)

    def _step_lengths():
        # Euclidean distance between each pair of consecutive points.
        return [math.sqrt((listX[i + 1] - listX[i]) ** 2 + (listY[i + 1] - listY[i]) ** 2)
                for i in range(len(sample) - 1)]

    ret = []
    if features['min']:
        ret.append(min(listX))
        ret.append(min(listY))
    if features['max']:
        ret.append(max(listX))
        ret.append(max(listY))
    if features['dist']:
        ret.append(sum(_step_lengths()))
    if features['avg']:
        ret.append(_mean(listX))
        ret.append(_mean(listY))
    if features['var'] or features['std']:
        # The original recomputed the step-length vector (and its variance)
        # separately for 'var' and 'std'; compute both once here.
        steps = _step_lengths()
        step_mean = _mean(steps)
        variance = sum((s - step_mean) ** 2 for s in steps) / len(steps)
        if features['var']:
            ret.append(variance)
        if features['std']:
            ret.append(math.sqrt(variance))
    return ret
if __name__ == '__main__':
    # Smoke test: a 5-point trajectory moving one unit right and back.
    x = [0,1,0,0,0]
    y = [0,0,0,0,0]
    # Enable every feature so the full vector is printed.
    opts = {
        'features' : {
            'min': True,
            'max' : True,
            'avg' : True,
            'dist' : True,
            'var' : True,
            'std' : True
        }
    }
    samples = list(zip(x,y))
    print(samples)
    print(calc_feature_from_sample(samples, opts))
    print('stop')
| [
"parisa.alirezaee@mail.mcgill.ca"
] | parisa.alirezaee@mail.mcgill.ca |
49a3e571fec6d3d8242ceda928d84dd0d2a9a00c | e510ee167c1208ebd355cd91d3f1de8bd327b2ea | /feedbacks/migrations/0001_initial.py | d71eb030866730178b9e647f4aa6a5b2b9dc157a | [] | no_license | andrsko/mts-server | 2834780337f390ea31fffa48d5c34638e9d646b2 | 7d2a9a1d17cee3304fad50022d9739412edd3330 | refs/heads/master | 2023-02-23T07:22:47.499717 | 2021-01-24T09:58:34 | 2021-01-24T09:58:34 | 324,479,462 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 841 | py | # Generated by Django 3.1.4 on 2021-01-03 08:52
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration for the feedbacks app: creates the Feedback table."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Feedback',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # Sender identity is optional; only the message body is required.
                ('email', models.EmailField(blank=True, max_length=100, null=True)),
                ('name', models.CharField(blank=True, max_length=100, null=True)),
                ('timestamp', models.DateTimeField(auto_now_add=True, verbose_name='timestamp')),
                ('topic', models.CharField(blank=True, max_length=100, null=True)),
                ('message', models.CharField(max_length=500)),
            ],
        ),
    ]
| [
"andrsvchnko@gmail.com"
] | andrsvchnko@gmail.com |
70bd5680f2a27a2fd01a48cc7c6139b5b111f9df | 62584d662feddc5cc9eec0590ce0f1d0876a007e | /listen.py | 235b848ef4369090fd59ccecdf38e84f02958b62 | [] | no_license | rcmantovani/flask_listen | 9dc0b458d63cc28176258dbbc0fd9f069040d278 | b95ed02ce845d25e2871a104deebcb29f7220503 | refs/heads/master | 2016-09-05T19:52:53.757993 | 2015-06-17T11:26:57 | 2015-06-17T11:26:57 | 37,591,496 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,110 | py | from tweepy.streaming import StreamListener
from tweepy import OAuthHandler
from tweepy import Stream
import json
import requests
import os
import time
from config import Config
from urllib import urlencode
class CloudantListener(StreamListener):
    """
    A listener handles tweets are the received from the stream.
    Pipes tweets to Cloudant using Requests
    """
    def on_data(self, data):
        # tweepy calls this with one raw JSON message per stream event.
        tweet = json.loads(data)
        if not 'delete' in tweet:
            # truncate tweet to keep db manageable
            try:
                tweet_data = {
                    "_id": str(tweet['id']),
                    "created_at": tweet['created_at'],
                    "text": tweet['text'],
                    "lang": tweet['lang'],
                    "geo": tweet['geo'],
                    "coordinates": tweet['coordinates']
                }
            except:
                print tweet
                raise
            # normalize geodata using geonames
            if tweet_data.get('geo'):
                # NOTE(review): 'geo' appears to be [lat, lng] here — confirm
                # against the Twitter payload.
                query = urlencode({
                    "username": Config.geo_user,
                    "lat": tweet_data['geo']['coordinates'][0],
                    "lng": tweet_data['geo']['coordinates'][1],
                })
            elif tweet_data.get('coordinates'):
                # GeoJSON 'coordinates' is [lng, lat], hence the swapped indices.
                query = urlencode({
                    "username": Config.geo_user,
                    "lat": tweet_data['coordinates']['coordinates'][1],
                    "lng": tweet_data['coordinates']['coordinates'][0],
                })
            if tweet_data.get('geo') or tweet_data.get('coordinates'):
                # Enrich the record with the geonames reverse-geocode response.
                url = '?'.join([Config.geo_url, query])
                r = requests.get(url)
                tweet_data.update(r.json())
            # insert to database
            r = requests.post(Config.db_url, data=json.dumps(tweet_data), headers={"Content-Type":"application/json"})
            if r.status_code == 409:
                # if revision conflict, update _rev
                # this will happen: https://dev.twitter.com/docs/streaming-apis/processing#Duplicate_messages
                r = requests.get('/'.join([Config.db_url, str(tweet_data['_id'])]))
                tweet_data['_rev'] = r.json()['_rev']
                r = requests.post(Config.db_url, data=json.dumps(tweet_data), headers={"Content-Type":"application/json"})
            if r.status_code not in [200, 201, 202]:
                # if we failed, even after catching 409, say so
                print r.status_code, r.json()
        # Returning True tells tweepy to keep the stream open.
        return True

    def on_error(self, status):
        # Log the HTTP status tweepy reports; the stream stays connected.
        print status
def listen():
    """Open a Twitter sample stream and pipe it to Cloudant, retrying on failure."""
    l = CloudantListener()
    auth = OAuthHandler(Config.consumer_key, Config.consumer_secret)
    auth.set_access_token(Config.access_token, Config.access_token_secret)
    stream = Stream(auth, l, timeout=36000000)

    def attempt_connect(wait=1):
        # Exponential backoff: double the wait after each failure.
        # NOTE(review): retries recurse, so a very long outage could exhaust
        # the recursion limit — confirm acceptable.
        try:
            stream.sample()
        except Exception as e:
            print e
            time.sleep(wait)
            attempt_connect(wait * 2)

    attempt_connect()
if __name__ == '__main__':
    # Run the streaming listener when invoked as a script.
    listen()
"garbados@gmail.com"
] | garbados@gmail.com |
133b56f5e2c93963c54112681608be57d6123b35 | 1f82c55c0f490949bbfaf773f2dda283a070df23 | /model.py | b6ab8abfa89608afad483cfae0120c00f832e55d | [] | no_license | mohann12/drowsyness-dectection | a6e1f938aff7dd929075bb1a1e1fb52ab60d4a6e | 8219afae2eb298e06bf5905eab09861a47f417d6 | refs/heads/main | 2023-08-29T04:21:09.589887 | 2021-09-30T05:42:15 | 2021-09-30T05:42:15 | 394,717,987 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,260 | py | from keras.preprocessing import image
from keras.models import Sequential
from keras.layers import Dropout,Conv2D,Flatten,Dense, MaxPooling2D
def generator(dir, gen=None, shuffle=True, batch_size=1, target_size=(24, 24), class_mode='categorical'):
    """Build a directory-backed batch iterator of grayscale images.

    dir: root directory with one subfolder per class.
    gen: optional ImageDataGenerator; defaults to one that rescales pixels
         to [0, 1]. Fixed: the original constructed the default generator in
         the def line, so it was built eagerly at import time and shared
         between all calls (mutable-default anti-pattern).
    """
    if gen is None:
        gen = image.ImageDataGenerator(rescale=1. / 255)
    return gen.flow_from_directory(dir, batch_size=batch_size, shuffle=shuffle, color_mode='grayscale',
                                   class_mode=class_mode, target_size=target_size)
# Batch iterators over the training/validation folders (24x24 grayscale).
train_batch= generator('data/train',shuffle=True, batch_size=3,target_size=(24,24))
valid_batch= generator('data/valid',shuffle=True, batch_size=1,target_size=(24,24))

# Small CNN: three conv blocks, then a dense classifier over 2 classes
# (softmax output). pool_size=(1,1) means the pooling layers do not
# actually downsample — NOTE(review): confirm that is intended.
model = Sequential([
    Conv2D(16, kernel_size=(3, 3), activation='relu', input_shape=(24,24,1)),
    MaxPooling2D(pool_size=(1,1)),
    Conv2D(32,(3,3),activation='relu'),
    MaxPooling2D(pool_size=(1,1)),
    Conv2D(64, (3, 3), activation='relu'),
    MaxPooling2D(pool_size=(1,1)),
    Dropout(0.25),
    Flatten(),
    Dense(128, activation='relu'),
    Dropout(0.5),
    Dense(2, activation='softmax')
])

model.compile(optimizer='adam',loss='categorical_crossentropy',metrics=['accuracy'])
# Train for 15 epochs, 5 batches per epoch.
model.fit_generator(train_batch, validation_data=valid_batch,epochs=15,steps_per_epoch=5 ,validation_steps=5)
model.save('models/ cnnCat2.h5', overwrite=True) | [
"noreply@github.com"
] | mohann12.noreply@github.com |
b80cff1f63fc85e2e367363d8d4217c52f1bcb9c | 3e3741d9ea06f1dcd560e27145256bd3177bed14 | /01_py基础/第2周/day01/test05.py | 0b0877d5b07aa6ba958a21bc84e6b7a6d5a0890e | [] | no_license | Lousm/Python | 778bc730db09ab135bf53c7b62af29df2407199a | d3f19600012b3576cd5d58df510c17590fcaec14 | refs/heads/master | 2020-03-26T16:40:01.188306 | 2018-11-06T03:56:20 | 2018-11-06T03:56:20 | 145,116,187 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 185 | py | a = [i*10 for i in range(1, 13)]
b = [i*2 for i in range(1, 13)]
def sy(a, b):
    """Return the element-wise sum of two sequences as a list.

    Replaces the index-based loop with the zip idiom; zip also makes the
    function robust to a second sequence longer than the first.
    """
    return [p + q for p, q in zip(a, b)]
c = sy(a, b)
print(c)
| [
"mr_lousm@163.com"
] | mr_lousm@163.com |
1b6b591a2a8ad31a5c1bd110be072f800865522b | e838ea567fe5216bd83b72d5cc549363a666ac3d | /community/migrations/0001_initial.py | 756f713d6f763cc2a681b7383369ae2c3dc63f28 | [] | no_license | iuriramos/swim-registry | f7ffee9a57b92021e7066820249092d1558a944d | 7c71d294b5aa7cb40e01ed559e2fcb81d2e1f43a | refs/heads/master | 2021-09-13T20:22:29.624535 | 2018-05-03T21:30:26 | 2018-05-03T21:30:26 | 85,312,675 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,544 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-05-18 18:20
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial migration for the community app.

    Creates Participant, ParticipantCategory, Profile (with its subscription
    M2M tables) and RegistrationRequest, then links Participant to its
    category via a ForeignKey added last.
    """

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('registry', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='Participant',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('modified', models.DateTimeField(auto_now=True)),
                ('name', models.CharField(max_length=255)),
                ('description', models.TextField(null=True)),
                ('image', models.ImageField(default='participants/images/profiles/none/default.jpg', upload_to='participants/images/profiles/')),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='ParticipantCategory',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(choices=[('AIRSPACE COMPANY', 'Airspace company'), ('RESEARCH ORGANIZATION', 'Research Organization'), ('AIRPORT', 'Airport'), ('AERODROME', 'Aerodrome'), ('RESEARCH INSTITUTION', 'Research Institution'), ('PUBLIC AGENCY', 'Public Agency'), ('OTHER', 'Other')], max_length=50, unique=True)),
            ],
            options={
                'verbose_name_plural': 'participant categories',
                'verbose_name': 'participant category',
            },
        ),
        migrations.CreateModel(
            name='Profile',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('modified', models.DateTimeField(auto_now=True)),
                ('notification_frequency', models.CharField(choices=[('NEVER', 'Never'), ('IMMEDIATE', 'Immediate'), ('DAILY', 'Daily'), ('WEEKLY', 'Weekly')], default='NEVER', max_length=10)),
                ('following_organizations', models.ManyToManyField(related_name='followers', to='community.Participant')),
                ('subscriptions_activity', models.ManyToManyField(related_name='profiles', to='registry.ActivityCategory')),
                ('subscriptions_content_type', models.ManyToManyField(related_name='profiles', to='registry.SubscriptionContentType')),
                ('subscriptions_data', models.ManyToManyField(related_name='profiles', to='registry.DataCategory')),
                ('subscriptions_flight_phase', models.ManyToManyField(related_name='profiles', to='registry.FlightPhaseCategory')),
                ('subscriptions_region', models.ManyToManyField(related_name='profiles', to='registry.RegionCategory')),
                ('subscriptions_stakeholder', models.ManyToManyField(related_name='profiles', to='registry.StakeholderCategory')),
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='profile', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='RegistrationRequest',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('modified', models.DateTimeField(auto_now=True)),
                ('first_name', models.CharField(max_length=255)),
                ('last_name', models.CharField(max_length=255)),
                ('email', models.EmailField(max_length=254)),
                ('organization', models.CharField(max_length=255)),
                ('role', models.CharField(max_length=255)),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.AddField(
            model_name='participant',
            name='category',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='participants', to='community.ParticipantCategory'),
        ),
    ]
"iuri.srb@gmail.com"
] | iuri.srb@gmail.com |
b46422a949e65f9e0a70a77877dc9dc67c456dbf | ff439153e551f58850ecfb32d1b067c9a5d7405d | /krcal/core/ltmaps_test.py | 9a01f3bf67925c9760722ab399af3e8c2dc0ae63 | [] | no_license | jmhaefner/ICAROS | 79d18b9f372e0d8cd180d9c4ba7572d9ec1efcf3 | 4477157abd9ab5df42a28012c167d9e0a9a1ce5a | refs/heads/master | 2021-08-06T14:27:50.179501 | 2021-06-24T19:31:29 | 2021-06-24T19:31:29 | 167,604,724 | 0 | 1 | null | 2019-01-25T19:50:55 | 2019-01-25T19:50:54 | null | UTF-8 | Python | false | false | 8,249 | py | import os
import pandas as pd
import numpy as np
from invisible_cities.io.dst_io import load_dsts
from krcal.core.core_functions import time_delta_from_time
from krcal.core.analysis_functions import kr_event
from krcal.core.analysis_functions import event_map
from krcal.core.analysis_functions import select_xy_sectors
from krcal.core.analysis_functions import select_rphi_sectors
from krcal.core.fit_lt_functions import get_time_series
from krcal.core.fit_lt_functions import time_fcs
from krcal.core.fit_lt_functions import fit_fcs_in_xy_bin
from krcal.core.fit_lt_functions import fit_fcs_in_rphi_sectors
from krcal.core.fit_lt_functions import fit_map_xy
from krcal.core.rphi_maps_functions import rphi_sector_map_def
from krcal.core.rphi_maps_functions import define_rphi_sectors
from krcal.core.kr_types import FitType, KrSector, MapType
import warnings
import pytest
@pytest.fixture(scope='session')
def DST(dst_filenames_path):
    """Load the test DST files once per session; return (dst, time deltas, KrEvent)."""
    dst = load_dsts(dst_filenames_path, "DST", "Events")
    dst_time = dst.sort_values('event')
    T = dst_time.time.values
    # DT: per-event time offsets relative to the run start.
    DT = time_delta_from_time(T)
    kge = kr_event(dst, DT, dst.S2e, dst.S2q)
    return dst, DT, kge
@pytest.fixture(scope='session')
def time_series(DST):
    """Split the run into 10 time bins; return (nt, bin time stamps, event masks)."""
    nt = 10
    dst, DT, kge = DST
    ts, masks = get_time_series(nt, (DT[0],DT[-1]), kge)
    return nt, ts, masks
@pytest.fixture(scope='session')
def kBins():
    # Bin edges (in both X and Y) defining a 5x5 sector grid over [-200, 200].
    return np.array([-200., -120., -40., 40., 120., 200.])
def test_get_time_series(time_series, DST):
    """Each time-bin mask must span every event in the dst."""
    dst, DT, kge = DST
    nt, ts, masks = time_series
    lengths = [len(mask)for mask in masks]
    # One mask and one time stamp per requested bin.
    assert len(masks) == len(ts) == nt
    assert len(masks[0]) == len(kge.X) == len(dst)
    # Every mask has full length (one boolean per event).
    assert np.equal(lengths, len(dst) * np.ones(len(lengths))).all()
def test_time_fcs(time_series, DST):
    """Profile and unbined lifetime fits over the time series must agree to ~1%."""
    dst, DT, kge = DST
    nt, ts, masks = time_series
    fps = time_fcs(ts, masks, kge,
                   nbins_z = 10,
                   nbins_e = 25,
                   range_z = (50, 550),
                   range_e = (5000, 13500),
                   energy = 'S2e',
                   fit = FitType.profile)
    fpu = time_fcs(ts, masks, kge,
                   nbins_z = 10,
                   nbins_e = 25,
                   range_z = (50, 550),
                   range_e = (5000, 13500),
                   energy = 'S2e',
                   fit = FitType.unbined)
    # e0 (energy at z=0) and lt (lifetime) from both methods within 1%.
    assert np.allclose(fps.e0, fpu.e0, rtol=1e-02)
    assert np.allclose(fps.lt, fpu.lt, rtol=1e-02)
def test_fit_fcs_in_xy_bin(DST, kBins):
    """Fit the (2,2) XY bin with profile and unbined methods; e0 and lt must agree to ~1%."""
    dst, DT, kge = DST
    KRE = select_xy_sectors(dst, DT, dst.S2e.values, dst.S2q.values, kBins, kBins)
    neM = event_map(KRE)
    fps_p = fit_fcs_in_xy_bin(xybin=(2, 2),
                              selection_map=KRE,
                              event_map=neM,
                              n_time_bins=1,
                              time_diffs=DT,
                              nbins_z=25,
                              nbins_e=50,
                              range_z=(50, 550),
                              range_e=(5000, 13500),
                              energy='S2e',
                              fit=FitType.profile,
                              n_min=100)
    fps_u = fit_fcs_in_xy_bin(xybin=(2, 2),
                              selection_map=KRE,
                              event_map=neM,
                              n_time_bins=1,
                              time_diffs=DT,
                              nbins_z=25,
                              nbins_e=50,
                              range_z=(50, 550),
                              range_e=(5000, 13500),
                              energy='S2e',
                              fit=FitType.unbined,
                              n_min=100)
    # Fix: the original computed np.allclose(...) but discarded the result,
    # so these comparisons were never actually asserted.
    assert np.allclose(fps_p.e0 / fps_u.e0, 1, rtol=1e-02)
    assert np.allclose(fps_p.lt / fps_u.lt, 1, rtol=1e-02)
def test_fit_fcs_in_rphi_sectors(DST, kBins):
    """Fit sector 0 of an R-Phi map with both methods; per-wedge results must agree."""
    dst, DT, kge = DST
    # 4 radial sectors up to r=200 mm, 90-degree phi wedges.
    rpsmf = rphi_sector_map_def(nSectors =4, rmax =200, sphi =90)
    rps = define_rphi_sectors(rpsmf)
    KRES = select_rphi_sectors(dst, DT, dst.S2e, dst.S2q, rps)
    neM = event_map(KRES)
    fcs_u = fit_fcs_in_rphi_sectors(sector=0,
                                    selection_map=KRES,
                                    event_map=neM,
                                    n_time_bins=1,
                                    time_diffs=DT,
                                    nbins_z=25,
                                    nbins_e=50,
                                    range_z=(50, 550),
                                    range_e=(5000, 13500),
                                    energy='S2e',
                                    fit=FitType.unbined,
                                    n_min=100)
    fcs_p = fit_fcs_in_rphi_sectors(sector=0,
                                    selection_map=KRES,
                                    event_map=neM,
                                    n_time_bins=1,
                                    time_diffs=DT,
                                    nbins_z=25,
                                    nbins_e=50,
                                    range_z=(50, 550),
                                    range_e=(5000, 13500),
                                    energy='S2e',
                                    fit=FitType.profile,
                                    n_min=100)
    for i in range(4):
        # Fix: the original computed np.allclose(...) but discarded the result,
        # so the per-wedge comparisons were never actually asserted.
        assert np.allclose(fcs_u[i].e0 / fcs_p[i].e0, 1, rtol=1e-02)
        assert np.allclose(fcs_u[i].lt / fcs_p[i].lt, 1, rtol=1e-02)
def test_select_xy_sectors(DST, kBins):
    """Event counts in symmetric XY columns should be similar."""
    dst, DT, kge = DST
    KRE = select_xy_sectors(dst, DT, dst.S2e.values, dst.S2q.values, kBins, kBins)
    neM = event_map(KRE)
    # Ratio of first to last X-column event counts must lie in (0.8, 1.1).
    l = ((neM[0]/neM[4]).values > 0.8).all()
    r = ((neM[0]/neM[4]).values < 1.1).all()
    assert l & r
def test_fit_xy_map(DST, kBins):
    """Full XY-map lifetime fits with profile vs unbined methods must agree."""
    dst, DT, kge = DST

    def get_maps_t0(fmxy):
        # Extract the first-time-bin e0/lt/chi2 values per (x, y) cell
        # and tabulate them as DataFrames.
        pE0 = {}
        pLT = {}
        pC2 = {}
        for nx in fmxy.keys():
            pE0[nx] = [fmxy[nx][ny].e0[0] for ny in range(len(fmxy[nx]))] # notice [0] ts bin
            pLT[nx] = [fmxy[nx][ny].lt[0] for ny in range(len(fmxy[nx]))]
            pC2[nx] = [fmxy[nx][ny].c2[0] for ny in range(len(fmxy[nx]))]
        return (pd.DataFrame.from_dict(pE0),
                pd.DataFrame.from_dict(pLT),
                pd.DataFrame.from_dict(pC2))

    KRE = select_xy_sectors(dst, DT, dst.S2e.values, dst.S2q.values, kBins, kBins)
    neM = event_map(KRE)
    fpmxy = fit_map_xy(selection_map = KRE,
                       event_map = neM,
                       n_time_bins = 1,
                       time_diffs = DT,
                       nbins_z = 25,
                       nbins_e = 50,
                       range_z =(50, 550),
                       range_e = (5000, 13500),
                       energy = 'S2e',
                       fit = FitType.profile,
                       n_min = 100)
    mE0p, mLTp, mC2p = get_maps_t0(fpmxy)
    fumxy = fit_map_xy(selection_map = KRE,
                       event_map = neM,
                       n_time_bins = 1,
                       time_diffs = DT,
                       nbins_z = 25,
                       nbins_e = 50,
                       range_z =(50, 550),
                       range_e = (5000, 13500),
                       energy = 'S2e',
                       fit = FitType.unbined,
                       n_min = 100)
    mE0u, mLTu, mC2u = get_maps_t0(fumxy)
    # Lifetime maps within 10%, e0 maps within 1%.
    r1 = (mLTp / mLTu).values
    l1 = np.allclose(r1, 1, rtol=1e-01)
    r2 = mE0p / mE0u
    l2 = np.allclose(r2, 1, rtol=1e-02)
    assert l1 & l2
| [
"jjgomezcadenas@gmail.com"
] | jjgomezcadenas@gmail.com |
e67dd18e17853bde0845ae57c5ee63c25d10828b | a657283ae5208611351606f35b05f46f63581d5c | /website/routes.py | 83404e7b3b86c06c28d0c50b12f5eb7115140b6e | [] | no_license | rrkas/handwriting-generation-flask | e17c71f0335231a6157c728c78ce4c30d7d6df61 | 049091b1a3d341af0ce50e07d484c1bbf98fd3d8 | refs/heads/master | 2023-07-14T22:12:56.482115 | 2021-08-29T10:14:56 | 2021-08-29T10:14:56 | 391,993,520 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,975 | py | import io
import os
import uuid
import pywhatkit as kit
from flask import *
from werkzeug.datastructures import FileStorage
from website import app
output_dir = os.path.join('website', 'static', 'output')
if not os.path.exists(output_dir):
os.mkdir(output_dir)
# Upload extensions accepted by allowed_file().
allowed_file_ext = ['txt']


def allowed_file(filename):
    """Return True if *filename* has an allowed extension (case-insensitive).

    Fixed: the original used filename.split('.')[-1], which returns the whole
    name when there is no dot — so a file named exactly "txt" was accepted.
    """
    if '.' not in filename:
        return False
    return filename.rsplit('.', 1)[-1].lower() in allowed_file_ext
def generate_signature(file):
    """Render text as a handwriting PNG in output_dir.

    file: either a werkzeug FileStorage (an uploaded .txt file) or a plain
    string of text.
    Returns (output_filename_without_extension, True) on success, or
    (error_message, False) on failure.
    """
    try:
        output_filename = str(uuid.uuid4().hex)
        if isinstance(file, FileStorage):
            # pywhatkit leaves this scratch file behind; clear it first.
            if os.path.exists('pywhatkit_dbs.txt'):
                os.remove('pywhatkit_dbs.txt')
            # Persist the upload, read its text, then remove the temp copy.
            file.save(os.path.join(output_dir, output_filename + '.txt'))
            with open(os.path.join(output_dir, output_filename + '.txt'), 'r') as f:
                text = f.read()
            os.remove(os.path.join(output_dir, output_filename + '.txt'))
        else:
            text = file
        kit.text_to_handwriting(
            string=text,
            rgb=(0, 0, 0),
            save_to=os.path.join(output_dir, output_filename + '.png'),
        )
        return output_filename, True
    except BaseException as e:
        # NOTE(review): BaseException also swallows KeyboardInterrupt/SystemExit;
        # Exception would likely suffice here.
        print(e)
        return str(e), False
@app.route('/', methods=['POST', 'GET'])
def home():
    """Main page: accept text or a .txt upload, generate the image, show result.

    POST: validates the input, generates the handwriting image, then redirects
    back to GET with ?img_name=<id> (post/redirect/get).
    GET: renders the page, 404ing if a requested image id does not exist.
    """
    if request.method == 'POST':
        print("form", request.form)  # debug trace of the submitted form
        if request.form.get('inputtype') == 'file':
            if 'file' not in request.files:
                flash('No file part!')
                return redirect(request.url)
            file = request.files.get('file')
            if not allowed_file(file.filename):
                flash('Invalid File!')
                return redirect(request.url)
        else:
            # Plain-text mode: the text itself is passed through.
            file = request.form.get('text')
        img_name, valid = generate_signature(file)
        if valid:
            flash('Image Generated Successfully!', 'success')
        else:
            flash('Something went wrong! Please try again!!', 'error')
            return redirect(request.url)
        return redirect(url_for('home', img_name=img_name))
    filename = request.args.get('img_name')
    result_path = os.path.join(output_dir, str(filename) + '.png')
    if filename and not os.path.exists(result_path):
        abort(404)
    return render_template('home.html', img_name=request.args.get('img_name'))
@app.route('/download/<string:filename>', methods=['GET', 'POST'])
def download(filename):
    """Stream a generated image to the client, deleting it from disk afterwards.

    The file is read fully into memory so it can be removed before the
    response is sent.
    """
    # Security fix: *filename* comes straight from the URL and was joined into
    # a filesystem path unchecked; reject anything that is not a bare name.
    if filename != os.path.basename(filename) or '..' in filename:
        abort(404)
    result_path = os.path.join(output_dir, filename + '.png')
    if not os.path.exists(result_path):
        abort(404)
    return_data = io.BytesIO()
    with open(result_path, 'rb') as fo:
        return_data.write(fo.read())
    return_data.seek(0)
    os.remove(result_path)
    return send_file(
        return_data,
        mimetype='image/png',
        as_attachment=True,
        attachment_filename='txt2handwriting.png'
    )
@app.errorhandler(404)
def error_400(error):
    # Custom 404 page. NOTE(review): handler is registered for HTTP 404
    # despite the function name "error_400".
    return render_template('errors/404.html')
| [
"rrka79wal@gmail.com"
] | rrka79wal@gmail.com |
611500bc11e4bf0093b270c1e76a4ec33c642061 | e61e664d95af3b93150cda5b92695be6551d2a7c | /vega/security/load_pickle.py | 206511004faf87ad32800052b332f62e12f296b8 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0",
"BSD-3-Clause",
"MIT"
] | permissive | huawei-noah/vega | 44aaf8bb28b45f707ed6cd4e871ba70fc0c04846 | 12e37a1991eb6771a2999fe0a46ddda920c47948 | refs/heads/master | 2023-09-01T20:16:28.746745 | 2023-02-15T09:36:59 | 2023-02-15T09:36:59 | 273,667,533 | 850 | 184 | NOASSERTION | 2023-02-15T09:37:01 | 2020-06-20T08:20:06 | Python | UTF-8 | Python | false | false | 1,812 | py | # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Load pickle."""
import pickle
__all__ = ["restricted_loads"]
# Top-level package names from which restricted unpickling may resolve globals.
safe_builtins = {
    'vega',
    'torch',
    'torchvision',
    'functools',
    'timm',
    'mindspore',
    'tensorflow',
    'numpy',
    'imageio',
    'collections',
    'apex',
    'ascend_automl'
}


class RestrictedUnpickler(pickle.Unpickler):
    """Unpickler that can restrict global lookups to whitelisted packages."""

    def __init__(self, file, fix_imports, encoding, errors, security):
        super(RestrictedUnpickler, self).__init__(file, fix_imports=fix_imports,
                                                  encoding=encoding, errors=errors)
        self.security = security

    def find_class(self, module, name):
        """Resolve a global, enforcing the whitelist when security is enabled."""
        resolved = super().find_class(module, name)
        if not self.security:
            return resolved
        if module.split('.')[0] in safe_builtins:
            return resolved
        raise pickle.UnpicklingError(f"global '{module}' is forbidden")


def restricted_loads(file, fix_imports=True, encoding="ASCII", errors="strict", security=False):
    """Unpickle *file*, optionally restricting which globals may be loaded."""
    unpickler = RestrictedUnpickler(file, fix_imports=fix_imports, encoding=encoding,
                                    errors=errors, security=security)
    return unpickler.load()
| [
"zhangjiajin@huawei.com"
] | zhangjiajin@huawei.com |
486752af90a81014c8a2c8b798d2c1b5fc1c35eb | 9dbe507104b03275b1ed5dc91a4aaa2ae6af4f51 | /hearthbreaker/cards/minions/shaman.py | 800985cec4d413a7eaac702479486e1dcdcc24bf | [
"MIT"
] | permissive | bussiere/hearthbreaker | 55fc7c77d8ffb37cda2b5d9afb7ccd44c250702c | 074e20de3498d078877e77b3603580b511e8522b | refs/heads/master | 2021-01-16T22:13:32.110626 | 2014-12-17T13:37:32 | 2014-12-17T13:37:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,946 | py | from hearthbreaker.tags.action import ChangeAttack, Draw, ChangeHealth, Damage, Give, Windfury
from hearthbreaker.tags.base import Aura, Effect, Battlecry
from hearthbreaker.tags.condition import Adjacent, HasOverload
from hearthbreaker.tags.event import TurnEnded, CardPlayed
from hearthbreaker.tags.selector import MinionSelector, SelfSelector, PlayerSelector, CharacterSelector, BothPlayer, \
UserPicker
from hearthbreaker.constants import CHARACTER_CLASS, CARD_RARITY, MINION_TYPE
from hearthbreaker.game_objects import MinionCard, Minion
class AlAkirTheWindlord(MinionCard):
    """Al'Akir the Windlord: 8-mana legendary 3/5 with Windfury, Charge, Divine Shield and Taunt."""
    def __init__(self):
        super().__init__("Al'Akir the Windlord", 8, CHARACTER_CLASS.SHAMAN, CARD_RARITY.LEGENDARY)

    def create_minion(self, player):
        return Minion(3, 5, windfury=True, charge=True, divine_shield=True, taunt=True)
class DustDevil(MinionCard):
    """Dust Devil: 1-mana 3/1 with Windfury; Overload (2)."""
    def __init__(self):
        super().__init__("Dust Devil", 1, CHARACTER_CLASS.SHAMAN, CARD_RARITY.COMMON, overload=2)

    def create_minion(self, player):
        return Minion(3, 1, windfury=True)
class EarthElemental(MinionCard):
    """Earth Elemental: 5-mana epic 7/8 with Taunt; Overload (3)."""
    def __init__(self):
        super().__init__("Earth Elemental", 5, CHARACTER_CLASS.SHAMAN, CARD_RARITY.EPIC, overload=3)

    def create_minion(self, player):
        return Minion(7, 8, taunt=True)
class FireElemental(MinionCard):
    """Fire Elemental: 6-mana 6/5; battlecry deals 3 damage to a user-picked character on either side."""
    def __init__(self):
        super().__init__("Fire Elemental", 6, CHARACTER_CLASS.SHAMAN, CARD_RARITY.COMMON,
                         battlecry=Battlecry(Damage(3), CharacterSelector(players=BothPlayer(), picker=UserPicker())))

    def create_minion(self, player):
        return Minion(6, 5)
class FlametongueTotem(MinionCard):
    """Flametongue Totem: 2-mana 0/3 totem granting +2 attack to adjacent minions."""
    def __init__(self):
        super().__init__("Flametongue Totem", 2, CHARACTER_CLASS.SHAMAN, CARD_RARITY.COMMON, MINION_TYPE.TOTEM)

    def create_minion(self, player):
        return Minion(0, 3, auras=[Aura(ChangeAttack(2), MinionSelector(Adjacent()))])
class ManaTideTotem(MinionCard):
    """Mana Tide Totem: 3-mana rare 0/3 totem; draws the owner a card at the end of each turn."""
    def __init__(self):
        super().__init__("Mana Tide Totem", 3, CHARACTER_CLASS.SHAMAN, CARD_RARITY.RARE, MINION_TYPE.TOTEM)

    def create_minion(self, player):
        return Minion(0, 3, effects=[Effect(TurnEnded(), Draw(), PlayerSelector())])
class UnboundElemental(MinionCard):
    """Unbound Elemental: 3-mana common shaman minion.

    Gains +1 attack and +1 health each time a card with Overload is played.
    """
    def __init__(self):
        super().__init__("Unbound Elemental", 3, CHARACTER_CLASS.SHAMAN, CARD_RARITY.COMMON)
    def create_minion(self, player):
        # Two separate effects fire on the same CardPlayed(HasOverload())
        # trigger: one bumps attack, the other health.
        return Minion(2, 4, effects=[Effect(CardPlayed(HasOverload()), ChangeAttack(1), SelfSelector()),
                                     Effect(CardPlayed(HasOverload()), ChangeHealth(1), SelfSelector())])
class Windspeaker(MinionCard):
    """Windspeaker: 4-mana common shaman minion.

    Battlecry: give a user-picked minion Windfury.
    """
    def __init__(self):
        super().__init__("Windspeaker", 4, CHARACTER_CLASS.SHAMAN, CARD_RARITY.COMMON,
                         battlecry=Battlecry(Give(Windfury()), MinionSelector(picker=UserPicker())))
    def create_minion(self, player):
        # Plain 3/3 body; the battlecry carries the card's effect.
        return Minion(3, 3)
| [
"daniel.yule@gmail.com"
] | daniel.yule@gmail.com |
410c20ac744f61d7b61d46b70026b2aa49f3ad83 | 074ad7bc28ed1b5d8a95a97c0aee9a0239b71eda | /Linked_List.py | 47a3871bf089ae40e5c3468fe2200c41c32c7b28 | [] | no_license | lukeschwenke/CSCI-241 | d93aaf84f8346f6b04654c5ece6cb1ef32726e5a | ab0912904163e4dc2612cbb2ba2a7de278fa20b6 | refs/heads/master | 2021-07-25T14:00:54.662648 | 2017-11-06T03:07:15 | 2017-11-06T03:07:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,788 | py | class Linked_List:
    class __Node:
        # Internal doubly linked node: holds one value plus prev/next links.
        # Links start as None and are spliced in by the list operations.
        def __init__(self, val):
            self.val = val
            self.next = None
            self.prev = None
    def __init__(self):
        # Sentineled doubly linked list: header and trailer are dummy nodes
        # that never hold values; the real nodes always live between them,
        # which removes all head/tail special cases from splicing code.
        self.__header = self.__Node(None)
        self.__trailer = self.__Node(None)
        self.__size = 0
        self.__header.next = self.__trailer
        self.__trailer.prev = self.__header
        self.__header.prev = None
        self.__trailer.next = None
    def __len__(self):
        # Number of value-containing nodes (sentinels are never counted).
        return self.__size
def append_element(self, val):
# increase the size of the list by one, and add a
# node containing val at the new tail position. this
# is the only way to add items at the tail position.
# TODO replace pass with your implementation
new_node = Linked_List.__Node(val)
new_node.next = self.__trailer
self.__trailer.prev.next = new_node
new_node.prev = self.__trailer.prev
self.__trailer.prev = new_node
self.__size = self.__size + 1
def insert_element_at(self, val, index):
# assuming the head position (not the header node)
# is indexed 0, add a node containing val at the
# specified index. If the index is not a valid
# position within the list, raise an IndexError
# exception. This method cannot be used to add an
# item at the tail position.
# TODO replace pass with your implementation
new_node = Linked_List.__Node(val)
if index >= self.__size or index < 0: #If size is 0 index error will be raised.
raise IndexError
current = self.__header
for i in range(index):
current = current.next
new_node.next = current.next
current.next = new_node
new_node.next.prev = new_node
new_node.prev = current
self.__size = self.__size + 1
def remove_element_at(self, index):
# assuming the head position (not the header node)
# is indexed 0, remove and return the value stored
# in the node at the specified index. If the index
# is invalid, raise an IndexError exception.
# TODO replace pass with your implementation
if index >= self.__size or index < 0:
raise IndexError
current = self.__header.next
for i in range(index):
current = current.next
current.prev.next = current.next
current.next.prev = current.prev
current.next = None
current.prev = None
self.__size = self.__size - 1
return current.val
def get_element_at(self, index):
# assuming the head position (not the header node)
# is indexed 0, return the value stored in the node
# at the specified index, but do not unlink it from
# the list. If the specified index is invalid, raise
# an IndexError exception.
# TODO replace pass with your implementation
if index >= self.__size or index < 0:
raise IndexError
current = self.__header.next
for i in range(index):
current = current.next
return current.val
def rotate_left(self):
# rotate the list left one position. Conceptual indices
# should all decrease by one, except for the head, which
# should become the tail. For example, if the list is
# [ 5, 7, 9, -4 ], this method should alter it to
# [ 7, 9, -4, 5 ]. This method should modify the list in
# place and must not return a value.
# TODO replace pass with your implementation.
if self.__size > 1: #If size is 1 it would not change the list.
head = self.__header.next
self.__header.next = head.next
head.next.prev = self.__header
head.next = self.__trailer
head.prev = self.__trailer.prev
self.__trailer.prev.next = head
self.__trailer.prev = head
def __str__(self):
# return a string representation of the list's
# contents. An empty list should appear as [ ].
# A list with one element should appear as [ 5 ].
# A list with two elements should appear as [ 5, 7 ].
# You may assume that the values stored inside of the
# node objects implement the __str__() method, so you
# call str(val_object) on them to get their string
# representations.
# TODO replace pass with your implementation
if self.__size == 0:
return("[ ]")
else:
current = self.__header.next
str_1 = "[ " + str(current.val)
current = current.next
for i in range(self.__size - 1):
str_1 = str_1 + ", " + str(current.val)
current = current.next
str_1 += " ]"
return(str_1)
    def __iter__(self):
        # Reset the traversal cursor to the head node. The list object is
        # its own iterator, so nested or concurrent iteration over the same
        # list is not supported.
        self.__iter_Node = self.__header.next
        return self
def __next__(self):
# using the attribute that you initialized in __iter__(),
# fetch the next value and return it. If there are no more
# values to fetch, raise a StopIteration exception.
# TODO replace pass with your implementation
if self.__iter_Node is self.__trailer:
raise StopIteration
to_return = self.__iter_Node.val
self.__iter_Node = self.__iter_Node.next
return to_return
if __name__ == '__main__':
    # Test plan: exercise an empty list, a one-element list and a
    # multi-element list. For each size we check that the indexed methods
    # raise IndexError on invalid indices, position/remove items correctly
    # on valid ones, that __str__ follows the specified format, and that a
    # for loop walks the list head to tail. The original inlined ~50
    # near-identical try/except stanzas; they are factored into two helpers
    # that preserve the exact printed output ("Good" when an expected
    # IndexError occurs, "Bad" when an unexpected one does, silence
    # otherwise).

    def _expect_index_error(action):
        # Run the call; print "Good" only when it raises IndexError.
        try:
            action()
        except IndexError:
            print("Good")

    def _expect_no_error(action):
        # Run the call; print "Bad" only when it raises IndexError.
        try:
            action()
        except IndexError:
            print("Bad")

    Test_1 = Linked_List()  # Testing empty Linked List
    print(len(Test_1))
    print(Test_1)
    _expect_index_error(lambda: Test_1.insert_element_at(5, 3))
    _expect_index_error(lambda: Test_1.insert_element_at(4, 0))
    _expect_index_error(lambda: Test_1.insert_element_at(3, -1))
    _expect_index_error(lambda: Test_1.remove_element_at(2))
    _expect_index_error(lambda: Test_1.remove_element_at(0))
    _expect_index_error(lambda: Test_1.remove_element_at(-7))
    _expect_index_error(lambda: Test_1.get_element_at(3))
    _expect_index_error(lambda: Test_1.get_element_at(0))
    _expect_index_error(lambda: Test_1.get_element_at(-4))
    _expect_no_error(Test_1.rotate_left)
    print(len(Test_1))
    print(Test_1)

    # testing Linked_List of one element
    _expect_no_error(lambda: Test_1.append_element(3))
    _expect_index_error(lambda: Test_1.insert_element_at(5, 3))
    _expect_no_error(lambda: Test_1.insert_element_at(4, 0))
    _expect_index_error(lambda: Test_1.insert_element_at(3, -1))
    _expect_index_error(lambda: Test_1.remove_element_at(2))
    _expect_no_error(lambda: Test_1.remove_element_at(0))
    _expect_index_error(lambda: Test_1.remove_element_at(-7))
    _expect_index_error(lambda: Test_1.get_element_at(3))
    _expect_no_error(lambda: Test_1.get_element_at(0))
    _expect_index_error(lambda: Test_1.get_element_at(-4))
    _expect_no_error(Test_1.rotate_left)
    print(len(Test_1))
    print(Test_1)

    # testing Linked_List of multiple elements
    def _append_three():
        Test_1.append_element(16)
        Test_1.append_element(92)
        Test_1.append_element(7)
    _expect_no_error(_append_three)
    _expect_index_error(lambda: Test_1.insert_element_at(5, 7))
    _expect_no_error(lambda: Test_1.insert_element_at(4, 3))
    _expect_no_error(lambda: Test_1.insert_element_at(4, 0))
    _expect_index_error(lambda: Test_1.insert_element_at(3, -1))
    _expect_index_error(lambda: Test_1.remove_element_at(7))
    _expect_no_error(lambda: Test_1.remove_element_at(2))
    _expect_no_error(lambda: Test_1.remove_element_at(0))
    _expect_index_error(lambda: Test_1.remove_element_at(-7))
    _expect_index_error(lambda: Test_1.get_element_at(12))
    _expect_no_error(lambda: Test_1.get_element_at(3))
    _expect_no_error(lambda: Test_1.get_element_at(0))
    _expect_index_error(lambda: Test_1.get_element_at(-4))
    _expect_no_error(Test_1.rotate_left)
    print(len(Test_1))
    print(Test_1)

    # testing iterating through multiple element Linked_List
    def _print_all():
        for element in Test_1:
            print(element)
    _expect_no_error(_print_all)
"noreply@github.com"
] | lukeschwenke.noreply@github.com |
69530d51bfa94cf237f306935aea4ffdd6a31b07 | 69afe6535ec7fa531fbef4e386d446c6cabc541c | /m_word2vec_pt.py | a112877c9e1235e9c44c2956812794408efbce40 | [] | no_license | huanghqdx/SkipGram | ea371bca9b053c2d44ee41858a9d957a694acd48 | 1debb736335687e6c6bb788a08688ed5808d4f66 | refs/heads/master | 2020-09-17T12:48:28.773991 | 2018-04-27T15:16:09 | 2018-04-27T15:16:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,420 | py | import os
import random
import pickle
import zipfile
import argparse
import collections
import numpy as np
import torch
from torch import nn
from torch.autograd import Variable
from tempfile import gettempdir
from sklearn.preprocessing import normalize
from six.moves import urllib
np.set_printoptions(precision=2)
verbose = 0
class SkipGram(nn.Module):
    """Skip-gram word2vec model whose loss contrasts each positive
    (center, context) pair against caller-supplied negative context words.

    NOTE(review): this module is Python 2 code -- the `verbose` debugging
    blocks below use print statements.
    """
    def __init__(self, vocab_size, embedding_dim):
        super(SkipGram, self).__init__()
        self.vocab_size = vocab_size
        self.embedding_dim = embedding_dim
        # v_embeddings for center words
        self.v_embeddings = nn.Embedding(vocab_size, embedding_dim)
        # u_embeddings for context words
        self.u_embeddings = nn.Embedding(vocab_size, embedding_dim)
        # Init parameters for u and v if train from scratch
        self.v_embeddings.weight.data.uniform_(-1.0, 1.0)
        self.u_embeddings.weight.data.normal_()
    def forward(self, pos_v, pos_u, neg_u):
        """
        :param pos_v: (batch_size) long tensor variables of input (center) word ids
        :param pos_u: (batch_size) long tensor variables of label (context) word ids
        :param neg_u: (batch_size, num_neg) long tensor variables of negative word ids for each pos_v
        :return: The cross entropy loss between u and v
            minimize L = -log[exp(pos_u'*pos_v) / sum_i(exp(neg_u'*pos_v))]
                       = log(sum_i(exp(neg_u'*pos_v))) - pos_u'*pos_v
                       = log(neg_score) - pos_score
        """
        pos_v_embedding = self.v_embeddings(pos_v)
        pos_u_embedding = self.u_embeddings(pos_u)
        neg_u_embedding = self.u_embeddings(neg_u)
        # Similarity between pos_u and pos_v, i.e., pos_u'*pos_v (dot product between pos_u and pos_v)
        # pos_u_embedding: (batch_size, embedding_dim) -> (batch_size, 1, embedding_dim)
        # pos_v_embedding: (batch_size, embedding_dim) -> (batch_size, embedding_dim, 1)
        # pos_score: (batch_size) each value is pos_u'*pos_v
        pos_score = torch.bmm(pos_u_embedding.view(-1, 1, self.embedding_dim),
                              pos_v_embedding.view(-1, self.embedding_dim, 1)).squeeze()
        if verbose:
            print 'pos_score:', pos_score.size()
            print pos_score.data
        # Similarity between neg_u and pos_v, i.e., dot product between each neg_u and pos_v
        # neg_u_embedding: (batch_size, num_neg, embedding_dim)
        # pos_v_embedding: (batch_size, embedding_dim) -> (batch_size, embedding_dim, 1)
        # neg_score: (batch_size, num_neg) each value is neg_u'*pos_v
        neg_score = torch.bmm(neg_u_embedding, pos_v_embedding.view(-1, self.embedding_dim, 1)).squeeze()
        if verbose:
            print 'neg_score:', neg_score.size()
            print neg_score.data
        # Compute log(sum_i(exp(neg_u'*pos_v)))
        # neg_score: (batch_size, num_neg)
        # log_neg_score: (batch_size)
        # NOTE(review): log(sum(exp(...))) can overflow for large scores; a
        # logsumexp formulation would be numerically safer.
        log_neg_score = torch.log(torch.sum(torch.exp(neg_score), dim=1))
        # Compute final score then take average, i.e., score = log(neg_score) - pos_score
        # log_neg_score: (batch_size)
        # pos_score: (batch_size)
        # score: scalar for loss
        score = log_neg_score - pos_score
        score = torch.mean(score)
        return score
    def forward_u(self, u):
        # Look up context-side embeddings for word ids `u`.
        return self.u_embeddings(u)
    def forward_v(self, v):
        # Look up center-side embeddings for word ids `v`.
        return self.v_embeddings(v)
    def set_u_embeddings(self, weights):
        # Overwrite context embeddings in place from a numpy array
        # (used to load pretrained weights).
        self.u_embeddings.weight.data.copy_(torch.from_numpy(weights))
    def set_v_embeddings(self, weights):
        # Overwrite center embeddings in place from a numpy array.
        self.v_embeddings.weight.data.copy_(torch.from_numpy(weights))
    def get_u_embeddings(self):
        # Raw context embedding matrix (vocab_size, embedding_dim).
        return self.u_embeddings.weight.data
    def get_v_embeddings(self):
        # Raw center embedding matrix (vocab_size, embedding_dim).
        return self.v_embeddings.weight.data
def str2bool(v):
    """Parse a human-friendly boolean string for argparse.

    Accepts yes/true/t/y/1 and no/false/f/n/0 (case-insensitive); anything
    else raises argparse.ArgumentTypeError.
    """
    value = v.lower()
    if value in ('yes', 'true', 't', 'y', '1'):
        return True
    if value in ('no', 'false', 'f', 'n', '0'):
        return False
    raise argparse.ArgumentTypeError('Boolean value expected.')
def maybe_create_path(path):
    # Create `path` if it does not already exist. Single level only: this
    # uses os.mkdir, so the parent directory must already exist.
    if not os.path.exists(path):
        os.mkdir(path)
        print "Created a path: %s" % path
def maybe_download(file_name, expected_bytes):
    """Download a file if not present, and make sure it's the right size.

    Fetches `file_name` from mattmahoney.net into the system temp dir and
    returns the local path. The byte-size check guards against truncated or
    corrupted downloads; a mismatch raises Exception.
    """
    url = 'http://mattmahoney.net/dc/'
    local_file_name = os.path.join(gettempdir(), file_name)
    if not os.path.exists(local_file_name):
        local_file_name, _ = urllib.request.urlretrieve(url + file_name, local_file_name)
    stat_info = os.stat(local_file_name)
    if stat_info.st_size == expected_bytes:
        print 'Found and verified', file_name
    else:
        print stat_info.st_size
        raise Exception('Failed to verify' + local_file_name + '. Can you get to it with a browser?')
    return local_file_name
def read_data(file_name):
    """Extract the first file enclosed in a zip file as a list of words.

    `file_name` may be a path or any file-like object ZipFile accepts.
    """
    with zipfile.ZipFile(file_name) as archive:
        first_member = archive.namelist()[0]
        return archive.read(first_member).split()
def build_dataset(words, num_words):
    """Build the vocabulary structures for the skip-gram model.

    :param words: corpus as a flat sequence of word tokens
    :param num_words: dictionary size; slot 0 is reserved for 'UNK'
    :return: (data, count, dictionary, reversed_dictionary) where `data`
        is the corpus mapped to ids (out-of-vocabulary -> 0), `count` is
        [['UNK', n_unk], (word, freq), ...], and the two dicts map
        word <-> id in both directions.
    """
    count = [['UNK', -1]]
    count.extend(collections.Counter(words).most_common(num_words - 1))
    # Ids are assigned by frequency rank, so 'UNK' gets id 0.
    dictionary = {word: rank for rank, (word, _) in enumerate(count)}
    data = []
    unk_count = 0
    for word in words:
        word_id = dictionary.get(word, 0)
        if word_id == 0:  # dictionary['UNK']
            unk_count += 1
        data.append(word_id)
    count[0][1] = unk_count
    reversed_dictionary = {word_id: word for word, word_id in dictionary.items()}
    return data, count, dictionary, reversed_dictionary
def generate_batch(data, index, batch_sz, n_skips, skip_sz):
    """Build one skip-gram training batch of (center, context) id pairs.

    :param data: full corpus as a list of word ids
    :param index: current read cursor into `data`
    :param batch_sz: number of pairs to emit; must be a multiple of n_skips
    :param n_skips: context words sampled per center word (without
        replacement, via random.sample -- output is order-dependent on the
        global RNG state)
    :param skip_sz: half-width of the context window around the center word
    :return: (inputs, labels, new_index); callers thread new_index back in
        to continue scanning the corpus on the next call.
    """
    assert batch_sz % n_skips == 0
    assert n_skips <= 2 * skip_sz
    inputs = np.ndarray(shape=batch_sz, dtype=np.int64)
    labels = np.ndarray(shape=batch_sz, dtype=np.int64)
    span = 2 * skip_sz + 1
    # Sliding window over the corpus; maxlen makes appends drop the oldest.
    buff = collections.deque(maxlen=span)
    if index + span > len(data):
        index = 0
    buff.extend(data[index:index + span])
    index += span
    for i in range(batch_sz // n_skips):
        # All window offsets except the center itself are candidates.
        context_words = [w for w in range(span) if w != skip_sz]
        words_to_use = random.sample(context_words, n_skips)
        for j, context_word in enumerate(words_to_use):
            inputs[i * n_skips + j] = buff[skip_sz]
            labels[i * n_skips + j] = buff[context_word]
        if index == len(data):
            # Wrap around to the start of the corpus.
            buff.extend(data[0:span])
            index = span
        else:
            buff.append(data[index])
            index += 1
    # Backtrack a little bit to avoid skipping words in the end of a batch
    index = (index + len(data) - span) % len(data)
    return inputs, labels, index
def main():
    """End-to-end training driver (Python 2): download text8, build the
    vocabulary, train the SkipGram model with in-batch negatives, and save
    L2-normalized embeddings with both dictionaries via pickle."""
    # Step 0: Hyper Parameters
    parser = argparse.ArgumentParser()
    parser.add_argument('--cuda', type=str2bool, default='true')
    parser.add_argument('--gpuid', type=int, default=3)
    parser.add_argument('--batch_size', type=int, default=64)
    parser.add_argument('--vocabulary_size', type=int, default=100000)
    parser.add_argument('--embedding_size', type=int, default=128)
    parser.add_argument('--learning_rate', type=float, default=1.0)
    parser.add_argument('--num_steps', type=int, default=100001)
    parser.add_argument('--avg_step', type=int, default=2000)
    parser.add_argument('--ckpt_step', type=int, default=10000)
    parser.add_argument('--skip_window', type=int, default=1)
    parser.add_argument('--num_skips', type=int, default=2)
    # NOTE(review): --num_sampled is parsed but never used below; the
    # negatives come from the other labels in the batch instead.
    parser.add_argument('--num_sampled', type=int, default=64)
    parser.add_argument('--valid_size', type=int, default=16)
    parser.add_argument('--valid_window', type=int, default=100)
    args = parser.parse_args()
    os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpuid)
    print 'Step 1: Download the data'
    file_name = maybe_download('text8.zip', 31344016)
    words = read_data(file_name)
    print '\tData size:', len(words)
    print 'Step 2: Build words dictionary'
    data, count, dictionary, reverse_dictionary = build_dataset(words, args.vocabulary_size)
    del words  # Hint to reduce memory.
    print '\tMost common words (+UNK)', count[:5]
    print '\tSample data', data[:10], [reverse_dictionary[i] for i in data[:10]]
    index = 0
    print '\tData index:', index
    print 'Step 3: Test function for generating training batch for the skip-gram model'
    batch, labels, _ = generate_batch(data=data, index=index, batch_sz=8, n_skips=2, skip_sz=1)
    for i in range(8):
        print '\t', batch[i], reverse_dictionary[batch[i]], '->', labels[i], reverse_dictionary[labels[i]]
    print 'Step 4: Build Skip Gram model'
    net = SkipGram(args.vocabulary_size, args.embedding_size)
    # Load pretrained weights
    pretrained_file = './weights/w2v.model'
    if os.path.exists(pretrained_file):
        v_weights, u_weights = pickle.load(open(pretrained_file, 'r'))
        net.set_v_embeddings(v_weights)
        net.set_u_embeddings(u_weights)
    # Optimizer for parameters
    optimizer = torch.optim.SGD(net.parameters(), lr=args.learning_rate)
    # Cuda
    if args.cuda:
        net.cuda()
    # valid examples for checking
    valid_examples = np.random.choice(args.valid_window, args.valid_size, replace=False)
    batch_valid = torch.LongTensor(valid_examples)
    batch_valid = Variable(batch_valid.cuda()) if args.cuda else Variable(batch_valid)
    print 'Step 5: Train Skip Gram model'
    avg_loss = 0
    for step in xrange(args.num_steps):
        batch_mids, batch_lbls, index = generate_batch(data, index, args.batch_size, args.num_skips, args.skip_window)
        # batch_inps: 1d array: (batch_size)
        batch_inps = np.squeeze(batch_mids)
        # batch_tgts: 1d array: (batch_size)
        batch_tgts = np.squeeze(batch_lbls)
        # batch_negs: 2d array: (batch_size, num_neg) in this case, we use other pos_v as neg_v
        batch_negs = np.repeat(np.expand_dims(batch_lbls, 1).transpose(), batch_lbls.shape[0], axis=0)
        # batch_negs = np.tile(np.repeat(np.expand_dims(batch_lbls, 1).transpose(), batch_lbls.shape[0], axis=0), 2)
        if verbose:
            print 'batch_inps:', batch_inps.shape
            print batch_inps
            print 'batch_tgts:', batch_tgts.shape
            print batch_tgts
            print 'batch_negs:', batch_negs.shape
            print batch_negs
        # To long tensor
        batch_inps = torch.LongTensor(batch_inps)
        batch_tgts = torch.LongTensor(batch_tgts)
        batch_negs = torch.LongTensor(batch_negs)
        # Cuda
        batch_inps = Variable(batch_inps.cuda()) if args.cuda else Variable(batch_inps)
        batch_tgts = Variable(batch_tgts.cuda()) if args.cuda else Variable(batch_tgts)
        batch_negs = Variable(batch_negs.cuda()) if args.cuda else Variable(batch_negs)
        # Zero gradient
        net.zero_grad()
        # Forward and get loss
        loss = net(batch_inps, batch_tgts, batch_negs)
        # Backward
        loss.backward()
        # Step the optimizer
        optimizer.step()
        avg_loss += loss.data[0]
        if step % args.avg_step == 0:
            if step > 0:
                avg_loss /= args.avg_step
            print '\tAverage loss at iter %6d:' % step, avg_loss
            avg_loss = 0
        if step % args.ckpt_step == 0:
            # Get embeddings of valid words and perform L2-normalization
            valid_embeddings = net.forward_v(batch_valid)
            valid_embeddings = valid_embeddings.data.cpu().numpy() if args.cuda else valid_embeddings.data.numpy()
            valid_embeddings = normalize(valid_embeddings, norm='l2', axis=1)
            # Get embeddings of all words and perform L2-normalization
            embeddings = net.get_v_embeddings().cpu().numpy() if args.cuda else net.get_v_embeddings().numpy()
            normalized_embeddings = normalize(embeddings, norm='l2', axis=1)
            # Compute cosine similarity between valid words and all words in dictionary
            sim = np.matmul(valid_embeddings, np.transpose(normalized_embeddings))
            # Print top-k neighbors for each valid word
            for i in xrange(args.valid_size):
                valid_word = reverse_dictionary[valid_examples[i]]
                top_k = 8  # number of nearest neighbors
                nearest = (-sim[i, :]).argsort()[1:top_k + 1]
                log_str = '\tNearest to %-10s :' % valid_word
                for k in xrange(top_k):
                    close_word = reverse_dictionary[nearest[k]]
                    log_str = '%s %-16s' % (log_str, close_word)
                print log_str
    print 'Step 6: Save the trained model'
    embeddings = net.get_v_embeddings().cpu().numpy() if args.cuda else net.get_v_embeddings().numpy()
    final_embeddings = normalize(embeddings, norm='l2', axis=1)
    model_dir = './models'
    maybe_create_path(model_dir)
    model_path = os.path.join(model_dir, 'word2vec_pt.model')
    print '\tSaving trained weights to %s' % model_path
    pickle.dump([final_embeddings, dictionary, reverse_dictionary], open(model_path, 'w'))
if __name__ == '__main__':
main()
| [
"noreply@github.com"
] | huanghqdx.noreply@github.com |
7ab83a14bab5545f896d9f65330adbc90e497586 | ccdab7391b7d5617f5c6fd7c7a1cca431761048e | /polls/views.py | 4bf7e2de546ce2534172c7e42212eaa7bbaa1e3d | [] | no_license | barhoring/DangoPoll | a077805dc5905613bf8356bc804dd7d83f21acb6 | 915f5ef33eed76bdff4d1ba6d1badf1cf78ed506 | refs/heads/master | 2021-07-21T14:52:38.002700 | 2017-10-31T15:38:08 | 2017-10-31T15:38:08 | 105,390,994 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,792 | py | from django.shortcuts import render, get_object_or_404
from django.http import HttpResponse, HttpResponseRedirect
from .models import Question, Choice
from django.urls import reverse
from django.views import generic
from django.utils import timezone
class IndexView(generic.ListView):
    """Polls index page: lists the five most recent published questions."""
    template_name = 'polls/index.html'
    context_object_name = 'latest_question_list'
    def get_queryset(self):
        """Return the last five published questions."""
        # pub_date__lte=now hides questions scheduled in the future.
        return Question.objects.filter(
            pub_date__lte=timezone.now()
        ).order_by('-pub_date')[:5]
class DetailView(generic.DetailView):
    """Detail page for a single question; unpublished questions 404."""
    model = Question
    template_name = 'polls/detail.html'
    def get_queryset(self):
        """
        Excludes any questions that aren't published yet.
        """
        return Question.objects.filter(pub_date__lte=timezone.now())
class ResultsView(generic.DetailView):
    """Results page for a single question (polls/results.html)."""
    model = Question
    template_name = 'polls/results.html'
def vote(request, question_id):
    """Record a vote for one of `question_id`'s choices.

    Re-renders the detail page with an error message when no valid choice
    was submitted; otherwise increments the vote count and redirects to the
    results page.
    """
    question = get_object_or_404(Question, pk=question_id)
    try:
        choice = question.choice_set.get(pk=request.POST['choice'])
    except (KeyError, Choice.DoesNotExist):
        # No 'choice' key in the POST data, or an id that does not belong
        # to this question: redisplay the voting form.
        context = {
            'question': question,
            'error_message': "You didn't select a choice.",
        }
        return render(request, 'polls/detail.html', context)
    choice.votes += 1
    choice.save()
    # Always return an HttpResponseRedirect after successfully dealing
    # with POST data. This prevents data from being posted twice if a
    # user hits the Back button.
    return HttpResponseRedirect(reverse('polls:results', args=(question.id,)))
| [
"bar.apple@gmail.com"
] | bar.apple@gmail.com |
8277d4f471be2dee3c2676a6bf9cbd30cf236a64 | c0ad282ab743a315e2f252a627933cb168434c1d | /models/agreement/type_prior.py | bde43251219d85d73f078c3d0ba4fad4980ae25c | [
"MIT"
] | permissive | AlexKuhnle/ShapeWorld | 6d1e16adc94e860abae99ade869f72575f573bc4 | e720bf46e57fc01326d04d639fa6133d9c12158f | refs/heads/master | 2021-07-09T00:02:33.808969 | 2021-04-19T11:10:52 | 2021-04-19T11:10:52 | 80,815,972 | 58 | 28 | MIT | 2021-04-19T11:10:53 | 2017-02-03T09:40:19 | Python | UTF-8 | Python | false | false | 2,722 | py | from models.TFMacros.tf_macros import *
def model(model, inputs, dataset_parameters):
    # Baseline "type prior" agreement model: ignores the world/image and
    # predicts agreement purely from statistics over the caption's final
    # token (suffix_length=1).
    # NOTE(review): the first parameter shadows the function name and is
    # unused here -- presumably kept for a shared builder signature;
    # confirm against the caller.
    caption = Input(name='caption_rpn', shape=dataset_parameters['caption_rpn_shape'], dtype='int', tensor=inputs.get('caption_rpn'))()
    caption_length = Input(name='caption_rpn_length', shape=(), dtype='int', tensor=inputs.get('caption_rpn_length'))
    agreement = Input(name='agreement', shape=(), dtype='float', tensor=inputs.get('agreement'))()
    # `>>` chains TFMacros units: the suffix prior consumes the caption and
    # the gold agreement, and Binary wraps the result as the output head.
    agreement = (
        (caption, caption_length, agreement) >>
        SuffixPrior(suffix_length=1, vocabulary_size=dataset_parameters['rpn_vocabulary_size']) >>
        Binary(name='agreement', binary_transform=False, tensor=agreement)
    )
    return agreement
class SuffixPrior(Unit):
    """Majority-vote agreement prior keyed on the caption's last
    `suffix_length` tokens.

    Maintains a non-trainable integer table of shape
    (vocabulary_size,)*suffix_length + (2,) counting, per suffix, how often
    the gold agreement was 0 (index 0) vs 1 (index 1). forward() predicts
    0 / 1 / 0.5 by comparing the two counts, and queues a scatter-add that
    folds the current batch's gold labels into the table.
    """
    num_in = 3
    num_out = 1
    def __init__(self, suffix_length, vocabulary_size):
        super(SuffixPrior, self).__init__()
        self.suffix_length = suffix_length
        self.vocabulary_size = vocabulary_size
    def initialize(self, caption, caption_length, agreement):
        super(SuffixPrior, self).initialize(caption, caption_length, agreement)
        # One count cell per (suffix tokens..., agreement in {0, 1}).
        shape = tuple(self.vocabulary_size for _ in range(self.suffix_length)) + (2,)
        self.suffix_agreement_counts = tf.get_variable(name='suffix-agreement-counts', shape=shape, dtype=tf.int32, initializer=tf.zeros_initializer(dtype=tf.int32), trainable=False)
    def forward(self, caption, caption_length, agreement):
        super(SuffixPrior, self).forward(caption, caption_length, agreement)
        batch_size = tf.shape(input=caption)[0]
        # Gather the last suffix_length token ids of each caption
        # (position caption_length - suffix_length + n for n = 0..).
        slice_indices = [tf.stack(values=(tf.range(batch_size), caption_length - (self.suffix_length - n)), axis=1) for n in range(self.suffix_length)]
        suffix = tf.stack(values=[tf.gather_nd(params=caption, indices=indices) for indices in slice_indices], axis=1)
        agreement_counts = tf.gather_nd(params=self.suffix_agreement_counts, indices=suffix)
        # Majority vote: more disagreements -> 0, more agreements -> 1,
        # tie -> 0.5.
        prior = tf.where(
            condition=(agreement_counts[:, 0] > agreement_counts[:, 1]),
            x=tf.zeros(shape=(batch_size,)),
            y=tf.where(
                condition=(agreement_counts[:, 0] < agreement_counts[:, 1]),
                x=tf.ones(shape=(batch_size,)),
                y=(tf.ones(shape=(batch_size,)) * 0.5)
            )
        )
        # Append the gold agreement as the final index and count it in.
        agreement = tf.expand_dims(input=tf.cast(x=agreement, dtype=Model.dtype('int')), axis=1)
        indices = tf.concat(values=(suffix, agreement), axis=1)
        updates = tf.ones(shape=(batch_size,), dtype=Model.dtype('int'))
        # The count update is installed as the model's single "optimization"
        # op (this prior has no trainable parameters).
        assert Model.current.optimization is None
        Model.current.optimization = tf.scatter_nd_add(ref=self.suffix_agreement_counts, indices=indices, updates=updates)
        return prior
| [
"aok25@cl.cam.ac.uk"
] | aok25@cl.cam.ac.uk |
952ecae4e414db6b616a055126571c0e7b129cdf | 8b427d0a012d7dbd3b49eb32c279588f9ebd4e6e | /05 排序和搜索/binary_search.py | 33fe499fccf320c9b9bcc7589236e441fdbcd076 | [] | no_license | chenyang929/Problem-Solving-with-Algorithms-and-Data-Structures-using-Python-Notes | e9f1b324d86963333edaf855fdb9e126e59e8542 | aed976e020147fe30a8e0bb708dfbe4bab4c15f7 | refs/heads/master | 2020-03-18T02:46:12.385967 | 2018-07-24T08:24:41 | 2018-07-24T08:24:41 | 134,206,437 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,023 | py | # binary_search.py
# Iterative version
def binary_search(lst, item):
    """Binary-search sorted list `lst` for `item`.

    :param lst: list sorted in ascending order
    :param item: value to find
    :return: index of an occurrence of `item`, or -1 if absent

    Bug fix: the original shrank the range by only one element per
    iteration (`low += 1` / `high -= 1`), which is still correct but
    degrades to linear time; halving at `mid` restores O(log n).
    """
    low = 0
    high = len(lst) - 1
    while low <= high:
        mid = (low + high) // 2
        guess = lst[mid]
        if item == guess:
            return mid
        elif item > guess:
            low = mid + 1  # item can only be in the right half
        else:
            high = mid - 1  # item can only be in the left half
    return -1
# Recursive version (higher memory overhead: one stack frame per step)
def binary_search1(lst, item, low=0, high=None):
    """Recursive binary search over sorted list `lst`.

    :param lst: list sorted in ascending order
    :param item: value to find
    :param low: lower bound of the search range (inclusive)
    :param high: upper bound (inclusive); None means len(lst) - 1
    :return: index of an occurrence of `item`, or -1 if absent

    Bug fix: the original recursed on `low + 1` / `high - 1`, giving
    linear time AND O(n) recursion depth (RecursionError risk on large
    lists); recursing on `mid +/- 1` restores O(log n) time and depth.
    """
    if high is None:
        high = len(lst) - 1
    if low > high:
        return -1
    mid = (low + high) // 2
    guess = lst[mid]
    if item == guess:
        return mid
    elif item > guess:
        return binary_search1(lst, item, mid + 1, high)
    else:
        return binary_search1(lst, item, low, mid - 1)
if __name__ == '__main__':
    # Smoke test; expected output for each call is in the trailing comment.
    l = [1, 3, 4, 7, 9, 12, 14]
    print(binary_search(l, 12)) # 5
    print(binary_search1(l, 12)) # 5
    print(binary_search(l, 5)) # -1
    print(binary_search1(l, 5)) # -1
| [
"chenyang929code@gmail.com"
] | chenyang929code@gmail.com |
809968c08a92bfd735c7051cf2781edcae3987d0 | 0a5220c19469c330e539632e6643e084d72666d8 | /services/translation_service.py | 3ed9e99b24238a1809867bcb8dbb9fd5b160e37d | [] | no_license | a-grgv/projecthistorybabel | 7d5d9c5fdfb973d9e930e3c339ae12f7a782defc | 4b897687597a4b9824877b79cb5dc9cc396c8bb1 | refs/heads/master | 2023-08-27T01:25:28.507806 | 2021-11-08T08:56:46 | 2021-11-08T08:56:46 | 425,600,555 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 613 | py | """
This module contains methods to translate text using Azure Translation API
"""
from typing import List
import requests
from utils.service_constants import TRANSLATE_REQUEST_PARAMS, TRANSLATE_URL, TRANSLATE_HEADERS
def translate_text(text: str) -> List[dict]:
    """
    Translate given text via the Azure Translation endpoint.
    :param text: Text to be translated
    :return: List of translations from detected languages
    """
    payload = [{'text': text}]
    http_response = requests.post(
        TRANSLATE_URL,
        params=TRANSLATE_REQUEST_PARAMS,
        headers=TRANSLATE_HEADERS,
        json=payload,
    )
    return http_response.json()
| [
"a.georgiev@buildingradar.com"
] | a.georgiev@buildingradar.com |
d81f10d845bcb6bb5f7a66f9db38dee32a92de47 | ccd9fc377cdeaa770ac8a6f7b2f958c217a175a0 | /FunPythonProjects/PalindromeChecker.py | 3b980ff456c67d991db187e862f816d1b1564d45 | [] | no_license | afs2015/SmallPythonProjects | bdb23ece89fef7bc8fa8cf7d5dd575ae431e83cc | 4ef0cffb975df64c09edba82bd4d872d78dfee3d | refs/heads/master | 2021-01-24T06:12:01.144068 | 2014-04-08T17:12:55 | 2014-04-08T17:12:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 189 | py | #!/usr/bin/python
# Author: Andrew Selzer
# Purpose: Simple function to check if a string can be a palindrome, returns true or false.
def isPalindrome(word):
    """Return True when `word` reads the same forwards and backwards."""
    return word == "".join(reversed(word))
"andrewfselzer@gmail.com"
] | andrewfselzer@gmail.com |
050bd29b89b52f86e1dc5cf9b4e7c13505a1efc4 | 96f000d16e29fd60199fea8585af515da7818b03 | /ro password.py | 1a47ad391000909881cf11ccbac7df3cdc469c27 | [] | no_license | Gunjankushwaha/hackthon2 | d80e7b9450398ed1abdca2fc9a49f1cf9c6bc46f | bf7fab859fc45260f7c7aa6f2378deb30d6e616d | refs/heads/main | 2023-09-04T22:32:23.670930 | 2021-11-21T15:08:27 | 2021-11-21T15:08:27 | 430,399,336 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,817 | py |
print("welcom to facebook")
facebook_name=input("enter the name")
if facebook_name=="gunjan"or"sona":
print("correct your name")
full_name=input("enter the full_name:")
if full_name=="gunjan kushwaha" or "sona kumari":
print("correct name")
password=input("enter the password:")
if password=="567893":
print("good password")
gmail=input("enter the gmail")
if gmail=="gunjankushwahakumari":
print("right gmail")
gender=input("enter the gender")
if gender=="male" or "female":
print("ok")
birthday=input("enter the dade of birth")
if birthday>="0 to 9":
print("dade birth is right")
else:
print("wrong your dade birth")
else:
print("wrong")
else:
print("wrong your gmail")
else:
print("wrong your password")
else:
print("wrong your full_name")
else:
print("wrong your name")
# username=input("enter the username :")
# userpassword=int(input("enter the userpassword :"))
# if username=="gunjan":
# if userpassword==89:
# print("login successfull : ")
# else:
# print("incorect password")
# elif username!="gunjan" and userpassword!=89:
# print("both condition are wrong :")
# print:("creact new account: ")
# username1=input("enter the user name")
# userpassword1=int(input("enter the password"))
# print("your new account is sucessfully created")
# else:
# print("increact user name ")
| [
"noreply@github.com"
] | Gunjankushwaha.noreply@github.com |
ba3ad6ea7accec426bc1863e721e6bf8f64d7007 | a0d0a7ef9ab2c7c06c864a1ae372171e15606b46 | /lib/modeling/__init__.py | a30625498176d72f1f0fba72cfe35e4c6cb2e52d | [] | no_license | mmmcn/votenet-manhattan | 866c0d54ede6cf1c2f3dc4767441173656cc98ee | 88b0dafbaf367557c028db75abbe0f30d5abf65b | refs/heads/master | 2023-05-02T18:41:06.752471 | 2021-05-21T01:50:09 | 2021-05-21T01:50:09 | 366,908,020 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 602 | py | from .backbones import *
from .builder import (build_backbone, build_detector, build_fusion_layer,
build_head, build_loss, build_middle_encoder, build_neck,
build_roi_extractor, build_shared_head,
build_voxel_encoder)
from .dense_heads import *
from .detectors import *
from .model_utils import *
__all__ = ['build_backbone', 'build_detector', 'build_fusion_layer',
'build_head', 'build_loss', 'build_middle_encoder', 'build_neck',
'build_roi_extractor', 'build_shared_head', 'build_voxel_encoder']
| [
"1139553802@qq.com"
] | 1139553802@qq.com |
ba777c4c6fe4858608024de11c6f3e3abb75bd5b | 11000b1e811519ab1c65dbfcd4efbcf93f72b853 | /capsule_net_keras/capsule_network.py | 6e066f69f7d476d8aaa6d0aafe6dc5c6fd92fb9b | [] | no_license | cnlinxi/deep_learning_practice | 41aeeea44a4f8aa204ff03f146c3cace5ea103af | 370485bc2b910ee1cd64a7b7550072e05fb5913f | refs/heads/master | 2020-03-22T16:08:07.390057 | 2019-03-04T13:17:22 | 2019-03-04T13:17:22 | 140,305,308 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 6,484 | py | # -*- coding: utf-8 -*-
# @Time : 2018/7/18 23:52
# @Author : MengnanChen
# @FileName: capsule_network.py
# @Software: PyCharm Community Edition
from keras import layers
from keras import backend as K
from keras import models
from keras import callbacks
from keras import optimizers
from keras.preprocessing.image import ImageDataGenerator
from keras.datasets import mnist
from keras.utils import to_categorical
import numpy as np
from capsule_layer import CapsuleLayer,PrimaryCap,Length,Mask
K.set_image_data_format(data_format='channel_last')
def capsule_net(input_shape,n_class,routings):
x=layers.Input(shape=input_shape)
conv1=layers.Conv2D(filters=256,kernel_size=9,strides=1,padding='valid',
activation='relu',name='conv1')(x)
primary_capsule=PrimaryCap(conv1,dim_capsule=8,n_channels=32,
kernel_size=9,strides=2,padding='valid')
digit_capsule=CapsuleLayer(output_dim_capsules=16,routings=routings,
output_num_capsules=n_class,name='digit_capsule')(primary_capsule)
output_capsules=Length(name='capsule_net')(digit_capsule)
y=layers.Input(shape=(n_class,))
masked_with_y=Mask()([digit_capsule,y])
masked=Mask(digit_capsule)
decoder=models.Sequential(name='decoder')
decoder.add(layers.Dense(units=512,activation='relu',input_dim=16*n_class))
decoder.add(layers.Dense(units=1024,activation='relu'))
decoder.add(layers.Dense(units=np.prod(input_shape),activation='softmax'))
decoder.add(layers.Reshape(target_shape=input_shape,name='output_reconstruction'))
train_model=models.Model([x,y],[output_capsules,decoder(masked_with_y)])
eval_model=models.Model(x,[output_capsules,decoder(masked)])
noise = layers.Input(shape=(n_class, 16))
noised_digitcaps = layers.Add()([digit_capsule, noise])
masked_noised_y = Mask()([noised_digitcaps, y])
manipulate_model = models.Model([x, y, noise], decoder(masked_noised_y))
return train_model, eval_model, manipulate_model
def margin_loss(y_true,y_pred):
L=y_true*K.square(K.maximum(0.,0.9-y_pred))+\
0.5*(1-y_true)*K.square(K.maximum(0.,y_pred-0.1))
return K.mean(K.sum(L,axis=1))
def train(model,data,args):
(x_train,y_train),(x_test,y_test)=data
log=callbacks.CSVLogger(args.save_dir+'/log.csv')
tb=callbacks.TensorBoard(log_dir=args.save_dir+'tensorboard-logs',
batch_size=args.batch_size,histogram_freq=int(args.debug))
checkpoint=callbacks.ModelCheckpoint(args.save_dir+'/weights-{epoch:02d}.h5',monitor='val_capsnet_acc',
save_best_only=True,save_weights_only=True,verbose=1)
lr_decay=callbacks.LearningRateScheduler(schedule=lambda epoch:args.lr*(args.lr_decay**epoch))
model.compile(optimizer=optimizers.Adam(lr=args.lr),
loss=[margin_loss,'mse'],
loss_weight=[1., args.lam_recon],
metrics={'capsnet':'accuracy'})
def train_generator(x,y,batch_size,shift_fraction=0.):
train_data_generator=ImageDataGenerator(width_shift_range=shift_fraction, # 图片宽度的某个比例,数据提升时图片水平偏移的幅度
height_shift_range=shift_fraction)
generator=train_data_generator.flow(x,y,batch_size)
while True:
x_batch,y_batch=generator.next()
yield x_batch,y_batch
model.fit_generator(generator=train_generator(x_train,y_train,args.batch_size,args.shift_fraction),
steps_per_epoch=int(x_train.shape[0]/args.batch_size),
epochs=args.epoch,
validation_data=[[x_test,y_test],[y_test,y_test]],
callbacks=[log,tb,checkpoint,lr_decay])
model.save_weights(args.save_dir+'/trained_model.h5')
print('Trained model saved to \'%s/trained_model.h5\'' % args.save_dir)
# plot log
return model
def load_mnist():
(x_train,y_train),(x_test,y_test)=mnist.load_data()
x_train=x_train.reshape(-1,28,28,1).astype('float32')/255.
x_test=x_test.reshape(-1,28,28,1).astype('float32')/255.
y_train=to_categorical(y_train.astype('float32'))
y_test=to_categorical(y_test.astype('float32'))
return (x_train,y_train),(x_test,y_test)
if __name__ == '__main__':
import os
import argparse
from keras.preprocessing.image import ImageDataGenerator
from keras import callbacks
# setting the hyper parameters
parser = argparse.ArgumentParser(description="Capsule Network on MNIST.")
parser.add_argument('--epochs', default=50, type=int)
parser.add_argument('--batch_size', default=100, type=int)
parser.add_argument('--lr', default=0.001, type=float,
help="Initial learning rate")
parser.add_argument('--lr_decay', default=0.9, type=float,
help="The value multiplied by lr at each epoch. Set a larger value for larger epochs")
parser.add_argument('--lam_recon', default=0.392, type=float,
help="The coefficient for the loss of decoder")
parser.add_argument('-r', '--routings', default=3, type=int,
help="Number of iterations used in routing algorithm. should > 0")
parser.add_argument('--shift_fraction', default=0.1, type=float,
help="Fraction of pixels to shift at most in each direction.")
parser.add_argument('--debug', action='store_true',
help="Save weights by TensorBoard")
parser.add_argument('--save_dir', default='./result')
parser.add_argument('-t', '--testing', action='store_true',
help="Test the trained model on testing dataset")
parser.add_argument('--digit', default=5, type=int,
help="Digit to manipulate")
parser.add_argument('-w', '--weights', default=None,
help="The path of the saved weights. Should be specified when testing")
args = parser.parse_args()
print(args)
if os.path.exists(args.save_dir):
os.mkdir(args.save_dir)
(x_train,y_train),(x_test,y_test)=load_mnist()
train_model,eval_model,manipulate_model=capsule_net(x_train.shape[1:],
n_class=y_train.shape[1],
routings=args.routings)
train_model.summary()
| [
"cncmn@hotmail.com"
] | cncmn@hotmail.com |
1eec9542f9e09968df25dc96be7e9b4c15c146b9 | e9d15cf96d9f03761b8cda870973d1f62d1c30bb | /pp/flatten12lp.py | 374c44352e13edebf269afd6122436ceaae58088 | [] | no_license | gkoundry/glen | 32ecb9a40c7e14ba1c1a2b603b024a7a0a7264e1 | f1bf76a2a247e19803ba4b8b3a55eea33b76708e | refs/heads/master | 2020-05-09T17:07:30.657019 | 2014-11-18T14:28:41 | 2014-11-18T14:28:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,488 | py | import sys
import cPickle
from collections import defaultdict
def mean(l):
return sum(l)*1.0/len(l)
def tval(c):
return ord(c)-65
def carval(v):
if v=='':
return (0,1)
else:
return (ord(v)-ord('a')+1,0)
def rfx(v):
if v=='NA':
return (0,1)
else:
return (v,0)
def cvt(t):
h,m = t.split(':')
return int(h)*60+int(m)
def tdiff(t1,t2):
if t1<=t2:
return t2-t1
return t2+24*60-t1
pred={}
for col in ('A','B','C','D','E','F','G'):
pred[col]={}
f=open('pred'+col+'.csv')
for l in f:
id,p=l.rstrip().split(',')
pred[col][int(id)]=int(p)
levels = { 'A':set(),'B':set(),'C':set(),'D':set(),'E':set(),'F':set(),'G':set() }
last = {}
llast = {}
ans = {}
ca = {}
costlvl = {}
costdir = {}
freq={}
gs={}
ho={}
ca={}
dp={}
mc={}
ao={}
ad={}
cv={}
cvm={}
rf={}
rfm={}
cp={}
hist={}
cpm={}
dp={}
dpm={}
state={}
prior={}
lcost={}
cost1={}
cost2={}
uniq={}
states = set()
for col in ('A','B','C','D','E','F','G'):
prior[col]=defaultdict(int)
tcost=0
ccost=0
day={}
hour={}
time1={}
time2={}
quotes = defaultdict(int)
f=open('trains1.csv','r')
h=f.readline()
for l in f:
a=l.rstrip().split(',')
id=a[0]
rt=a[2]
cs=float(a[24])
if rt=='1':
ans[id] = a[17:24]
for col in ('A','B','C','D','E','F','G'):
val = a[ord(col)-48]
levels[col].add(val)
tcost = 0
ccost = 0
else:
gs[id]=a[7]
ho[id]=a[8]
ca[id]=a[9]
dp[id]=a[16]
mc[id]=a[14]
ao[id]=a[12]
cv[id],cvm[id] = carval(a[10])
rf[id],rfm[id] = rfx(a[11])
cp[id],cpm[id] = rfx(a[15])
dp[id],dpm[id] = rfx(a[16])
day[id]=a[3]
hour[id]=a[4].split(':')[0]
if id not in time1:
time1[id] = cvt(a[4])
time2[id] = cvt(a[4])
if id not in cost1:
cost1[id] = cs
cost2[id] = cs
else:
if cs>cost2[id]:
cost2[id]=cs
if cs<cost1[id]:
cost1[id]=cs
states.add(a[5])
state[id]=a[5]
ad[id]=int(a[12])-int(a[13])
quotes[id] += 1
if tcost > 0:
costdir[id] = cs / (tcost/ccost)
tcost += cs
ccost += 1
costlvl[id] = tcost / ccost
if id not in freq:
freq[id] = {}
lcost[id] = {}
hist[id] = {}
uniq[id] = {}
for col in ('A','B','C','D','E','F','G'):
uniq[id][col] = set()
freq[id][col] = defaultdict(int)
lcost[id][col]=defaultdict(int)
hist[id][col]=defaultdict(int)
for col in ('A','B','C','D','E','F','G'):
uniq[id][col].add(a[ord(col)-48])
freq[id][col][a[ord(col)-48]] += 1
lcost[id][col][a[ord(col)-48]] += cs
prior[col][a[ord(col)-48]] += 1
if id in last and a[ord(col)-48] == last[id][ord(col)-65]:
hist[id][col] += 1
else:
hist[id][col]=1
if id in last:
llast[id] = last[id][:]
last[id] = a[17:24]
f=open('train12lp.csv','w')
f.write('id,y,ans,last,pred')
for col in ('A','B','C','D','E','F','G'):
f.write(',diff'+col)
f.write(',diff')
for col in ('A','B','C','D','E','F','G'):
for lvl in sorted(list(levels[col])):
f.write(',last'+col+lvl)
for col in ('A','B','C','D','E','F','G'):
for lvl in sorted(list(levels[col])):
f.write(',llast'+col+lvl)
for col in ('A','B','C','D','E','F','G'):
for lvl in sorted(list(levels[col])):
f.write(',pred'+col+lvl)
for col in ('A','B','C','D','E','F','G'):
for lvl in sorted(list(levels[col])):
f.write(',freq'+col+lvl)
for col in ('A','B','C','D','E','F','G'):
f.write(',hist'+col)
for col in ('A','B','C','D','E','F','G'):
f.write(',uniq'+col)
f.write(',gs')
f.write(',ho')
f.write(',ca')
f.write(',dp')
f.write(',mc')
f.write(',ao')
f.write(',ad')
f.write(',costlvl')
f.write(',costdir')
f.write(',costdiff')
f.write(',quotes')
f.write(',day5')
f.write(',day6')
f.write(',hour')
f.write(',time_diff')
f.write(',cv')
f.write(',cvm')
f.write(',rf')
f.write(',rfm')
f.write(',cp')
f.write(',cpm')
f.write(',dp')
f.write(',dpm')
for st in sorted(list(states)):
f.write(',%s' % st)
#f.write(',csrt1')
#f.write(',csrt2')
f.write('\n')
for id in ans.keys():
pc=0
for col in ('A','B','C','D','E','F','G'):
if int(ans[id][tval(col)])==int(pred[col][int(id)]):
pc+=1
pl=0
for col in ('A','B','C','D','E','F','G'):
if int(ans[id][tval(col)])==int(last[id][tval(col)]):
pl+=1
f.write('%s,%d,%s,%s,' % (id,pl!=7,''.join(ans[id]),''.join(last[id])))
for col in ('A','B','C','D','E','F','G'):
f.write('%d' % pred[col][int(id)])
diff=0
for col in ('A','B','C','D','E','F','G'):
if int(last[id][tval(col)])!=int(pred[col][int(id)]):
f.write(',1')
diff+=1
else:
f.write(',0')
f.write(',%d' % diff)
for col in ('A','B','C','D','E','F','G'):
for lvl in sorted(list(levels[col])):
f.write(',%d' % int(last[id][tval(col)]==lvl))
for col in ('A','B','C','D','E','F','G'):
for lvl in sorted(list(levels[col])):
f.write(',%d' % int(llast[id][tval(col)]==lvl))
for col in ('A','B','C','D','E','F','G'):
for lvl in sorted(list(levels[col])):
f.write(',%d' % int(pred[col][int(id)]==int(lvl)))
for col in ('A','B','C','D','E','F','G'):
for lvl in sorted(list(levels[col])):
f.write(',%d' % freq[id][col][lvl])
for col in ('A','B','C','D','E','F','G'):
f.write(',%d' % hist[id][col])
for col in ('A','B','C','D','E','F','G'):
f.write(',%d' % len(uniq[id][col]))
f.write(',%s' % gs[id])
f.write(',%s' % ho[id])
f.write(',%s' % ca[id])
f.write(',%s' % dp[id])
f.write(',%s' % mc[id])
f.write(',%s' % ao[id])
f.write(',%s' % ad[id])
f.write(',%f' % costlvl[id])
f.write(',%f' % costdir[id])
f.write(',%f' % (cost2[id]-cost1[id]))
f.write(',%d' % quotes[id])
f.write(',%d' % int(day[id]=='5'))
f.write(',%d' % int(day[id]=='6'))
f.write(',%s' % hour[id])
f.write(',%d' % tdiff(time1[id],time2[id]))
f.write(',%s' % cv[id])
f.write(',%s' % cvm[id])
f.write(',%s' % rf[id])
f.write(',%s' % rfm[id])
f.write(',%s' % cp[id])
f.write(',%s' % cpm[id])
f.write(',%s' % dp[id])
f.write(',%s' % dpm[id])
#f.write(',%s' % predg[int(id)])
for st in sorted(list(states)):
f.write(',%d' % int(state[id]==st))
# c1 = 0
# c2 = 0
# t1 = 0
# t2 = 0
# for k,v in lcost[id][target].items():
# if k==last[id][tval(target)]:
# c1 += v
# t1 += 1
# t1 += freq[id][target][k]
# else:
# c2 += v
# t2 += 1
# t2 += freq[id][target][k]
# if c2==0:
# c2=c1
# t2=t1
# f.write(',%f' % ((c1*1.0/t1)/(c2*1.0/t2)))
# c1 = 0
# c2 = 0
# t1 = 0
# t2 = 0
# for k,v in lcost[id][target].items():
# if k==last[id][tval(target)]:
# c1 += v
# t1 += freq[id][target2][k]
# else:
# c2 += v
# t2 += freq[id][target2][k]
# if c2==0:
# c2=c1
# t2=t1
# f.write(',%f' % ((c1*1.0/t1)/(c2*1.0/t2),))
f.write('\n')
| [
"glen@datarobot.com"
] | glen@datarobot.com |
4196efbf0128371d31c28213a5223ae4f65d1ec1 | 8e48cc791cd766278ff0524fe5fa81e60b69addd | /edu/views.py | 72b06df39073d126a21dc8b0752c980dbfc5563c | [] | no_license | scimmia/Train | e1c868fccea9d286e9e7586b6974aa2a54cda300 | a882782e994d338b8708cf41eaf3fef0c0c8d5d8 | refs/heads/master | 2021-06-16T02:16:22.773258 | 2019-06-27T08:30:24 | 2019-06-27T08:30:24 | 192,891,716 | 0 | 0 | null | 2021-06-10T21:36:41 | 2019-06-20T09:33:35 | JavaScript | UTF-8 | Python | false | false | 3,724 | py | import os
import xlrd
from django.contrib.auth.models import User
from django.shortcuts import render
from django.http import HttpResponse
from django.contrib.auth import views
from django.contrib.auth.decorators import login_required
# Create your views here.
from edu import utils
from edu.models import City, Org
from .forms import UploadFileForm
from .forms import UserCreatForm, UserManageForm, UserChangePasswordForm
# 用户登陆
def login(request):
template_response = views.login(request, extra_context={'next': '/t/dashboard/'})
return template_response
# 用户退出
def logout(request):
# logout_then_login表示退出即跳转至登陆页面,login_url为登陆页面的url地址
template_response = views.logout_then_login(request, login_url='/t/login/')
return template_response
@login_required
def user_list(request):
form = UserCreatForm(request.POST or None)
oper_form = UserManageForm(request.POST or None)
context = {}
if request.method == 'POST':
if form.is_valid():
oper_form = UserManageForm()
loginname = form.cleaned_data.get('loginname')
try:
user = User.objects.create_user(loginname, None, 'abcd1111')
user.last_name = form.cleaned_data.get('username')
user.save()
context['message'] = u'保存成功'
except:
context['errormsg'] = u'该用户已存在'
elif oper_form.is_valid():
form = UserCreatForm()
operation = oper_form.cleaned_data.get('operation')
ids = request.POST['ids']
if len(ids) > 0:
if operation == '1':
users = User.objects.filter(id__in=ids.split(','))
for u in users:
u.set_password('abcd1111')
u.save()
context['message'] = u'重置成功,密码为abcd1111'
elif operation == '2':
User.objects.filter(id__in=ids.split(',')).update(is_active=True)
context['message'] = u'启用成功'
pass
elif operation == '3':
User.objects.filter(id__in=ids.split(',')).update(is_active=False)
context['message'] = u'停用成功'
pass
else:
context['errormsg'] = u'请选择至少一个账号'
context['form'] = form
context['oper_form'] = oper_form
raw_data = User.objects.filter(is_superuser=False)
list_template = 'edu/user_list.html'
return utils.get_paged_page(request, raw_data, list_template, context)
# 密码更改
@login_required
def change_password(request):
form = UserChangePasswordForm(request.POST or None)
context = {
'form': form,
}
if request.method == 'POST':
if form.is_valid():
u = request.user
if u.check_password(form.cleaned_data.get('old_password')):
password1 = form.cleaned_data.get('new_password')
password2 = form.cleaned_data.get('new_password_2')
if password1 and password2 and len(password1)>0 and password1 == password2:
u.set_password(password1)
u.save()
context['message'] = u'修改成功'
else:
context['errormsg'] = u'新密码不一致或错误'
else:
context['errormsg'] = u'旧密码错误'
return render(request, 'edu/user_change_password.html', context)
def index(request):
return HttpResponse("Hello, world. You're at the polls index.")
| [
"affe@lvie.cn"
] | affe@lvie.cn |
e4b3e841b4e404c75c638aa2799e8c778aa16f70 | 00c020614f22ffb529c76b8a1900182a34db308e | /tigereye/tigereye/models/play.py | 3eab74d25d7485f7314374856fe0ce14947c78a9 | [] | no_license | 17865913117/kaik | b527c75b86684ec3b8fc1b9d948376553a0ecec0 | 63eb3a36ea74cea2c9ecbe42e3d07186748076ac | refs/heads/master | 2021-05-11T10:01:56.807862 | 2018-03-03T08:16:36 | 2018-03-03T08:16:36 | 117,048,168 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 997 | py | from tigereye.models import db,Model
class Play(db.Model,Model):
# 排期表 存储排期相关信息
# 排期ID 主键
pid = db.Column(db.Integer,primary_key=True)
# 电影院ID
cid = db.Column(db.Integer,default=0,nullable=False)
# 影厅ID
hid = db.Column(db.Integer,default=0,nullable=False)
# 电影ID
mid = db.Column(db.Integer,default=0,nullable=False)
#放映开始时间
start_time = db.Column(db.DateTime, nullable=False)
# 放映时长
duration = db.Column(db.Integer,default=0,nullable=False)
#价格类型
price_type = db.Column(db.Integer)
# 价格
price = db.Column(db.Integer)
# 销售价格
market_price = db.Column(db.Integer)
# 最低价格
lowest_price = db.Column(db.Integer)
# 创建时间
created_time = db.Column(db.DateTime)
# 最后更新时间
updated_time = db.Column(db.DateTime)
# 状态
status = db.Column(db.Integer,default=0,nullable=False,index=True)
| [
"1132800251@qq.com"
] | 1132800251@qq.com |
1c18a6ddc3944da8e2ba5f5ef396825ac6423869 | 6e13f7fdae0144dd0397031c59397b0372f0872a | /horch/layers/_se.py | 43f7469184fbc7b507af7080e15eb8071fc1c974 | [] | no_license | sbl1996/horch | 02e9996f764748c62648464d58318ceff92c87ed | 50d4f4da241a5727e3924a36fbc730dc61284904 | refs/heads/master | 2020-03-20T05:00:43.444092 | 2018-07-27T00:14:45 | 2018-07-27T00:14:45 | 137,201,939 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 671 | py | import horch as H
from ._linear import Linear
from ._module import Module
class SE(Module):
"""Squeeze and Excitation Module
"""
def __init__(self, channels, reduction_ratio=16):
super().__init__()
self.channels = channels
self.reduction_ratio = reduction_ratio
reduced_channels = channels // reduction_ratio
self.fc1 = Linear(channels, reduced_channels)
self.fc2 = Linear(reduced_channels, channels)
def forward(self, x):
z = H.mean(x, axis=(2,3))
z = self.fc1(z)
z = H.relu(z)
z = self.fc2(z)
s = H.sigmoid(z)
n, c = s.shape
s = s.reshape(n, c, 1, 1)
x = x * s
return x | [
"sbl1996@126.com"
] | sbl1996@126.com |
103e72ce7d8a54f0c81e71d70915a53c406b3229 | 9c3bb98eb9d0a587a302bdfa811f7b5c6a5a0a37 | /Week 1/id_140/LeetCode_641_140.py | a123c805d3eb59ce02c9f9c91a2d35bd961fbadf | [] | permissive | chenlei65368/algorithm004-05 | 842db9d9017556656aef0eeb6611eec3991f6c90 | 60e9ef1051a1d0441ab1c5484a51ab77a306bf5b | refs/heads/master | 2020-08-07T23:09:30.548805 | 2019-12-17T10:48:22 | 2019-12-17T10:48:22 | 213,617,423 | 1 | 0 | Apache-2.0 | 2019-12-17T10:48:24 | 2019-10-08T10:50:41 | Java | UTF-8 | Python | false | false | 2,270 | py | #
# [641] 设计循环双端队列
# @lc app=leetcode.cn id=641 lang=python3
#
# 执行用时:88 ms 超过81.45%
# @lc code=start
class MyCircularDeque:
def __init__(self, k: int):
"""
Initialize your data structure here. Set the size of the deque to be k.
"""
self.size = k
self.deque = []
def insertFront(self, value: int) -> bool:
"""
Adds an item at the front of Deque. Return true if the operation is successful.
"""
if len(self.deque) < self.size :
self.deque.insert(0,value)
return True
return False
def insertLast(self, value: int) -> bool:
"""
Adds an item at the rear of Deque. Return true if the operation is successful.
"""
if len(self.deque) < self.size :
self.deque.append(value)
return True
return False
def deleteFront(self) -> bool:
"""
Deletes an item from the front of Deque. Return true if the operation is successful.
"""
if len(self.deque) > 0:
self.deque.pop(0)
return True
return False
def deleteLast(self) -> bool:
"""
Deletes an item from the rear of Deque. Return true if the operation is successful.
"""
if len(self.deque) > 0:
self.deque.pop()
return True
return False
def getFront(self) -> int:
"""
Get the front item from the deque.
"""
if len(self.deque) > 0:
return self.deque[0]
return -1
def getRear(self) -> int:
"""
Get the last item from the deque.
"""
if len(self.deque) > 0 :
return self.deque[-1]
else:
return -1
def isEmpty(self) -> bool:
"""
Checks whether the circular deque is empty or not.
"""
if len(self.deque) == 0 :
return True
else:
return False
def isFull(self) -> bool:
"""
Checks whether the circular deque is full or not.
"""
if len(self.deque) == self.size:
return True
else:
return False | [
"54947474+luosu2019@users.noreply.github.com"
] | 54947474+luosu2019@users.noreply.github.com |
7267a57066aa089c4eb4c285f32f7b53f262d55c | 92a3ca163fa2ff948edb3f8e76871de771bd6191 | /exercise_0206.py | 3f3505755bcf0a7f31af9a51cad8f7b8c22e5c5d | [] | no_license | longchushui/How-to-think-like-a-computer-scientist | a5574a09b27dc95512280b29c5b60e8501ff77f4 | 27e77c462e3ba422e8fa20e64709c09f7e226686 | refs/heads/master | 2020-09-21T23:45:33.915134 | 2019-05-03T13:35:58 | 2019-05-03T13:35:58 | 224,975,831 | 1 | 0 | null | 2019-11-30T07:26:51 | 2019-11-30T07:26:50 | null | UTF-8 | Python | false | false | 737 | py | # You look at the clock and it is exactly 2pm. You set an alarm to go off in 51 hours. At what time does the alarm
# go off? (Hint: you could count on your fingers, but this is not what we’re after. If you are tempted to count on
# your fingers, change the 51 to 5100.)
# Write a Python program to solve the general version of the above problem. Ask the user for the time now (in hours),
# and ask for the number of hours to wait. Your program should output what the time will be on the clock when the alarm
# goes off.
current_time = input("What is the current time?")
hours_wait = input("After how many hours shoudl the alarm go off?")
hours = hours_wait % 24
current_time + hours
print("The alarm will go off at" + "hours")
| [
"noreply@github.com"
] | longchushui.noreply@github.com |
8af6b0b5246781b53b190262e492d2ea3265b06b | b6381df52ced5fc4e2dc12a0bfbf2abaf0ac3ce8 | /python/paddle/fluid/tests/unittests/test_parallel_executor_test_while_train.py | 93a5f767867d68110cf7b8f441cc740ecd843cf9 | [
"Apache-2.0"
] | permissive | Kotorinyanya/Paddle | a7aba0bc469bc629a8ef0109f0ed0b143cb9fa43 | dbd25805c88c48998eb9dc0f4b2ca1fd46326482 | refs/heads/master | 2020-03-21T09:26:18.244196 | 2018-06-14T04:26:06 | 2018-06-14T04:26:06 | 138,399,735 | 0 | 1 | Apache-2.0 | 2018-06-23T12:35:15 | 2018-06-23T12:35:15 | null | UTF-8 | Python | false | false | 3,482 | py | # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle.fluid as fluid
import numpy as np
import unittest
def simple_fc_net():
img = fluid.layers.data(name='image', shape=[784], dtype='float32')
label = fluid.layers.data(name='label', shape=[1], dtype='int64')
hidden = img
for _ in xrange(4):
hidden = fluid.layers.fc(
hidden,
size=200,
act='tanh',
bias_attr=fluid.ParamAttr(
initializer=fluid.initializer.Constant(value=1.0)))
prediction = fluid.layers.fc(hidden, size=10, act='softmax')
loss = fluid.layers.cross_entropy(input=prediction, label=label)
loss = fluid.layers.mean(loss)
return loss
class ParallelExecutorTestingDuringTraining(unittest.TestCase):
def check_network_convergence(self, build_strategy=None):
main = fluid.Program()
startup = fluid.Program()
with fluid.program_guard(main, startup):
loss = simple_fc_net()
test_program = main.clone(for_test=True)
opt = fluid.optimizer.SGD(learning_rate=0.001)
opt.minimize(loss)
batch_size = 32
image = np.random.normal(size=(batch_size, 784)).astype('float32')
label = np.random.randint(0, 10, (batch_size, 1), dtype="int64")
place = fluid.CUDAPlace(0)
exe = fluid.Executor(place)
exe.run(startup)
feed_dict = {'image': image, 'label': label}
train_exe = fluid.ParallelExecutor(
use_cuda=True,
loss_name=loss.name,
main_program=main,
build_strategy=build_strategy)
test_exe = fluid.ParallelExecutor(
use_cuda=True,
main_program=test_program,
share_vars_from=train_exe,
build_strategy=build_strategy)
for i in xrange(5):
test_loss, = test_exe.run([loss.name], feed=feed_dict)
test_loss = np.array(test_loss)
train_loss, = train_exe.run([loss.name], feed=feed_dict)
train_loss = np.array(train_loss)
self.assertTrue(
np.allclose(
train_loss, test_loss, atol=1e-8),
"Train loss: " + str(train_loss) + "\n Test loss:" +
str(test_loss))
def test_parallel_testing(self):
build_strategy = fluid.BuildStrategy()
build_strategy.reduce_strategy = fluid.BuildStrategy.ReduceStrategy.AllReduce
self.check_network_convergence(build_strategy)
def test_parallel_testing_with_new_strategy(self):
build_strategy = fluid.BuildStrategy()
build_strategy.reduce_strategy = fluid.BuildStrategy.ReduceStrategy.Reduce
self.check_network_convergence(build_strategy)
if __name__ == '__main__':
unittest.main()
| [
"reyoung@126.com"
] | reyoung@126.com |
9ab8facd7b0d8fa144908e9797f1c40d5208001f | 91db8a2a5b8620a3c247f9d0335388e2a6f6c92e | /venv/bin/python-config | da1d7d4920091470bf6de49849c2e88eea7b4fd8 | [] | no_license | FrankFang0830/pyc1 | 39647b142a951dc93a824dfc4ac7554e9e8b0dd5 | 6e87e35c03f23490ad800fee103659d7974f2322 | refs/heads/master | 2020-04-19T15:28:51.370367 | 2019-03-11T19:55:32 | 2019-03-11T19:55:32 | 168,275,769 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,361 | #!/Users/fangpeihao/PycharmProjects/pyc1/venv/bin/python
import sys
import getopt
import sysconfig
valid_opts = ['prefix', 'exec-prefix', 'includes', 'libs', 'cflags',
'ldflags', 'help']
if sys.version_info >= (3, 2):
valid_opts.insert(-1, 'extension-suffix')
valid_opts.append('abiflags')
if sys.version_info >= (3, 3):
valid_opts.append('configdir')
def exit_with_usage(code=1):
sys.stderr.write("Usage: {0} [{1}]\n".format(
sys.argv[0], '|'.join('--'+opt for opt in valid_opts)))
sys.exit(code)
try:
opts, args = getopt.getopt(sys.argv[1:], '', valid_opts)
except getopt.error:
exit_with_usage()
if not opts:
exit_with_usage()
pyver = sysconfig.get_config_var('VERSION')
getvar = sysconfig.get_config_var
opt_flags = [flag for (flag, val) in opts]
if '--help' in opt_flags:
exit_with_usage(code=0)
for opt in opt_flags:
if opt == '--prefix':
print(sysconfig.get_config_var('prefix'))
elif opt == '--exec-prefix':
print(sysconfig.get_config_var('exec_prefix'))
elif opt in ('--includes', '--cflags'):
flags = ['-I' + sysconfig.get_path('include'),
'-I' + sysconfig.get_path('platinclude')]
if opt == '--cflags':
flags.extend(getvar('CFLAGS').split())
print(' '.join(flags))
elif opt in ('--libs', '--ldflags'):
abiflags = getattr(sys, 'abiflags', '')
libs = ['-lpython' + pyver + abiflags]
libs += getvar('LIBS').split()
libs += getvar('SYSLIBS').split()
# add the prefix/lib/pythonX.Y/config dir, but only if there is no
# shared library in prefix/lib/.
if opt == '--ldflags':
if not getvar('Py_ENABLE_SHARED'):
libs.insert(0, '-L' + getvar('LIBPL'))
if not getvar('PYTHONFRAMEWORK'):
libs.extend(getvar('LINKFORSHARED').split())
print(' '.join(libs))
elif opt == '--extension-suffix':
ext_suffix = sysconfig.get_config_var('EXT_SUFFIX')
if ext_suffix is None:
ext_suffix = sysconfig.get_config_var('SO')
print(ext_suffix)
elif opt == '--abiflags':
if not getattr(sys, 'abiflags', None):
exit_with_usage()
print(sys.abiflags)
elif opt == '--configdir':
print(sysconfig.get_config_var('LIBPL'))
| [
"44310469+FrankFang0830@users.noreply.github.com"
] | 44310469+FrankFang0830@users.noreply.github.com | |
ffd7b753f12a8fff9b52468226a6155d9a60a7c9 | 7bd9be7f25be80791f9220b62025f06170273293 | /end-plugins/pycerebro/examples/excel_export.py | 7ccaaa79c69f3f70111f28d64cfa01c407d3180a | [] | no_license | cerebrohq/cerebro-plugins | ab46b4844adcb12c51d14e21f2c0d8b758b0bb57 | e2e0f97b548ef22957e13d614200027ba89215e0 | refs/heads/master | 2021-11-12T16:25:48.228521 | 2021-10-22T11:25:58 | 2021-10-22T11:25:58 | 143,178,631 | 5 | 3 | null | null | null | null | UTF-8 | Python | false | false | 7,156 | py | # -*- coding: utf-8 -*-
"""
Пример экспорта задачи(проекта) со всеми вложенными задачами в Excel.
Этот пример демонстрирует экспорт свойств задачи в Excel.
Для записи в формат Excel используется сторонний пакет xlsxwriter (https://xlsxwriter.readthedocs.org/)
Для преобразования текста в формате html используется сторонний пакет html2text (http://www.aaronsw.com/2002/html2text/).
В модуле используются следующие функции:
do_export - Функция экспорта. Принимает параметры: Имя пользователя, Пароль пользователя, Путь до задачи, Путь к файлу Excel.
write - Функция, которая записывает свойства задачи и всех вложенных задач в файл Excel.
connect_db - Функция для соединения с базой данных Cerebro.
write_info, write_error - Функции для логирования.
Пример использования:
do_export('Имя_пользователя', 'Пароль_пользователя', '/Путь/к/Задаче', 'C:/путь/к/файлу.xlsx')
"""
# Имена колонок Excel
columns = { 0: "Задача", 1: "Описание", 2: "Назначено", 3: "Начало", 4: "Окончание", 5: "Запланировано"}
# Ширина колонок
columns_w = { 0: 50, 1: 50, 2: 10, 3: 10, 4: 10, 5: 15}
# Высота строк
row_h = 50
import sys
import os
local_dir = os.path.realpath(__file__).replace('\\', '/').rsplit('/', 1)[0]
backend_dir = local_dir + '/../..'
sys.path.append(backend_dir)
import xlsxwriter
from pycerebro import database, dbtypes
import html2text
import datetime
def do_export(db_user, db_password, task, file_name):
"""
Функция экспорта.
Параметры db_user и db_password - логин и пароль пользователя Cerebro.
task - тектовый локатор(путь) до задачи.
Формат локатора: '/Проект/Задача 1/Задача 2', то есть по сути путь до задачи.
Примечание: Имена задач регистрозависимы!
Пример вызова функции:
::
import excel_export
excel_export.do_export('user', 'password', '/Проект/Задача 1/Задача 2', 'c:/temp/export.xlsx')
::
"""
# Устанавливаем соединение с базой данных
db = connect_db(db_user, db_password)
if (db):
# Создаем файл Excel
wb = xlsxwriter.Workbook(file_name)
if (wb):
# Добавляем лист
ws = wb.add_worksheet()
if (ws):
# Создаем формат для заголовка
format = wb.add_format()
format.set_bold(True) # Жирный шрифт
format.set_align('center_across') # По центру
for col in columns:
# Задаем ширину колонок
ws.set_column(col, col, columns_w[col])
# Создаем Заголовок
ws.write(0, col, columns[col], format)
# Получаем идентификатор задачи (проекта)
task_id = db.task_by_url(task)[0]
if (task_id):
write(db, task_id, ws, wb.add_format())
wb.close()
write_info('Export finished!')
else:
write_error('Can not connect to db: ' + host)
_i = 0
def write(db, task_id, ws, format):
"""
Функция для записи свойств задачи и вложенных задач в файл Excel.
db - переменная для работы с базой данных.
task_id - идентификатор задачи.
ws - лист Excel.
format - переменная форматирования рабочей кники Excel.
"""
global _i
_i += 1
# Создадим формат для выравнивания по верхней границы ячейки и переноса по словам
format_top_text_wrap = format
format_top_text_wrap.set_align('top')
format_top_text_wrap.set_text_wrap()
# Устанавливаем высоту строки
ws.set_row(_i, row_h)
# Получаем задачу по идентификатору
task = db.task(task_id)
# Получаем постановку задачи
task_def = db.task_definition(task_id)
# Получаем полный путь к задаче
name = task[dbtypes.TASK_DATA_PARENT_URL] + task[dbtypes.TASK_DATA_NAME]
# Записываем полный путь к задаче
ws.write(_i, 0, name, format_top_text_wrap)
# Если у задачи есть "Постановка задачи" записываем ее в файл
if (task_def):
ws.write(_i, 1, html2text.html2text(task_def[dbtypes.MESSAGE_DATA_TEXT]), format_top_text_wrap)
# Получаем список пользователей, назначенных на задачу
user_name = task[dbtypes.TASK_DATA_ALLOCATED]
# Если есть назначенные на задачу пользователи, сохраняем их в файл
if (user_name):
ws.write(_i, 2, user_name, format_top_text_wrap)
# Получаем начальную дату отсчета
datetime_2000 = datetime.datetime(2000, 1, 1)
# Получаем дату старта задачи
datetime_start = datetime_2000 + datetime.timedelta(task[dbtypes.TASK_DATA_OFFSET])
# Сохраняем дату старта в файл
ws.write(_i, 3, datetime_start.strftime('%d.%m.%y %H:%M'), format_top_text_wrap)
# Получаем дату окончания задачи
datetime_stop = datetime_start + datetime.timedelta(task[dbtypes.TASK_DATA_DURATION])
# Сохраняем дату окончания в файл
ws.write(_i, 4, datetime_stop.strftime('%d.%m.%y %H:%M'), format_top_text_wrap)
# Сохраняем запланированное время
ws.write(_i, 5, task[dbtypes.TASK_DATA_PLANNED], format_top_text_wrap)
# Если у задачи есть вложенные задачи, так-же сохраняем их в файл
for child in db.task_children(task_id):
write(db, child[dbtypes.TASK_DATA_ID], ws, format)
def connect_db(user, password):
"""
Функция для соединения с базой данных.
user - имя пользователя cerebro.
password - пароль пользователя cerebro.
"""
# Создаем объект базы данных
db = database.Database()
# Соединяемся с базой данных
db.connect(user, password)
return db
def write_info(text):
"""
Функция для логирования информационных сообщений.
text - текст сообщения.
"""
print('info: ' + text)
def write_error(text):
"""
Функция для логирования ошибок.
text - текст сообщения.
"""
print('error: ' + text)
| [
"41910371+cerebroSupport@users.noreply.github.com"
] | 41910371+cerebroSupport@users.noreply.github.com |
fdcffcc361b0f0ee9af4c7f5cf7c6b2ded64ca3a | ea843a03ecf3540e48677136508c5a2f8eec60a1 | /helpdesk/views/permissions.py | 3ebd556ce4645ad3be1e598e04587c0e95be57ea | [
"BSD-2-Clause",
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference",
"MIT",
"CC-BY-4.0",
"OFL-1.0",
"OFL-1.1",
"LicenseRef-scancode-proprietary-license",
"MPL-1.0"
] | permissive | django-helpdesk/django-helpdesk | 12e9ad0e8499bd3a71c08a1b810b49ac9204a5d0 | 67eb0974c7f163216ececc1d8a715a0144d0375c | refs/heads/main | 2023-08-18T22:31:17.318764 | 2023-06-16T07:42:40 | 2023-06-16T07:42:40 | 985,638 | 931 | 514 | BSD-3-Clause | 2023-09-07T13:44:25 | 2010-10-14T00:59:09 | Python | UTF-8 | Python | false | false | 273 | py | from django.contrib.auth.mixins import LoginRequiredMixin, UserPassesTestMixin
from helpdesk.decorators import is_helpdesk_staff
class MustBeStaffMixin(LoginRequiredMixin, UserPassesTestMixin):
def test_func(self):
return is_helpdesk_staff(self.request.user)
| [
"timothy@hobbs.cz"
] | timothy@hobbs.cz |
51e9c4e0d8d935c52940e41e81efef2c0808b323 | ec4586abcc179293656f0afd837b0d521d072a75 | /torchsl/mvsl/projector/__init__.py | 4b01f622b66e8ccb9b0c50a10da542261d18ea73 | [] | no_license | ZDstandup/mvda | e483387e0b7e50c84bc28ffd864d44a724d23762 | 13f854e063f10a9374856d0e2005b233788a645f | refs/heads/master | 2021-01-13T20:42:51.842836 | 2019-12-15T19:16:13 | 2019-12-15T19:16:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 78 | py | from .linear import MvLinearProjector
__all__ = [
'MvLinearProjector'
]
| [
"inspiros.tran@gmail.com"
] | inspiros.tran@gmail.com |
3932c40fc428cdc74e42f6cb9dda11ae5e98c646 | aea6afb401f3620e9ab28d6226115d1e5cf19c6d | /nlp_architect/data/cdc_resources/gen_scripts/create_wordnet_dump.py | 69104565e9e5b703ca547e996872d6c99cc22d78 | [
"Apache-2.0"
] | permissive | shivdeep-singh/rasa-nlp-architect | 58eaf020a33c4b6635b94e549960e216f720ee56 | 36292ea7b14bbccb972fdb71fcf697d58b161ed3 | refs/heads/master | 2021-10-08T19:56:29.641333 | 2018-12-17T04:25:17 | 2018-12-17T04:25:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,325 | py | # ******************************************************************************
# Copyright 2017-2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
import argparse
import json
import logging
from nlp_architect.common.cdc.mention_data import MentionData
from nlp_architect.data.cdc_resources.wordnet.wordnet_online import WordnetOnline
from nlp_architect.utils import io
from nlp_architect.utils.io import json_dumper
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
parser = argparse.ArgumentParser(description='Create WordNet dataset only dump')
parser.add_argument('--mentions', type=str, help='mentions file', required=True)
parser.add_argument('--output', type=str, help='location were to create dump file', required=True)
args = parser.parse_args()
def wordnet_dump():
out_file = args.output
mentions_file = args.mentions
logger.info('Loading mentions files...')
mentions = MentionData.read_mentions_json_to_mentions_data_list(mentions_file)
logger.info('Done loading mentions files, starting local dump creation...')
result_dump = dict()
wordnet = WordnetOnline()
for mention in mentions:
page = wordnet.get_pages(mention)
result_dump[page.orig_phrase] = page
with open(out_file, 'w') as out:
json.dump(result_dump, out, default=json_dumper)
logger.info('Wordnet Dump Created Successfully, '
'extracted total of %d wn pages', len(result_dump))
logger.info('Saving dump to file-%s', out_file)
if __name__ == '__main__':
io.validate_existing_filepath(args.mentions)
io.validate_existing_filepath(args.output)
wordnet_dump()
| [
"18912936976@163.com"
] | 18912936976@163.com |
94bffb8d0483e2f5e93288b1a8d22188718fdab7 | e821ccc424b2061f99dacedcb4c22bde35274248 | /reconstruction/src/soft_projection.py | 6dd7280cba587af8bf13dc3905bc20e15f3dd4f5 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"Unlicense"
] | permissive | itailang/SampleNet | efbeb57456572a02f74adc7cd96eaf7e285c66fd | 3d20c7a62f6788cc56b68d5367ff25a8a2c13fad | refs/heads/master | 2023-07-08T23:40:09.332199 | 2023-06-25T06:04:07 | 2023-06-25T06:04:07 | 227,048,616 | 351 | 40 | NOASSERTION | 2020-04-11T11:49:50 | 2019-12-10T06:48:20 | Python | UTF-8 | Python | false | false | 6,377 | py | from __future__ import print_function
# import system modules
from builtins import object
import os.path as osp
import sys
# add paths
parent_dir = osp.dirname(osp.dirname(osp.abspath(__file__)))
if parent_dir not in sys.path:
sys.path.append(parent_dir)
# import modules
from external.grouping.tf_grouping import group_point, knn_point
import tensorflow as tf
import numpy as np
class SoftProjection(object):
def __init__(
self, group_size, initial_temperature=1.0, is_temperature_trainable=True
):
"""Computes a soft nearest neighbor point cloud.
Arguments:
group_size: An integer, number of neighbors in nearest neighborhood.
initial_temperature: A positive real number, initialization constant for temperature parameter.
is_temperature_trainable: bool.
Inputs:
point_cloud: A `Tensor` of shape (batch_size, num_in_points, 3), original point cloud.
query_cloud: A `Tensor` of shape (batch_size, num_out_points, 3), generated point cloud
Outputs:
projected_point_cloud: A `Tensor` of shape (batch_size, num_out_points, 3),
the query_cloud projected onto its group_size nearest neighborhood,
controlled by the learnable temperature parameter.
weights: A `Tensor` of shape (batch_size, num_out_points, group_size, 1),
the projection weights of the query_cloud onto its group_size nearest neighborhood
dist: A `Tensor` of shape (batch_size, num_out_points, group_size, 1),
the square distance of each query point from its neighbors divided by squared temperature parameter
"""
self._group_size = group_size
# create temperature variable
self._temperature = tf.get_variable(
"temperature",
initializer=tf.constant(initial_temperature, dtype=tf.float32),
trainable=is_temperature_trainable,
dtype=tf.float32,
)
self._temperature_safe = tf.maximum(self._temperature, 1e-2)
# sigma is exposed for loss calculation
self.sigma = self._temperature_safe ** 2
def __call__(self, point_cloud, query_cloud, hard=False):
return self.project(point_cloud, query_cloud, hard)
def _group_points(self, point_cloud, query_cloud):
group_size = self._group_size
_, num_out_points, _ = query_cloud.shape
# find nearest group_size neighbours in point_cloud
_, idx = knn_point(group_size, point_cloud, query_cloud)
grouped_points = group_point(point_cloud, idx)
return grouped_points
def _get_distances(self, grouped_points, query_cloud):
group_size = self._group_size
# remove centers to get absolute distances
deltas = grouped_points - tf.tile(
tf.expand_dims(query_cloud, 2), [1, 1, group_size, 1]
)
dist = tf.reduce_sum(deltas ** 2, axis=3, keepdims=True) / self.sigma
return dist
def project(self, point_cloud, query_cloud, hard):
grouped_points = self._group_points(
point_cloud, query_cloud
) # (batch_size, num_out_points, group_size, 3)
dist = self._get_distances(grouped_points, query_cloud)
# pass through softmax to get weights
weights = tf.nn.softmax(-dist, axis=2)
if hard:
# convert softmax weights to one_hot encoding
weights = tf.one_hot(tf.argmax(weights, axis=2), depth=self._group_size)
weights = tf.transpose(weights, perm=[0, 1, 3, 2])
# get weighted average of grouped_points
projected_point_cloud = tf.reduce_sum(
grouped_points * weights, axis=2
) # (batch_size, num_out_points, 3)
return projected_point_cloud, weights, dist
"""SoftProjection test"""
if __name__ == "__main__":
tf.enable_eager_execution()
projector = SoftProjection(3, initial_temperature=0.01)
sigma = projector.sigma
point_cloud = np.array(
[
[1, 0, 0],
[0, 1, 0],
[0, 0, 1],
[5, 4, 4],
[4, 5, 4],
[4, 4, 5],
[8, 7, 7],
[7, 8, 7],
[7, 7, 8],
]
)
query_cloud = np.array(
[[0, 0, 0], [1, 0, 0], [2, 0, 0], [5, 5, 5], [7, 7, 8], [7, 7, 8.5]]
)
expected_cloud_soft = np.array(
[
[0.333, 0.333, 0.333],
[1, 0, 0],
[1, 0, 0],
[4.333, 4.333, 4.333],
[7, 7, 8],
[7, 7, 8],
]
)
expected_cloud_hard = np.array(
[[1, 0, 0], [1, 0, 0], [1, 0, 0], [5, 4, 4], [7, 7, 8], [7, 7, 8]]
)
# expend to batch_size = 2
point_cloud = np.stack([point_cloud, point_cloud * 3], axis=0)
query_cloud = np.stack([query_cloud, query_cloud * 3], axis=0)
expected_cloud_soft = np.stack(
[expected_cloud_soft, expected_cloud_soft * 3], axis=0
)
expected_cloud_hard = np.stack(
[expected_cloud_hard, expected_cloud_hard * 3], axis=0
)
point_cloud_pl = tf.convert_to_tensor(point_cloud, dtype=tf.float32)
query_cloud_pl = tf.convert_to_tensor(query_cloud, dtype=tf.float32)
soft_projected_points, soft_projection_weights, dist = projector(
point_cloud_pl, query_cloud_pl
)
hard_projected_points, hard_projection_weights, _ = projector(
point_cloud_pl, query_cloud_pl, hard=True
)
soft_projected_points = soft_projected_points.numpy()
soft_projection_weights = soft_projection_weights.numpy()
hard_projected_points = hard_projected_points.numpy()
hard_projection_weights = hard_projection_weights.numpy()
expected_cloud_soft = expected_cloud_soft.squeeze()
soft_projected_points = soft_projected_points.squeeze()
soft_projection_weights = soft_projection_weights.squeeze()
hard_projected_points = hard_projected_points.squeeze()
hard_projection_weights = hard_projection_weights.squeeze()
print("soft_projection_weights:")
print(soft_projection_weights)
mse = np.mean(np.sum((soft_projected_points - expected_cloud_soft) ** 2, axis=1))
print("mean soft error:")
print(mse)
mse = np.mean(np.sum((hard_projected_points - expected_cloud_hard) ** 2, axis=1))
print("mean hard error:")
print(mse)
| [
"36666820+itailang@users.noreply.github.com"
] | 36666820+itailang@users.noreply.github.com |
0220688c436bb12347280238486b192ca169069a | 75e7a12358d6e8519b081b9615eeb8942ed846a4 | /feature/madarian/preprocess.py | 09f3829af9b693c7f6225f89a477362cbe376416 | [
"MIT"
] | permissive | ArashHosseini/Automatic_Speech_Recognition | 29667a15737262275c2533926de75d3800d4c3da | 6e6ca708b2e164168bb5c107e293b189eb715ac6 | refs/heads/master | 2021-08-28T09:50:44.797472 | 2017-12-11T22:43:46 | 2017-12-11T22:43:46 | 111,156,739 | 1 | 0 | null | 2017-11-17T22:28:03 | 2017-11-17T22:28:03 | null | UTF-8 | Python | false | false | 118 | py | #-*- coding:utf-8 -*-
#!/usr/bin/python
''' Speech Recognition for Madarian
'''
import sys
sys.path.append('../')
| [
"zzw922cn@gmail.com"
] | zzw922cn@gmail.com |
36a3d2b7c4e221ad463229c2714514ce08ecece8 | 5c142d192e5796dff3bc31500834ac4f7517940c | /emis100/views.py | 7e667202305c526455e314f834dca49f39bec63e | [] | no_license | iedu/emis100 | 9197b31ac8470a786ac6a32e9be6a1e85f384d4c | 3da9c3bbe6d67437a9676642464aaf7c02f55a6c | refs/heads/master | 2021-01-18T13:53:55.283036 | 2014-05-17T05:49:44 | 2014-05-17T05:49:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 99 | py | from django.http import HttpResponse
def home(request):
return HttpResponse('hello how are you!')
| [
"tking@goon.mshome.net"
] | tking@goon.mshome.net |
b140b08b16ac2bea28c486193ce7720a2021d3f5 | 6ae49955d6f68799be532df821718385d214401d | /ASKaQUES/urls.py | c814af58fe490a17884e154fbf59443080ccc6a8 | [] | no_license | Komal-Aggarwal/my-first-blog | 00d85e5b948fb77daa73c23bd709454f18586b75 | d50fb3cb28d368108674cab98e79cdfde763ef0f | refs/heads/master | 2020-08-09T19:02:01.072529 | 2019-12-12T05:28:54 | 2019-12-12T05:28:54 | 214,149,824 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,027 | py | """ASKaQUES URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from django.conf.urls import include
from django.conf.urls.static import static
from django.conf import settings
urlpatterns = [
path('admin/', admin.site.urls),
path('', include('blog.urls')),
] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT) + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT) | [
"komaldotin@gmail.com"
] | komaldotin@gmail.com |
c478c0d6aed9b07eae4b4ea4776e7c073d3b4ace | e6a5fce33aad4fcba37842e135a51ba441b06f48 | /Python/Errors and Exceptions/Exceptions.py | 5f5278b64b8e82541685d00cec1a244dd307ddce | [
"MIT"
] | permissive | pavstar619/HackerRank | 6710ddd450b06fbb69da5abad9f570e5e26bbbc0 | 697ee46b6e621ad884a064047461d7707b1413cd | refs/heads/master | 2020-06-18T18:53:53.421685 | 2020-02-18T09:35:48 | 2020-02-18T09:35:48 | 196,408,726 | 0 | 0 | MIT | 2019-07-11T14:18:16 | 2019-07-11T14:18:16 | null | UTF-8 | Python | false | false | 438 | py | class Main:
def __init__(self):
self.t = int(input())
for i in range(0, self.t):
try:
self.n, self.m = map(int, input().split())
print(self.n // self.m)
except ZeroDivisionError as e:
print("Error Code:", e)
except ValueError as e:
print("Error Code:", e)
if __name__ == '__main__':
obj = Main()
| [
"mokit.aust@gmail.com"
] | mokit.aust@gmail.com |
7c9e8b7bfead44bee572fa7070396b90066e9a6e | 746a9c1f65674cd5bcdce6dbd1971b6a16345f9d | /account/forms.py | e907a732e6e142794a14079dcb07a70bcd7fc718 | [] | no_license | mazulo/bookmarks | 4dc25dc09772663c65698d3cc9f5b653fd409ba9 | 5c2ce3c3ad811466c63f7b0f3a21bf33a6a28f5e | refs/heads/master | 2021-01-10T07:23:37.185414 | 2016-03-23T06:40:53 | 2016-03-23T05:40:53 | 54,158,063 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,002 | py | from django import forms
from django.contrib.auth.models import User
from .models import Profile
class LoginForm(forms.Form):
username = forms.CharField()
password = forms.CharField(widget=forms.PasswordInput)
class UserRegistrationForm(forms.ModelForm):
password = forms.CharField(label='Password', widget=forms.PasswordInput)
password2 = forms.CharField(
label='Repeat password', widget=forms.PasswordInput
)
class Meta:
model = User
fields = ('username', 'first_name', 'email')
def clean_password2(self):
cd = self.cleaned_data
if cd['password'] != cd['password2']:
raise forms.ValidationError('Passwords don\'t match.')
return cd['password2']
class UserEditForm(forms.ModelForm):
class Meta:
model = User
fields = ('first_name', 'last_name', 'email')
class ProfileEditForm(forms.ModelForm):
class Meta:
model = Profile
fields = ('date_of_birth', 'photo')
| [
"pmazulo@gmail.com"
] | pmazulo@gmail.com |
56343c8b65a1ce28f716be79136e6394dbefa839 | 56d0681e45549679e5b366cab2cfe656e4875c55 | /cspp1-practicem8/is_in.py | 4c90a4db83b489d73547a1f0795c17997cafae5f | [] | no_license | vineethamallu6/vineetha | 267cb650d77ff1045750e0e079f9e812cba3ed89 | 6f0949c801b900e19d56c7485fb6fdc9a6659481 | refs/heads/master | 2020-03-24T17:54:16.534989 | 2018-08-27T23:29:04 | 2018-08-27T23:29:04 | 142,875,947 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 937 | py | # Exercise: Is In
# Write a Python function, isIn(char, aStr), that takes in two arguments a character and String and returns the isIn(char, aStr) which retuns a boolean value.
# This function takes in two arguments character and String and returns one boolean value.
def isIn(char, aStr):
'''
char: a single character
aStr: an alphabetized string
returns: True if char is in aStr; False otherwise
'''
mid=len(aStr)//2
if len(aStr)==1:
if aStr[0]==char:
return "True"
else:
return "False"
if len(aStr)==0:
return "False"
if aStr[mid]==char:
return "True"
else:
if aStr[mid]>char:
return isIn(char, aStr[0:mid])
else:
return isIn(char, aStr[mid:len(aStr)])
def main():
data = input()
data = data.split()
print(isIn((data[0][0]), data[1]))
if __name__== "__main__":
main()
| [
"vineethamallu6@msitprogram.net"
] | vineethamallu6@msitprogram.net |
9037bc9505fc17c168ab84763c62ab55d05a597d | 0744dcc5394cebf57ebcba343747af6871b67017 | /external/iotivity/iotivity_1.2-rel/service/notification/cpp-wrapper/examples/linux/SConscript | 21d6d8e996fba6e438c51a517a7caa96509b8d79 | [
"Apache-2.0",
"GPL-2.0-only",
"MIT",
"BSD-3-Clause"
] | permissive | Samsung/TizenRT | 96abf62f1853f61fcf91ff14671a5e0c6ca48fdb | 1a5c2e00a4b1bbf4c505bbf5cc6a8259e926f686 | refs/heads/master | 2023-08-31T08:59:33.327998 | 2023-08-08T06:09:20 | 2023-08-31T04:38:20 | 82,517,252 | 590 | 719 | Apache-2.0 | 2023-09-14T06:54:49 | 2017-02-20T04:38:30 | C | UTF-8 | Python | false | false | 4,548 | #******************************************************************
#
# Copyright 2016 Samsung Electronics All Rights Reserved.
#
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
##
# Notification build script
##
Import('env')
lib_env = env.Clone()
SConscript(env.get('SRC_DIR') + '/service/third_party_libs.scons', 'lib_env')
notification_sample_env = lib_env.Clone()
target_os = env.get('TARGET_OS')
######################################################################
# Build flags
######################################################################
notification_sample_env.AppendUnique(CPPPATH = ['../../../include'])
notification_sample_env.AppendUnique(CPPPATH = ['../../provider/inc'])
notification_sample_env.AppendUnique(CPPPATH = ['../../consumer/inc'])
notification_sample_env.AppendUnique(CPPPATH = ['../../common'])
notification_sample_env.AppendUnique(CPPPATH = ['../../../../../resource/csdk/stack/include'])
notification_sample_env.AppendUnique(CPPPATH = ['../../../../../resource/csdk/connectivity/api'])
notification_sample_env.AppendUnique(CPPPATH = ['../../../src/common'])
notification_sample_env.PrependUnique(LIBS = [
'liboctbstack',
'oc_logger',
'oc',
'connectivity_abstraction',
'libcoap',
'resource_directory'
])
notification_sample_env.AppendUnique(CXXFLAGS = ['-std=c++0x','-frtti'])
if target_os not in ['windows', 'winrt']:
notification_sample_env.AppendUnique(CXXFLAGS = ['-O2', '-g', '-Wall', '-fmessage-length=0'])
if target_os not in ['darwin', 'ios', 'windows', 'winrt']:
notification_sample_env.AppendUnique(LINKFLAGS = ['-Wl,--no-undefined'])
if target_os == 'linux':
notification_sample_env.AppendUnique(LIBS = ['pthread'])
if target_os == 'android':
notification_sample_env.AppendUnique(CXXFLAGS = ['-frtti', '-fexceptions'])
notification_sample_env.AppendUnique(LIBS = ['gnustl_shared','log'])
if not env.get('RELEASE'):
notification_sample_env.PrependUnique(LIBS = ['gcov'])
notification_sample_env.AppendUnique(CCFLAGS = ['--coverage'])
if env.get('WITH_CLOUD') == True:
notification_sample_env.AppendUnique(CPPDEFINES = ['WITH_CLOUD'])
with_mq = env.get('WITH_MQ')
if 'SUB' in with_mq:
notification_sample_env.AppendUnique(CPPDEFINES = ['MQ_SUBSCRIBER', 'WITH_MQ'])
print "MQ SUB support"
if 'PUB' in with_mq:
notification_sample_env.AppendUnique(CPPDEFINES = ['MQ_PUBLISHER', 'WITH_MQ'])
print "MQ PUB support"
if 'BROKER' in with_mq:
notification_sample_env.AppendUnique(CPPDEFINES = ['MQ_BROKER', 'WITH_MQ'])
print "MQ Broker support"
if env.get('WITH_TCP') == True:
notification_sample_env.AppendUnique(CPPDEFINES = ['WITH_TCP'])
if env.get('SECURED') == '1':
notification_sample_env.AppendUnique(LIBS = ['mbedtls', 'mbedx509', 'mbedcrypto'])
####################################################################
# Source files and Targets
######################################################################
notification_sample_provider_env = notification_sample_env.Clone()
notification_sample_provider_env.AppendUnique(LIBS = 'libnotification_provider_wrapper')
notification_sample_provider_env.AppendUnique(LIBS = 'libnotification_provider')
notificationproviderwrapper = notification_sample_provider_env.Program('notificationproviderwrapper', 'notificationserviceprovider.cpp')
i_notificationprovider = notification_sample_provider_env.Install(env.get('BUILD_DIR'), notificationproviderwrapper)
notification_sample_consumer_env = notification_sample_env.Clone()
notification_sample_consumer_env.AppendUnique(LIBS = 'libnotification_consumer_wrapper')
notification_sample_consumer_env.AppendUnique(LIBS = 'libnotification_consumer')
notificationconsumerwrapper = notification_sample_consumer_env.Program('notificationconsumerwrapper', 'notificationserviceconsumer.cpp')
i_notificationconsumer = notification_sample_consumer_env.Install(env.get('BUILD_DIR'), notificationconsumerwrapper)
| [
"hj_elena.kim@samsung.com"
] | hj_elena.kim@samsung.com | |
77de8458b7a1783c09c39d23c67f84537adfb676 | 724a6b73dfc4f6434290ef1c68932485a159e5a2 | /src/test/resources/PageObjectRepository/qa/MyStore.spec | d86dab26ddd2e8141d7635b7f513d28304336f15 | [] | no_license | Saxena3623/SeleniumWebDriver_BDD | c7839088ba5ea3c5f355d51969ba39d427ed4a05 | 8ef3fbcf81ae72fc2b11bf53ec18268f560e2d9f | refs/heads/master | 2023-05-12T14:06:19.043565 | 2021-06-01T06:10:40 | 2021-06-01T06:10:40 | 372,423,879 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,542 | spec | Element Locator Type Locator
===================================================================================================
list_container css div[class='product-image-container']
title_product css #layer_cart_product_title
btn_addToCart css a[title="Add to cart"]
modal_cart css #layer_cart div[class*='layer_cart_cart']
btn_proceed xpath //div[@id="layer_cart"]//span[contains(text(),"Proceed to checkout")]
btn_cartProceedCheckOut xpath //p[@class="cart_navigation clearfix"]//span[contains(text(),"${text}")]
checkout_tabs xpath //ul[@id="order_step"]//span[contains(text(),"${text}")]
account_name css .account span
orderDetails xpath //div[@id="center_column"]
checkBox_terms xpath //div[@class="checker"]//input
option_payment css .payment_module .${text}
details_order xpath //*[@id='order-list']/tbody/tr/td[1]/a
txtConfirm css .box .cheque-indent .dark
btn_myOrders css .myaccount-link-list [title=Orders]
order_date xpath //table[@id="order-list"]/tbody/tr[1]/td[contains(@class,"history_date")]
order_ref1 css .box.order-confirmation
personal_info css .myaccount-link-list [title=Information]
msg_success css .alert-success | [
"rishusaxena@qainfotech.com"
] | rishusaxena@qainfotech.com |
4f1dfff019af33c3a44016eaa6afe6cf29a81a46 | c6abe0c4ec153a1b713bc91c1bb6948651e9428c | /snortparser.py | c88f3b7d9884f481e6794f3fab414e7addef20f7 | [] | no_license | david-tub/snort-net-viewer | b532d23bef62689c6dd394583167bceb172925c4 | f1fbc389015b411c0cf2a5b6a1a0107a9e740924 | refs/heads/master | 2023-03-22T13:47:03.803197 | 2021-03-22T13:43:51 | 2021-03-22T13:43:51 | 317,178,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,988 | py | import copy
import datetime
import re
import pandas as pd
# Running min/max alert counts — presumably updated while building the graph and
# used to scale edge widths into the range below; confirm against the generate_*
# functions (not visible in this section).
current_min_number_alerts = 0 # smallest alert count observed so far
current_max_number_alerts = 0 # largest alert count observed so far
# Target range that edge widths are scaled into when the graph is rendered.
EDGE_WEIGHT_SCALING_MIN = 1 # narrowest rendered edge
EDGE_WEIGHT_SCALING_MAX = 10 # widest rendered edge
# Node role labels; a Victim node that later appears as a source is promoted
# to Compromised (see generate_nodes_and_edges).
TYPE_ATTACKER = 'Attacker'
TYPE_VICTIM = 'Victim'
TYPE_COMPROMISED = 'Compromised'
class Alert:
    """A single parsed Snort alert.

    Fields are filled in by ``import_alerts``; the class-level defaults mark
    values that have not been extracted (yet).
    """

    # mandatory fields (all must be set for validate() to succeed)
    name = None            # rule message / alert name
    classification = None  # Snort classification string
    priority = None        # priority digit as a string ('1'-'9')
    from_ip = None         # source IP address
    to_ip = None           # destination IP address
    timestamp = None       # 'YYYY/MM/DD-HH:MM:SS.ffffff'
    # optional fields
    from_port = ""         # source port (empty for port-less protocols, e.g. ICMP)
    to_port = ""           # destination port
    additional = ""        # extra info, e.g. the Xref line

    def validate(self):
        """Return True when every mandatory field has a truthy value."""
        return bool(self.name and self.classification and self.priority
                    and self.from_ip and self.to_ip and self.timestamp)
def read_log_file(file_path):
    """
    Read a Snort alert log file and split it into individual alerts.

    An alert starts with a line beginning with '[**]'; all following
    non-blank lines up to the next '[**]' line belong to the same alert.

    :param file_path: file path of the Snort log file (alert format)
    :return: list of alerts, each a list of its raw lines (trailing
             newlines stripped), or -1 if the file is empty
    """
    print('[*] Start reading the log file')
    all_alerts = []  # all alerts; each entry is the list of that alert's lines
    with open(file_path, 'r') as current:
        lines = current.readlines()
    if not lines:
        print('FILE IS EMPTY')
        return -1
    alert = []  # lines of the alert currently being collected
    for line in lines:
        if line.startswith('[**]'):
            # a new alert begins: flush the previous one (if any)
            if alert:
                all_alerts.append(alert)
            alert = [line.strip('\n')]
        elif line != '\n':
            # continuation line belonging to the current alert
            alert.append(line.strip('\n'))
    # flush the last alert; the guard prevents a spurious empty entry when
    # the file contains no alert lines at all (e.g. only blank lines)
    if alert:
        all_alerts.append(alert)
    print('[*] Log file successfully read')
    return all_alerts
def import_alerts(all_alerts):
    """
    Turn raw alert line lists into validated Alert objects.

    For each alert, line 0 yields the rule name, line 1 the classification
    and priority, line 2 the timestamp plus source/destination addresses
    (with ports when the protocol has them) and line 5 — when present —
    the Xref information. Alerts missing any mandatory field are counted
    as corrupted and dropped.

    :param all_alerts: list of alerts, each a list of raw lines
    :return: list of validated Alert objects, sorted ascending by timestamp
    """
    print('[*] Start importing alerts')
    alerts_validated = []
    alerts_corrupted = []
    # The patterns are loop-invariant: compile them once up front.
    re_name = re.compile(r'\[\*\*\].*\[(.*?)\](.*?)\[\*\*\]')
    re_class = re.compile(r'\[Classification: (\b[a-zA-Z -]+\b)\].\[Priority: ([1-9])\]')
    re_with_ports = re.compile(
        r'([0-9/]+-[0-9:.]+)\s+.*?(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}):(\d{1,5})'
        r'\s+->\s+(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}):(\d{1,5})')
    re_without_ports = re.compile(
        r'([0-9/]+-[0-9:.]+)\s+.*?(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})'
        r'\s+->\s+(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})')
    # Snort alert timestamps carry no year, so the current year is assumed.
    # NOTE(review): logs recorded in a previous year get the wrong year prefix.
    year_prefix = str(datetime.datetime.now().year) + '/'
    for alert in all_alerts:
        alert_obj = Alert()
        if len(alert) < 3:
            # truncated block: the three mandatory lines are not all present,
            # count it as corrupted instead of raising IndexError
            alerts_corrupted.append(alert_obj)
            continue
        m = re_name.search(alert[0])
        if m:
            alert_obj.name = m.group(2).strip()
        m = re_class.search(alert[1])
        if m:
            alert_obj.classification = m.group(1).strip()
            alert_obj.priority = m.group(2).strip()
        m = re_with_ports.search(alert[2])
        if m:
            # header with ports: <ts> <ip>:<port> -> <ip>:<port>
            alert_obj.timestamp = year_prefix + m.group(1).strip()
            alert_obj.from_ip = m.group(2).strip()
            alert_obj.from_port = m.group(3).strip()
            alert_obj.to_ip = m.group(4).strip()
            alert_obj.to_port = m.group(5).strip()
        else:
            m = re_without_ports.search(alert[2])
            if m:
                # port-less header (e.g. ICMP): <ts> <ip> -> <ip>
                alert_obj.timestamp = year_prefix + m.group(1).strip()
                alert_obj.from_ip = m.group(2).strip()
                alert_obj.to_ip = m.group(3).strip()
        # lines 3 and 4 contain per-packet information; every such line is
        # unique, so it is deliberately left out of the Alert object
        if 5 < len(alert):
            # Xref information
            alert_obj.additional = alert[5]
        # keep only alerts whose mandatory fields were all extracted
        if alert_obj.validate():
            alerts_validated.append(alert_obj)
        else:
            alerts_corrupted.append(alert_obj)
    # sort all alerts chronologically
    alerts_validated = sorted(
        alerts_validated,
        key=lambda x: datetime.datetime.strptime(x.timestamp, "%Y/%m/%d-%H:%M:%S.%f"))
    print('[*] Importing alerts successful (' + str(len(alerts_validated)) + ' successful / ' + str(
        len(alerts_corrupted)) + ' ignored)')
    return alerts_validated
def calculate_time_ranges(imported, intv=5):
    """
    Split the time span covered by *imported* into ``intv`` boundary timestamps.

    The alerts must already be sorted chronologically (as returned by
    ``import_alerts``); the first and last alert define the overall span.

    :param imported: non-empty, time-sorted list of Alert objects
    :param intv: number of boundary timestamps to produce
    :return: list of ``intv`` datetime objects, the last being the final
             alert's timestamp
    """
    fmt = "%Y/%m/%d-%H:%M:%S.%f"
    span_start = datetime.datetime.strptime(imported[0].timestamp, fmt)
    span_end = datetime.datetime.strptime(imported[-1].timestamp, fmt)
    step = (span_end - span_start) / intv
    # NOTE(review): boundaries are start + 0..(intv-2) steps plus the exact
    # end, so the final interval is twice as long as the others — confirm
    # this is intended before changing it (the caller's bucketing depends on it).
    boundaries = [span_start + step * k for k in range(intv - 1)]
    boundaries.append(span_end)
    return boundaries
def generate_nodes_and_edges(alerts_validated, time_ranges=None):
"""
generates nodes and edges from the alert objects and returns one or a list of nodes and edges
:param alerts_validated: list of alert objects
:param time_ranges: if existing: time ranges/steps to which the nodes and edges need to be divided
:return: a list of nodes and a list of edges (no time ranges) OR a list of dicts of nodes and a list of dicts of edges (according to the time ranges)
"""
print('[*] Start generating nodes and edges')
all_nodes = []
all_edges = []
nodes_list = []
edges_list = []
i = 0
for alert_obj in alerts_validated:
if time_ranges:
current_alert_time = datetime.datetime.strptime(alert_obj.timestamp, "%Y/%m/%d-%H:%M:%S.%f")
if current_alert_time > time_ranges[i]:
# current alert exceeds the current time range
# copy/save all nodes & edges until now
cur_nodes_dict = pd.DataFrame.from_records(node.to_dict() for node in copy.deepcopy(all_nodes))
cur_edges_dict = pd.DataFrame.from_records(edge.to_dict() for edge in copy.deepcopy(all_edges))
nodes_list.append(cur_nodes_dict)
edges_list.append(cur_edges_dict)
i = i + 1
# move current alert to the right time range
# check if the alert also exceeds the next time range and jump if necessary
for x in range(i, len(time_ranges) - 1):
if current_alert_time > time_ranges[i]:
# current alert exceeds the current time range
# copy/save all nodes & edges until now
cur_nodes_dict = pd.DataFrame.from_records(node.to_dict() for node in copy.deepcopy(all_nodes))
cur_edges_dict = pd.DataFrame.from_records(edge.to_dict() for edge in copy.deepcopy(all_edges))
nodes_list.append(cur_nodes_dict)
edges_list.append(cur_edges_dict)
i = i + 1
# NODE
# check if node already exists and update type
# check From IP
existing_node, index = find_node_by_ip(all_nodes, alert_obj.from_ip)
if existing_node:
if existing_node.get_type() == TYPE_VICTIM:
existing_node.set_type(TYPE_COMPROMISED)
if alert_obj.from_port:
existing_node.add_port_out(alert_obj.from_port)
all_nodes[index] = existing_node
else:
# create new node
new_node = Node(alert_obj.from_ip, TYPE_ATTACKER)
if alert_obj.from_port:
new_node.add_port_out(alert_obj.from_port)
all_nodes.append(new_node)
# check To IP
existing_node, index = find_node_by_ip(all_nodes, alert_obj.to_ip)
if existing_node:
if alert_obj.to_port:
existing_node.add_port_in(alert_obj.to_port)
all_nodes[index] = existing_node
else:
# create new node
new_node = Node(alert_obj.to_ip, TYPE_VICTIM)
if alert_obj.to_port:
new_node.add_port_in(alert_obj.to_port)
all_nodes.append(new_node)
# EDGE
# check if edge already exists
flag = False
for index, edge in enumerate(all_edges):
# compare with each edge and check if there exists already the same type
if edge.compare_with_alert(alert_obj):
edge.merge_with_alert(alert_obj)
all_edges[index] = edge
flag = True
if not flag:
new_edge = Edge([alert_obj.name], [alert_obj.classification], [alert_obj.priority], alert_obj.from_ip,
alert_obj.to_ip, [0], [alert_obj.from_port], [alert_obj.to_port], [alert_obj.timestamp],
[alert_obj.additional])
all_edges.append(new_edge)
cur_nodes_dict = pd.DataFrame.from_records(node.to_dict() for node in copy.deepcopy(all_nodes))
cur_edges_dict = pd.DataFrame.from_records(edge.to_dict() for edge in copy.deepcopy(all_edges))
nodes_list.append(cur_nodes_dict)
edges_list.append(cur_edges_dict)
print('[*] Nodes and Edges successfully generated')
if time_ranges:
return nodes_list, edges_list
else:
return all_nodes, all_edges
def export_to_csv(nodes, edges):
"""
generates a csv file and saves under fix name
:param nodes: list of nodes
:param edges: list of edges
:return: nothing
"""
print('[*] Start exporting to csv (nodes.csv, edges.csv in root folder)')
df_n = pd.DataFrame.from_records(node.to_dict() for node in nodes)
df_n.to_csv('nodes.csv', index=False)
df_e = pd.DataFrame.from_records(edge.to_dict() for edge in edges)
df_e.to_csv('edges.csv', index=False)
print('[*] csv successfully exported')
return 'nodes.csv', 'edges.csv'
def scale_in_range(x, min_before, max_before, min_after=1, max_after=10):
"""converts values in the range [min_before,max_before] to values in the range [min_after,max_after]"""
return float(min_after + float(x - min_before) * float(max_after - min_after) / (max_before - min_before))
def find_node_by_ip(all_nodes, ip):
"""finds an existing node in all_nodes by given IP"""
for index, node in enumerate(all_nodes):
if node.ip == ip:
return node, index
return None, None
def find_min_max_number_of_alerts(edges):
"""return the minimal and maximal number of alerts connected to an edge"""
min_value = None
max_value = None
for edge in edges:
value = edge.number_alerts
if not min_value:
min_value = value
if not max_value:
max_value = value
if value < min_value:
min_value = value
if value > max_value:
max_value = value
return min_value, max_value
class Node:
# parameterized constructor
def __init__(self, ip_address, type):
self.ip = ip_address
self.type = type # 1: Attacker, 2: Victim, 3: Compromised
self.ports_in = []
self.ports_out = []
def add_port_in(self, port):
if port not in self.ports_in:
self.ports_in.append(port)
def add_port_out(self, port):
if port not in self.ports_out:
self.ports_out.append(port)
def get_type(self):
return self.type
def set_type(self, new_type):
self.type = new_type
def to_dict(self):
return {
'IP': self.ip,
'Type': self.type,
'Ports in': self.ports_in,
'Ports out': self.ports_out,
}
class Edge:
# parameterized constructor
def __init__(self, attack_names, classifications, priorities, from_ip, to_ip, directions, from_ports, to_ports, timestamps,
additional):
self.attack_names = attack_names
self.classifications = classifications
self.priorities = priorities
self.from_ip = from_ip
self.to_ip = to_ip
self.directions = directions
self.number_alerts = [1] # number of alerts per attack
self.from_ports = []
if from_ports:
self.from_ports.append(from_ports)
self.to_ports = []
if to_ports:
self.to_ports.append(to_ports)
self.timestamps = []
self.timestamps.append(timestamps)
self.additional = []
if additional:
self.additional.append(additional)
self.weight = 1 # sum of all alerts related to this edge
def update_weight(self, min_number_alerts, max_number_alerts):
"""update the weight (width of the edge) corresponding to the number of alerts and scale limit"""
self.weight = scale_in_range(self.number_alerts, min_number_alerts, max_number_alerts,
EDGE_WEIGHT_SCALING_MIN, EDGE_WEIGHT_SCALING_MAX)
def compare_with_alert(self, alert):
"""compare edge with alert and return true if can be merged"""
if (self.from_ip == alert.from_ip and self.to_ip == alert.to_ip) or (
self.from_ip == alert.to_ip and self.to_ip == alert.from_ip):
return True
else:
return False
def merge_with_alert(self, alert):
"""add alert information to existing edge"""
# check if attack exists already in this edge
index = self.find_attack_in_edge(alert)
if isinstance(index, int):
# add alert to attack
self.merge_alert_with_attack(index, alert)
else:
# create new attack
self.add_new_attack_to_edge(alert)
def find_attack_in_edge(self, alert):
"""find existing attack in current edge and return index (consider also direction!)"""
if alert.from_ip == self.from_ip:
direction = 0
else:
direction = 1
for index, attack_name in enumerate(self.attack_names):
if attack_name == alert.name:
if self.directions[index] == direction:
return index
return None
def merge_alert_with_attack(self, index, alert):
"""add all alert information to the corresponding attack in the edge (at index)"""
if alert.from_port not in self.from_ports[index]:
self.from_ports[index].append(alert.from_port)
if alert.to_port not in self.to_ports[index]:
self.to_ports[index].append(alert.to_port)
if alert.additional not in self.additional[index]:
self.additional[index].append(alert.additional)
if alert.timestamp not in self.timestamps[index]:
self.timestamps[index].append(alert.timestamp)
self.number_alerts[index] += 1 # increase number of this alert
self.weight += 1 # increase sum of alerts related to the current edge
def add_new_attack_to_edge(self, alert):
if alert.from_ip == self.from_ip:
direction = 0
else:
direction = 1
"""add new attack in edge (new index)"""
self.attack_names.append(alert.name)
self.classifications.append(alert.classification)
self.priorities.append(alert.priority)
self.directions.append(direction)
self.from_ports.append([alert.from_port])
self.to_ports.append([alert.to_port])
self.timestamps.append([alert.timestamp])
self.additional.append([alert.additional])
self.number_alerts.append(1)
self.weight += 1
def to_dict(self):
# shorten list of timestamps (to big for csv)
# for index, timestamps in enumerate(self.timestamps):
# if len(timestamps) > 10:
# # take the first 10 timestamps -> ToDo ?!
# self.timestamps[index] = timestamps[:10]
# Update: just take first and last timestamp
for index, timestamps in enumerate(self.timestamps):
self.timestamps[index] = str(timestamps[0]) + " --- " + str(timestamps[-1])
return {
'Attack Name': self.attack_names,
'Classification': self.classifications,
'Priority': self.priorities,
'From IP': self.from_ip,
'To IP': self.to_ip,
'Attack Direction': self.directions,
'From Ports': self.from_ports,
'To Ports': self.to_ports,
'Timestamps': self.timestamps,
'Count': self.number_alerts,
'Weight': self.weight,
'Additional': self.additional,
}
| [
"david.car@gmx.de"
] | david.car@gmx.de |
f9da4fa860603ab92d37c5407c7b32fa27b16dd9 | c40a1258e203daa34090a3ab0ccc69866b6dd8e4 | /2.py | 36563fc7ed7764c7dec07f720a64b8ce49fc4c1c | [] | no_license | marekszlosek/PythonIS | fa2a42a8ab53d017025a13b435ae145af42f43fe | 1657001d1da23c5c0e08bf71491d92683455c086 | refs/heads/master | 2021-02-13T19:17:54.657429 | 2020-03-31T15:03:49 | 2020-03-31T15:03:49 | 244,722,956 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 354 | py | print("KALKULATOR PRZELICZANIA STOPNI FARENHEITA NA CELSJUSZA \n")
farenheit = input("Wpisz stopnie Farenheita: ")
farenheit = float(farenheit)
celsjusz = (farenheit-32)/1.8
celsjusz = str(celsjusz)
farenheit = str(farenheit)
print("Wzór: (Stopnie Farenheita -32)/1.8 = Stopnie Celsjusza ")
print( "(" + farenheit + " - 32) / 1.8 = " + celsjusz ) | [
"noreply@github.com"
] | marekszlosek.noreply@github.com |
0ae53c0486a272162874ca46f41ecb302e6654f0 | a9d65a3fb6e990c5bb250cfde44220182f6cd430 | /indra/tools/analyze_ontology.py | 2f8bda343d488ebd0f277e9aedab4e858bfe2742 | [
"BSD-2-Clause",
"BSD-2-Clause-Views"
] | permissive | dianakolusheva/indra | 66e6c69b762922d4f79757e388b693f76b3fcd56 | 205a719c5b1ff2333e415476b4136e8c57c22949 | refs/heads/master | 2022-03-14T23:10:57.718762 | 2022-02-11T14:58:12 | 2022-02-11T14:58:12 | 170,338,649 | 0 | 0 | BSD-2-Clause | 2019-02-12T15:09:36 | 2019-02-12T15:09:30 | Python | UTF-8 | Python | false | false | 1,402 | py | from collections import Counter, defaultdict
import networkx
from indra.ontology.bio import bio_ontology
def plot_problem(problem):
import matplotlib.pyplot as plt
plt.ion()
plt.figure()
G = bio_ontology.subgraph(problem)
pos = networkx.spring_layout(G)
networkx.draw_networkx(G, pos, node_color='pink')
edge_labels = networkx.get_edge_attributes(G, 'source')
networkx.draw_networkx_edge_labels(G, pos, edge_labels=edge_labels)
plt.show()
if __name__ == '__main__':
bio_ontology.initialize()
xrefs = [(e[0], e[1]) for e in bio_ontology.edges(data=True) if
e[2]['type'] == 'xref']
xrefg = bio_ontology.edge_subgraph(xrefs)
comps = networkx.algorithms.strongly_connected_components(xrefg)
problems = []
for comp in comps:
namespaces = [bio_ontology.get_ns(node) for node in comp]
cnt = Counter(namespaces)
if any(v > 1 for k, v in cnt.items()):
problems.append(comp)
print('Found %d problems in total' % len(problems))
problems_by_ns = defaultdict(list)
for problem in problems:
nscnt = Counter([bio_ontology.get_ns(n) for n in problem])
namespaces = [ns for ns, cnt in nscnt.items() if cnt > 1]
for ns in namespaces:
problems_by_ns[ns].append(problem)
for ns, problems_ns in problems_by_ns.items():
print(ns, len(problems_ns)) | [
"ben.gyori@gmail.com"
] | ben.gyori@gmail.com |
680db4ce7eaa94fa1fa0de9790f1deff0702799a | 31592ec38e2fbf3e6d11fe766eba7483b90d980b | /todoapp/urls.py | f51e1edbacb01f39a0635266a71b38e7272fa80e | [] | no_license | nehaannvinu/ToDo-App | 24f63c6c02e4ace2e945f41bbb0dfcdc45bd0c1f | aaec7dced9de99ba78c209c79c81f869348f78cc | refs/heads/master | 2023-07-21T05:31:40.595256 | 2021-08-28T14:39:52 | 2021-08-28T14:39:52 | 400,815,202 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 250 | py | from django.urls import path
from . import views
urlpatterns =[
path('list/', views.list_todo_items),
path('insert_todo/', views.insert_todo, name='insert_todo'),
path('delete_todo/<int:todo_id>', views.delete_todo, name='delete_todo')
] | [
"nehaannvinu2607@gmail.com"
] | nehaannvinu2607@gmail.com |
b288cb10d00690cd17cc17009d6ddb923bc29e73 | 6a79f3a84b6f96e9b4f3f8e48be8ef842035a4fa | /Python/Main.py | d6b213721ef22266cf2c1a4642a1a97f1808d176 | [] | no_license | livepy/MedicalTriCorder | 2adc0e00093fe8680b02c4541be42a5ccbfd8e8b | 069839314cc2d663fb8e0d20bb56397dc18ea4e7 | refs/heads/master | 2021-01-17T14:12:13.633611 | 2013-02-14T20:39:37 | 2013-02-14T20:39:37 | 43,578,746 | 1 | 0 | null | 2015-10-02T22:12:53 | 2015-10-02T22:12:52 | null | UTF-8 | Python | false | false | 1,814 | py | import os
import serial
import BTSetup
import MainForm
import tkMessageBox
from Tkinter import *
from serial.tools import list_ports
def main():
showSetup()
def showSetup():
if (os.name == "nt"):
height = 250
width = 400
else:
height = 250
width = 500
root = Tk()
root.protocol('WM_DELETE_WINDOW', exitApplication)
root.resizable(0,0)
#root.iconbitmap("tricorder.png")
setupApp = BTSetup.BTSetup(master = root)
setupApp.master.title("Medical TriCorder - Setup")
xp = (root.winfo_screenwidth() / 2) - (width / 2) - 8
yp = (root.winfo_screenheight() / 2) - (height / 2) - 8
setupApp.master.geometry("{0}x{1}+{2}+{3}".format(str(width), str(height), xp, yp))
setupApp.master.minsize(width, height)
setupApp.master.maxsize(width, height)
setupApp.mainloop()
if (setupApp.selectedPort == ''):
tkMessageBox.showinfo("HSU Error", "You must selected a valid HSU port")
showSetup()
else:
showMainForm(setupApp.selectedPort)
def showMainForm(serialPort):
height = 130
width = 300
root = Tk()
root.protocol('WM_DELETE_WINDOW', exitApplication)
root.resizable(0,0)
#root.iconbitmap("tricorder.png")
app = MainForm.MainForm(master = root, serialPort = serialPort)
app.master.title("Medical TriCorder")
xp = (root.winfo_screenwidth() / 2) - (width / 2) - 8
yp = (root.winfo_screenheight() / 2) - (height / 2) - 8
app.master.geometry("{0}x{1}+{2}+{3}".format(str(width), str(height), xp, yp))
app.master.minsize(width, height)
app.master.maxsize(width, height)
app.mainloop()
def exitApplication():
os._exit(0)
if __name__ == "__main__":
main()
| [
"mattwise1985@gmail.com"
] | mattwise1985@gmail.com |
2f6baeab3e7045c810e405dcd4226f5aea134be2 | 8c2f12f78c32405e57001ad39f1233dfa5bc8c1e | /mycode/neopixel_light.py | 1274799ee6778bcfd998b38dea7cf0f439f45e01 | [] | no_license | hoony6134/raspberrypi | 9931edfb4c006b1b4699ee7494c1703c771d4bc2 | 25cc80e70f75a188625ce9da39c86510e3e3aa1e | refs/heads/main | 2023-03-31T21:54:32.723618 | 2021-03-27T05:13:11 | 2021-03-27T05:13:11 | 347,255,149 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 677 | py | import time
import board
import neopixel
import RPi.GPIO as GPIO
GPIO.setmode(GPIO.BCM)
LIGHT_PIN = 14
GPIO.setup(LIGHT_PIN, GPIO.IN)
pixel_pin = board.D18
num_pixels = 7
ORDER = neopixel.GRB
pixels = neopixel.NeoPixel(
pixel_pin, num_pixels, brightness=0.2, auto_write=False, pixel_order=ORDER)
while True:
try:
if GPIO.input(LIGHT_PIN):
print('Dark')
pixels.fill((255, 255, 255))
pixels.show()
else:
print('Bright')
pixels.fill((0, 0, 0))
pixels.show()
time.sleep(0.2)
except KeyboardInterrupt:
GPIO.cleanup()
print('clean')
break | [
"noreply@github.com"
] | hoony6134.noreply@github.com |
80b7d8ec7ede6c6a042edef6326700643ed2c33f | 8db577ee9b17b9230b767e35eac7113af13743a1 | /backend/Api/endpoints/user.py | 25084546f0aa323f6d53544d4de67259bd89be23 | [] | no_license | Dariant31/bibcamp | 5b296552e9ba264637a89b2eef02b586459c2731 | cface6cf5a0159e6d88b4b7aa89fb13e788e61b7 | refs/heads/main | 2023-01-19T10:49:16.265062 | 2020-11-29T22:17:07 | 2020-11-29T22:17:07 | 317,048,785 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 639 | py | from fastapi import APIRouter
from Api.dtos.user import user_dto, user_dto_input
from Api.models.user import Users
router = APIRouter()
@router.get("/{user_sub}")
async def getUsers(user_sub: str):
return await user_dto.from_queryset_single(Users.get(sub=user_sub))
# @router.put("/{user_id}")
# async def putUser(user_id: int, user: user_dto_input):
# await Users.filter(id=user_id).update(**user.dict())
# return await user_dto.from_queryset_single(Users.get(id=user_id))
async def postUser(user: user_dto_input):
user_obj = await Users.create(**user.dict())
return await user_dto.from_tortoise_orm(user_obj)
| [
"dariant.siswadie@codecamp-n.com"
] | dariant.siswadie@codecamp-n.com |
7033b3c8312229dfa43dacae6507b407b6a512a0 | cf7483ea88330b695d459eafd8307fa54f7b7fe3 | /PythonTutorial/ListUnpacking.py | a5de20e1789592cb7648aeee819a9bf824f8b82c | [] | no_license | buukhanh243/Python100Days | 048ab70712247dc2fd2be7b673699715973d091e | 0d32e1b041d6a62e8a1b25f7249483b818f630b5 | refs/heads/master | 2023-07-17T02:40:26.297505 | 2021-08-12T15:24:37 | 2021-08-12T15:24:37 | 386,797,316 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 950 | py | ''' Lưu ý:
amazoncart = [a, b, c, d]
new_cart = amazoncart
new_cart[0] = 'gum'
--> new_Cart va amazon_cart = [gum,b,c,d]---> muon coppy ma ko thay doi list amazon
----> ta dung ----> new_cart = amazoncart[:]
amazon_cart.insert(1, 100) ---> a,100,b,c,d
amazon_cart.extend([1, 100]) ---> a,b,c,d,1,100
amazon_cart.pop() ---> xoa cuoi phan tu --> a,b,c ---->pop(0): xoa vi tri so 0---->b,c,d ---> pop la truyen vao index
amazon_cart.remove(truyền vào value: c) ----> a,b,d
new_list = amazon.remove()--->newlist = non
new_list = amazon.pop()---> new = d
ngoài ra còn có hàm count('thuộc tính') -- đếm số thuộc tính trong mảng
hàm index[0:'thuộc tính']--- vị trí xuất hiện thuộc tính
'''
#a,b,c = 1,2,3
a,b,c = [1,2,3]
print(f'{a} & {b} & {c}')
'''Get a=1, b=2, c=3'''
d,f,g, *anyIfYouLike,d = [1,2,3,4,5,6,7,8,9]
#[4, 5, 6, 7, 8]
print(anyIfYouLike)
print(d)
#user2 = dict(name='john son')-->{'name':'john son'} | [
"buukhanh243@gmail.com"
] | buukhanh243@gmail.com |
a6c332215cf1ffb577a25b38324ce2b756b1d03e | d0d1160e7727bde0ebe7aa8e0deda2ac6cc57b3a | /proto/openconfig/reference/rpc/gnmi/gnmi_pb2.py | 01768b4b4cad78f48fa2cedf8050f8c9190d0355 | [
"Apache-2.0"
] | permissive | liusheng198933/PI | 9c0be8064d5e0d283d535a99ae2712121dd01d6f | 124ac3c45df72a4b93eee1664266e923f4b69545 | refs/heads/master | 2021-05-12T12:39:39.420972 | 2018-01-19T09:45:00 | 2018-01-19T09:45:00 | 117,418,492 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | true | 74,651 | py | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: gnmi.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.protobuf import any_pb2 as google_dot_protobuf_dot_any__pb2
from google.protobuf import descriptor_pb2 as google_dot_protobuf_dot_descriptor__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='gnmi.proto',
package='gnmi',
syntax='proto3',
serialized_pb=_b('\n\ngnmi.proto\x12\x04gnmi\x1a\x19google/protobuf/any.proto\x1a google/protobuf/descriptor.proto\"\x86\x01\n\x0cNotification\x12\x11\n\ttimestamp\x18\x01 \x01(\x03\x12\x1a\n\x06prefix\x18\x02 \x01(\x0b\x32\n.gnmi.Path\x12\r\n\x05\x61lias\x18\x03 \x01(\t\x12\x1c\n\x06update\x18\x04 \x03(\x0b\x32\x0c.gnmi.Update\x12\x1a\n\x06\x64\x65lete\x18\x05 \x03(\x0b\x32\n.gnmi.Path\"a\n\x06Update\x12\x18\n\x04path\x18\x01 \x01(\x0b\x32\n.gnmi.Path\x12\x1e\n\x05value\x18\x02 \x01(\x0b\x32\x0b.gnmi.ValueB\x02\x18\x01\x12\x1d\n\x03val\x18\x03 \x01(\x0b\x32\x10.gnmi.TypedValue\"\xce\x02\n\nTypedValue\x12\x14\n\nstring_val\x18\x01 \x01(\tH\x00\x12\x11\n\x07int_val\x18\x02 \x01(\x03H\x00\x12\x12\n\x08uint_val\x18\x03 \x01(\x04H\x00\x12\x12\n\x08\x62ool_val\x18\x04 \x01(\x08H\x00\x12\x13\n\tbytes_val\x18\x05 \x01(\x0cH\x00\x12\x13\n\tfloat_val\x18\x06 \x01(\x02H\x00\x12&\n\x0b\x64\x65\x63imal_val\x18\x07 \x01(\x0b\x32\x0f.gnmi.Decimal64H\x00\x12)\n\x0cleaflist_val\x18\x08 \x01(\x0b\x32\x11.gnmi.ScalarArrayH\x00\x12\'\n\x07\x61ny_val\x18\t \x01(\x0b\x32\x14.google.protobuf.AnyH\x00\x12\x12\n\x08json_val\x18\n \x01(\x0cH\x00\x12\x17\n\rjson_ietf_val\x18\x0b \x01(\x0cH\x00\x12\x13\n\tascii_val\x18\x0c \x01(\tH\x00\x42\x07\n\x05value\"\'\n\x04Path\x12\x0f\n\x07\x65lement\x18\x01 \x03(\t\x12\x0e\n\x06origin\x18\x02 \x01(\t\"8\n\x05Value\x12\r\n\x05value\x18\x01 \x01(\x0c\x12\x1c\n\x04type\x18\x02 \x01(\x0e\x32\x0e.gnmi.Encoding:\x02\x18\x01\"J\n\x05\x45rror\x12\x0c\n\x04\x63ode\x18\x01 \x01(\r\x12\x0f\n\x07message\x18\x02 \x01(\t\x12\"\n\x04\x64\x61ta\x18\x03 \x01(\x0b\x32\x14.google.protobuf.Any\".\n\tDecimal64\x12\x0e\n\x06\x64igits\x18\x01 \x01(\x04\x12\x11\n\tprecision\x18\x02 \x01(\r\"0\n\x0bScalarArray\x12!\n\x07\x65lement\x18\x01 \x03(\x0b\x32\x10.gnmi.TypedValue\"\x8a\x01\n\x10SubscribeRequest\x12+\n\tsubscribe\x18\x01 \x01(\x0b\x32\x16.gnmi.SubscriptionListH\x00\x12\x1a\n\x04poll\x18\x03 \x01(\x0b\x32\n.gnmi.PollH\x00\x12\"\n\x07\x61liases\x18\x04 
\x01(\x0b\x32\x0f.gnmi.AliasListH\x00\x42\t\n\x07request\"\x06\n\x04Poll\"|\n\x11SubscribeResponse\x12$\n\x06update\x18\x01 \x01(\x0b\x32\x12.gnmi.NotificationH\x00\x12\x17\n\rsync_response\x18\x03 \x01(\x08H\x00\x12\x1c\n\x05\x65rror\x18\x04 \x01(\x0b\x32\x0b.gnmi.ErrorH\x00\x42\n\n\x08response\"\xc1\x02\n\x10SubscriptionList\x12\x1a\n\x06prefix\x18\x01 \x01(\x0b\x32\n.gnmi.Path\x12(\n\x0csubscription\x18\x02 \x03(\x0b\x32\x12.gnmi.Subscription\x12\x13\n\x0buse_aliases\x18\x03 \x01(\x08\x12\x1d\n\x03qos\x18\x04 \x01(\x0b\x32\x10.gnmi.QOSMarking\x12)\n\x04mode\x18\x05 \x01(\x0e\x32\x1b.gnmi.SubscriptionList.Mode\x12\x19\n\x11\x61llow_aggregation\x18\x06 \x01(\x08\x12#\n\nuse_models\x18\x07 \x03(\x0b\x32\x0f.gnmi.ModelData\x12 \n\x08\x65ncoding\x18\x08 \x01(\x0e\x32\x0e.gnmi.Encoding\"&\n\x04Mode\x12\n\n\x06STREAM\x10\x00\x12\x08\n\x04ONCE\x10\x01\x12\x08\n\x04POLL\x10\x02\"\x9f\x01\n\x0cSubscription\x12\x18\n\x04path\x18\x01 \x01(\x0b\x32\n.gnmi.Path\x12$\n\x04mode\x18\x02 \x01(\x0e\x32\x16.gnmi.SubscriptionMode\x12\x17\n\x0fsample_interval\x18\x03 \x01(\x04\x12\x1a\n\x12suppress_redundant\x18\x04 \x01(\x08\x12\x1a\n\x12heartbeat_interval\x18\x05 \x01(\x04\"\x1d\n\nQOSMarking\x12\x0f\n\x07marking\x18\x01 \x01(\r\"0\n\x05\x41lias\x12\x18\n\x04path\x18\x01 \x01(\x0b\x32\n.gnmi.Path\x12\r\n\x05\x61lias\x18\x02 \x01(\t\"\'\n\tAliasList\x12\x1a\n\x05\x61lias\x18\x01 \x03(\x0b\x32\x0b.gnmi.Alias\"\x81\x01\n\nSetRequest\x12\x1a\n\x06prefix\x18\x01 \x01(\x0b\x32\n.gnmi.Path\x12\x1a\n\x06\x64\x65lete\x18\x02 \x03(\x0b\x32\n.gnmi.Path\x12\x1d\n\x07replace\x18\x03 \x03(\x0b\x32\x0c.gnmi.Update\x12\x1c\n\x06update\x18\x04 \x03(\x0b\x32\x0c.gnmi.Update\"\x84\x01\n\x0bSetResponse\x12\x1a\n\x06prefix\x18\x01 \x01(\x0b\x32\n.gnmi.Path\x12$\n\x08response\x18\x02 \x03(\x0b\x32\x12.gnmi.UpdateResult\x12 \n\x07message\x18\x03 \x01(\x0b\x32\x0b.gnmi.ErrorB\x02\x18\x01\x12\x11\n\ttimestamp\x18\x04 \x01(\x03\"\xc6\x01\n\x0cUpdateResult\x12\x15\n\ttimestamp\x18\x01 
\x01(\x03\x42\x02\x18\x01\x12\x18\n\x04path\x18\x02 \x01(\x0b\x32\n.gnmi.Path\x12\x1c\n\x07message\x18\x03 \x01(\x0b\x32\x0b.gnmi.Error\x12(\n\x02op\x18\x04 \x01(\x0e\x32\x1c.gnmi.UpdateResult.Operation\"=\n\tOperation\x12\x0b\n\x07INVALID\x10\x00\x12\n\n\x06\x44\x45LETE\x10\x01\x12\x0b\n\x07REPLACE\x10\x02\x12\n\n\x06UPDATE\x10\x03\"\xef\x01\n\nGetRequest\x12\x1a\n\x06prefix\x18\x01 \x01(\x0b\x32\n.gnmi.Path\x12\x18\n\x04path\x18\x02 \x03(\x0b\x32\n.gnmi.Path\x12\'\n\x04type\x18\x03 \x01(\x0e\x32\x19.gnmi.GetRequest.DataType\x12 \n\x08\x65ncoding\x18\x05 \x01(\x0e\x32\x0e.gnmi.Encoding\x12#\n\nuse_models\x18\x06 \x03(\x0b\x32\x0f.gnmi.ModelData\";\n\x08\x44\x61taType\x12\x07\n\x03\x41LL\x10\x00\x12\n\n\x06\x43ONFIG\x10\x01\x12\t\n\x05STATE\x10\x02\x12\x0f\n\x0bOPERATIONAL\x10\x03\"S\n\x0bGetResponse\x12(\n\x0cnotification\x18\x01 \x03(\x0b\x32\x12.gnmi.Notification\x12\x1a\n\x05\x65rror\x18\x02 \x01(\x0b\x32\x0b.gnmi.Error\"\x13\n\x11\x43\x61pabilityRequest\"\x82\x01\n\x12\x43\x61pabilityResponse\x12)\n\x10supported_models\x18\x01 \x03(\x0b\x32\x0f.gnmi.ModelData\x12+\n\x13supported_encodings\x18\x02 \x03(\x0e\x32\x0e.gnmi.Encoding\x12\x14\n\x0cgNMI_version\x18\x03 \x01(\t\"@\n\tModelData\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x14\n\x0corganization\x18\x02 \x01(\t\x12\x0f\n\x07version\x18\x03 \x01(\t*D\n\x08\x45ncoding\x12\x08\n\x04JSON\x10\x00\x12\t\n\x05\x42YTES\x10\x01\x12\t\n\x05PROTO\x10\x02\x12\t\n\x05\x41SCII\x10\x03\x12\r\n\tJSON_IETF\x10\x04*A\n\x10SubscriptionMode\x12\x12\n\x0eTARGET_DEFINED\x10\x00\x12\r\n\tON_CHANGE\x10\x01\x12\n\n\x06SAMPLE\x10\x02\x32\xe3\x01\n\x04gNMI\x12\x41\n\x0c\x43\x61pabilities\x12\x17.gnmi.CapabilityRequest\x1a\x18.gnmi.CapabilityResponse\x12*\n\x03Get\x12\x10.gnmi.GetRequest\x1a\x11.gnmi.GetResponse\x12*\n\x03Set\x12\x10.gnmi.SetRequest\x1a\x11.gnmi.SetResponse\x12@\n\tSubscribe\x12\x16.gnmi.SubscribeRequest\x1a\x17.gnmi.SubscribeResponse(\x01\x30\x01:3\n\x0cgnmi_service\x12\x1c.google.protobuf.FileOptions\x18\xe9\x07 
\x01(\tB\x08\xca>\x05\x30.3.1b\x06proto3')
,
dependencies=[google_dot_protobuf_dot_any__pb2.DESCRIPTOR,google_dot_protobuf_dot_descriptor__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_ENCODING = _descriptor.EnumDescriptor(
name='Encoding',
full_name='gnmi.Encoding',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='JSON', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='BYTES', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='PROTO', index=2, number=2,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ASCII', index=3, number=3,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='JSON_IETF', index=4, number=4,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=2825,
serialized_end=2893,
)
_sym_db.RegisterEnumDescriptor(_ENCODING)
Encoding = enum_type_wrapper.EnumTypeWrapper(_ENCODING)
_SUBSCRIPTIONMODE = _descriptor.EnumDescriptor(
name='SubscriptionMode',
full_name='gnmi.SubscriptionMode',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='TARGET_DEFINED', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ON_CHANGE', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='SAMPLE', index=2, number=2,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=2895,
serialized_end=2960,
)
_sym_db.RegisterEnumDescriptor(_SUBSCRIPTIONMODE)
SubscriptionMode = enum_type_wrapper.EnumTypeWrapper(_SUBSCRIPTIONMODE)
JSON = 0
BYTES = 1
PROTO = 2
ASCII = 3
JSON_IETF = 4
TARGET_DEFINED = 0
ON_CHANGE = 1
SAMPLE = 2
GNMI_SERVICE_FIELD_NUMBER = 1001
gnmi_service = _descriptor.FieldDescriptor(
name='gnmi_service', full_name='gnmi.gnmi_service', index=0,
number=1001, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=True, extension_scope=None,
options=None)
_SUBSCRIPTIONLIST_MODE = _descriptor.EnumDescriptor(
name='Mode',
full_name='gnmi.SubscriptionList.Mode',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='STREAM', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ONCE', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='POLL', index=2, number=2,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=1486,
serialized_end=1524,
)
_sym_db.RegisterEnumDescriptor(_SUBSCRIPTIONLIST_MODE)
_UPDATERESULT_OPERATION = _descriptor.EnumDescriptor(
name='Operation',
full_name='gnmi.UpdateResult.Operation',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='INVALID', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DELETE', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='REPLACE', index=2, number=2,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='UPDATE', index=3, number=3,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=2215,
serialized_end=2276,
)
_sym_db.RegisterEnumDescriptor(_UPDATERESULT_OPERATION)
_GETREQUEST_DATATYPE = _descriptor.EnumDescriptor(
name='DataType',
full_name='gnmi.GetRequest.DataType',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='ALL', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CONFIG', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='STATE', index=2, number=2,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='OPERATIONAL', index=3, number=3,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=2459,
serialized_end=2518,
)
_sym_db.RegisterEnumDescriptor(_GETREQUEST_DATATYPE)
_NOTIFICATION = _descriptor.Descriptor(
name='Notification',
full_name='gnmi.Notification',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='timestamp', full_name='gnmi.Notification.timestamp', index=0,
number=1, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='prefix', full_name='gnmi.Notification.prefix', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='alias', full_name='gnmi.Notification.alias', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='update', full_name='gnmi.Notification.update', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='delete', full_name='gnmi.Notification.delete', index=4,
number=5, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=82,
serialized_end=216,
)
# protoc-generated Descriptor for the gnmi.Update message: fields path (no. 1,
# message), value (no. 2, message) and val (no. 3, message).  Field 'value'
# carries a serialized FieldOptions blob; '\030\001' appears to encode
# FieldOptions.deprecated=True (standard protoc output) -- confirm in the .proto.
# The message_type cross-references (None here) are patched in after all
# descriptors are constructed.  Generated code: do not hand-edit.
_UPDATE = _descriptor.Descriptor(
  name='Update',
  full_name='gnmi.Update',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='path', full_name='gnmi.Update.path', index=0,
      number=1, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='value', full_name='gnmi.Update.value', index=1,
      number=2, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\030\001'))),
    _descriptor.FieldDescriptor(
      name='val', full_name='gnmi.Update.val', index=2,
      number=3, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=218,
  serialized_end=315,
)
# protoc-generated Descriptor for gnmi.TypedValue: twelve scalar/message
# alternatives (string_val, int_val, uint_val, bool_val, bytes_val, float_val,
# decimal_val, leaflist_val, any_val, json_val, json_ietf_val, ascii_val).
# All twelve belong to the single oneof 'value'; the oneof's member list is
# empty here and is populated by the linkage statements emitted after the
# descriptors.  Generated code: do not hand-edit.
_TYPEDVALUE = _descriptor.Descriptor(
  name='TypedValue',
  full_name='gnmi.TypedValue',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='string_val', full_name='gnmi.TypedValue.string_val', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='int_val', full_name='gnmi.TypedValue.int_val', index=1,
      number=2, type=3, cpp_type=2, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='uint_val', full_name='gnmi.TypedValue.uint_val', index=2,
      number=3, type=4, cpp_type=4, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='bool_val', full_name='gnmi.TypedValue.bool_val', index=3,
      number=4, type=8, cpp_type=7, label=1,
      has_default_value=False, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='bytes_val', full_name='gnmi.TypedValue.bytes_val', index=4,
      number=5, type=12, cpp_type=9, label=1,
      has_default_value=False, default_value=_b(""),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='float_val', full_name='gnmi.TypedValue.float_val', index=5,
      number=6, type=2, cpp_type=6, label=1,
      has_default_value=False, default_value=float(0),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='decimal_val', full_name='gnmi.TypedValue.decimal_val', index=6,
      number=7, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='leaflist_val', full_name='gnmi.TypedValue.leaflist_val', index=7,
      number=8, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='any_val', full_name='gnmi.TypedValue.any_val', index=8,
      number=9, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='json_val', full_name='gnmi.TypedValue.json_val', index=9,
      number=10, type=12, cpp_type=9, label=1,
      has_default_value=False, default_value=_b(""),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='json_ietf_val', full_name='gnmi.TypedValue.json_ietf_val', index=10,
      number=11, type=12, cpp_type=9, label=1,
      has_default_value=False, default_value=_b(""),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='ascii_val', full_name='gnmi.TypedValue.ascii_val', index=11,
      number=12, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
    _descriptor.OneofDescriptor(
      name='value', full_name='gnmi.TypedValue.value',
      index=0, containing_type=None, fields=[]),
  ],
  serialized_start=318,
  serialized_end=652,
)
# protoc-generated Descriptor for gnmi.Path: 'element' (no. 1, repeated string;
# label=3 is LABEL_REPEATED) and 'origin' (no. 2, string).  Generated code:
# do not hand-edit.
_PATH = _descriptor.Descriptor(
  name='Path',
  full_name='gnmi.Path',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='element', full_name='gnmi.Path.element', index=0,
      number=1, type=9, cpp_type=9, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='origin', full_name='gnmi.Path.origin', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=654,
  serialized_end=693,
)
# protoc-generated Descriptor for gnmi.Value: 'value' (no. 1, bytes) and
# 'type' (no. 2, enum; linked to _ENCODING after construction).  The message
# carries serialized MessageOptions '\030\001', which appears to encode
# MessageOptions.deprecated=True -- consistent with TypedValue superseding
# Value; confirm against the .proto.  Generated code: do not hand-edit.
_VALUE = _descriptor.Descriptor(
  name='Value',
  full_name='gnmi.Value',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='value', full_name='gnmi.Value.value', index=0,
      number=1, type=12, cpp_type=9, label=1,
      has_default_value=False, default_value=_b(""),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='type', full_name='gnmi.Value.type', index=1,
      number=2, type=14, cpp_type=8, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('\030\001')),
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=695,
  serialized_end=751,
)
# protoc-generated Descriptor for gnmi.Error: 'code' (no. 1, uint32),
# 'message' (no. 2, string) and 'data' (no. 3, message; linked to
# google.protobuf.Any after construction).  Generated code: do not hand-edit.
_ERROR = _descriptor.Descriptor(
  name='Error',
  full_name='gnmi.Error',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='code', full_name='gnmi.Error.code', index=0,
      number=1, type=13, cpp_type=3, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='message', full_name='gnmi.Error.message', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='data', full_name='gnmi.Error.data', index=2,
      number=3, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=753,
  serialized_end=827,
)
# protoc-generated Descriptor for gnmi.Decimal64: 'digits' (no. 1, uint64) and
# 'precision' (no. 2, uint32).  Generated code: do not hand-edit.
_DECIMAL64 = _descriptor.Descriptor(
  name='Decimal64',
  full_name='gnmi.Decimal64',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='digits', full_name='gnmi.Decimal64.digits', index=0,
      number=1, type=4, cpp_type=4, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='precision', full_name='gnmi.Decimal64.precision', index=1,
      number=2, type=13, cpp_type=3, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=829,
  serialized_end=875,
)
# protoc-generated Descriptor for gnmi.ScalarArray: a single repeated message
# field 'element' (no. 1; linked to _TYPEDVALUE after construction).
# Generated code: do not hand-edit.
_SCALARARRAY = _descriptor.Descriptor(
  name='ScalarArray',
  full_name='gnmi.ScalarArray',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='element', full_name='gnmi.ScalarArray.element', index=0,
      number=1, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=877,
  serialized_end=925,
)
# protoc-generated Descriptor for gnmi.SubscribeRequest: message fields
# 'subscribe' (no. 1), 'poll' (no. 3) and 'aliases' (no. 4), all members of
# the oneof 'request' (populated after construction).  Field number 2 does not
# appear here -- presumably reserved/removed in the .proto; confirm there.
# Generated code: do not hand-edit.
_SUBSCRIBEREQUEST = _descriptor.Descriptor(
  name='SubscribeRequest',
  full_name='gnmi.SubscribeRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='subscribe', full_name='gnmi.SubscribeRequest.subscribe', index=0,
      number=1, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='poll', full_name='gnmi.SubscribeRequest.poll', index=1,
      number=3, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='aliases', full_name='gnmi.SubscribeRequest.aliases', index=2,
      number=4, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
    _descriptor.OneofDescriptor(
      name='request', full_name='gnmi.SubscribeRequest.request',
      index=0, containing_type=None, fields=[]),
  ],
  serialized_start=928,
  serialized_end=1066,
)
# protoc-generated Descriptor for gnmi.Poll: an empty message (no fields);
# used as the 'poll' alternative of SubscribeRequest's oneof.  Generated code:
# do not hand-edit.
_POLL = _descriptor.Descriptor(
  name='Poll',
  full_name='gnmi.Poll',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=1068,
  serialized_end=1074,
)
# protoc-generated Descriptor for gnmi.SubscribeResponse: 'update' (no. 1,
# message), 'sync_response' (no. 3, bool) and 'error' (no. 4, message), all
# members of the oneof 'response' (populated after construction).  Field
# number 2 is absent -- presumably reserved in the .proto.  Generated code:
# do not hand-edit.
_SUBSCRIBERESPONSE = _descriptor.Descriptor(
  name='SubscribeResponse',
  full_name='gnmi.SubscribeResponse',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='update', full_name='gnmi.SubscribeResponse.update', index=0,
      number=1, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='sync_response', full_name='gnmi.SubscribeResponse.sync_response', index=1,
      number=3, type=8, cpp_type=7, label=1,
      has_default_value=False, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='error', full_name='gnmi.SubscribeResponse.error', index=2,
      number=4, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
    _descriptor.OneofDescriptor(
      name='response', full_name='gnmi.SubscribeResponse.response',
      index=0, containing_type=None, fields=[]),
  ],
  serialized_start=1076,
  serialized_end=1200,
)
# protoc-generated Descriptor for gnmi.SubscriptionList: prefix(1),
# subscription(2, repeated), use_aliases(3, bool), qos(4), mode(5, enum),
# allow_aggregation(6, bool), use_models(7, repeated), encoding(8, enum).
# References the nested enum descriptor _SUBSCRIPTIONLIST_MODE, defined
# earlier in this generated module.  Generated code: do not hand-edit.
_SUBSCRIPTIONLIST = _descriptor.Descriptor(
  name='SubscriptionList',
  full_name='gnmi.SubscriptionList',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='prefix', full_name='gnmi.SubscriptionList.prefix', index=0,
      number=1, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='subscription', full_name='gnmi.SubscriptionList.subscription', index=1,
      number=2, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='use_aliases', full_name='gnmi.SubscriptionList.use_aliases', index=2,
      number=3, type=8, cpp_type=7, label=1,
      has_default_value=False, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='qos', full_name='gnmi.SubscriptionList.qos', index=3,
      number=4, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='mode', full_name='gnmi.SubscriptionList.mode', index=4,
      number=5, type=14, cpp_type=8, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='allow_aggregation', full_name='gnmi.SubscriptionList.allow_aggregation', index=5,
      number=6, type=8, cpp_type=7, label=1,
      has_default_value=False, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='use_models', full_name='gnmi.SubscriptionList.use_models', index=6,
      number=7, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='encoding', full_name='gnmi.SubscriptionList.encoding', index=7,
      number=8, type=14, cpp_type=8, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
    _SUBSCRIPTIONLIST_MODE,
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=1203,
  serialized_end=1524,
)
# protoc-generated Descriptor for gnmi.Subscription: path(1, message),
# mode(2, enum), sample_interval(3, uint64), suppress_redundant(4, bool),
# heartbeat_interval(5, uint64).  Generated code: do not hand-edit.
_SUBSCRIPTION = _descriptor.Descriptor(
  name='Subscription',
  full_name='gnmi.Subscription',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='path', full_name='gnmi.Subscription.path', index=0,
      number=1, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='mode', full_name='gnmi.Subscription.mode', index=1,
      number=2, type=14, cpp_type=8, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='sample_interval', full_name='gnmi.Subscription.sample_interval', index=2,
      number=3, type=4, cpp_type=4, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='suppress_redundant', full_name='gnmi.Subscription.suppress_redundant', index=3,
      number=4, type=8, cpp_type=7, label=1,
      has_default_value=False, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='heartbeat_interval', full_name='gnmi.Subscription.heartbeat_interval', index=4,
      number=5, type=4, cpp_type=4, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=1527,
  serialized_end=1686,
)
# protoc-generated Descriptor for gnmi.QOSMarking: a single uint32 field
# 'marking' (no. 1).  Generated code: do not hand-edit.
_QOSMARKING = _descriptor.Descriptor(
  name='QOSMarking',
  full_name='gnmi.QOSMarking',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='marking', full_name='gnmi.QOSMarking.marking', index=0,
      number=1, type=13, cpp_type=3, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=1688,
  serialized_end=1717,
)
# protoc-generated Descriptor for gnmi.Alias: 'path' (no. 1, message) and
# 'alias' (no. 2, string).  Generated code: do not hand-edit.
_ALIAS = _descriptor.Descriptor(
  name='Alias',
  full_name='gnmi.Alias',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='path', full_name='gnmi.Alias.path', index=0,
      number=1, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='alias', full_name='gnmi.Alias.alias', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=1719,
  serialized_end=1767,
)
# protoc-generated Descriptor for gnmi.AliasList: a single repeated message
# field 'alias' (no. 1; linked to _ALIAS after construction).  Generated code:
# do not hand-edit.
_ALIASLIST = _descriptor.Descriptor(
  name='AliasList',
  full_name='gnmi.AliasList',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='alias', full_name='gnmi.AliasList.alias', index=0,
      number=1, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=1769,
  serialized_end=1808,
)
# protoc-generated Descriptor for gnmi.SetRequest: prefix(1, message) plus
# three repeated message fields delete(2), replace(3) and update(4).
# Generated code: do not hand-edit.
_SETREQUEST = _descriptor.Descriptor(
  name='SetRequest',
  full_name='gnmi.SetRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='prefix', full_name='gnmi.SetRequest.prefix', index=0,
      number=1, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='delete', full_name='gnmi.SetRequest.delete', index=1,
      number=2, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='replace', full_name='gnmi.SetRequest.replace', index=2,
      number=3, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='update', full_name='gnmi.SetRequest.update', index=3,
      number=4, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=1811,
  serialized_end=1940,
)
# protoc-generated Descriptor for gnmi.SetResponse: prefix(1, message),
# response(2, repeated message), message(3, message; serialized FieldOptions
# '\030\001' appears to mark it deprecated -- confirm in the .proto) and
# timestamp(4, int64).  Generated code: do not hand-edit.
_SETRESPONSE = _descriptor.Descriptor(
  name='SetResponse',
  full_name='gnmi.SetResponse',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='prefix', full_name='gnmi.SetResponse.prefix', index=0,
      number=1, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='response', full_name='gnmi.SetResponse.response', index=1,
      number=2, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='message', full_name='gnmi.SetResponse.message', index=2,
      number=3, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\030\001'))),
    _descriptor.FieldDescriptor(
      name='timestamp', full_name='gnmi.SetResponse.timestamp', index=3,
      number=4, type=3, cpp_type=2, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=1943,
  serialized_end=2075,
)
# protoc-generated Descriptor for gnmi.UpdateResult: timestamp(1, int64;
# serialized FieldOptions '\030\001' appears to mark it deprecated), path(2,
# message), message(3, message), op(4, enum).  References the nested enum
# descriptor _UPDATERESULT_OPERATION, defined earlier in this generated
# module.  Generated code: do not hand-edit.
_UPDATERESULT = _descriptor.Descriptor(
  name='UpdateResult',
  full_name='gnmi.UpdateResult',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='timestamp', full_name='gnmi.UpdateResult.timestamp', index=0,
      number=1, type=3, cpp_type=2, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\030\001'))),
    _descriptor.FieldDescriptor(
      name='path', full_name='gnmi.UpdateResult.path', index=1,
      number=2, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='message', full_name='gnmi.UpdateResult.message', index=2,
      number=3, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='op', full_name='gnmi.UpdateResult.op', index=3,
      number=4, type=14, cpp_type=8, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
    _UPDATERESULT_OPERATION,
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=2078,
  serialized_end=2276,
)
# protoc-generated Descriptor for gnmi.GetRequest: prefix(1, message),
# path(2, repeated message), type(3, enum), encoding(5, enum; number 4 is
# absent -- presumably reserved in the .proto), use_models(6, repeated
# message).  References the nested enum descriptor _GETREQUEST_DATATYPE,
# defined earlier in this generated module.  Generated code: do not hand-edit.
_GETREQUEST = _descriptor.Descriptor(
  name='GetRequest',
  full_name='gnmi.GetRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='prefix', full_name='gnmi.GetRequest.prefix', index=0,
      number=1, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='path', full_name='gnmi.GetRequest.path', index=1,
      number=2, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='type', full_name='gnmi.GetRequest.type', index=2,
      number=3, type=14, cpp_type=8, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='encoding', full_name='gnmi.GetRequest.encoding', index=3,
      number=5, type=14, cpp_type=8, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='use_models', full_name='gnmi.GetRequest.use_models', index=4,
      number=6, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
    _GETREQUEST_DATATYPE,
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=2279,
  serialized_end=2518,
)
# protoc-generated Descriptor for gnmi.GetResponse: 'notification' (no. 1,
# repeated message) and 'error' (no. 2, message).  Generated code: do not
# hand-edit.
_GETRESPONSE = _descriptor.Descriptor(
  name='GetResponse',
  full_name='gnmi.GetResponse',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='notification', full_name='gnmi.GetResponse.notification', index=0,
      number=1, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='error', full_name='gnmi.GetResponse.error', index=1,
      number=2, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=2520,
  serialized_end=2603,
)
# protoc-generated Descriptor for gnmi.CapabilityRequest: an empty message
# (no fields).  Generated code: do not hand-edit.
_CAPABILITYREQUEST = _descriptor.Descriptor(
  name='CapabilityRequest',
  full_name='gnmi.CapabilityRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=2605,
  serialized_end=2624,
)
# protoc-generated Descriptor for gnmi.CapabilityResponse:
# supported_models(1, repeated message), supported_encodings(2, repeated
# enum), gNMI_version(3, string).  Generated code: do not hand-edit.
_CAPABILITYRESPONSE = _descriptor.Descriptor(
  name='CapabilityResponse',
  full_name='gnmi.CapabilityResponse',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='supported_models', full_name='gnmi.CapabilityResponse.supported_models', index=0,
      number=1, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='supported_encodings', full_name='gnmi.CapabilityResponse.supported_encodings', index=1,
      number=2, type=14, cpp_type=8, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='gNMI_version', full_name='gnmi.CapabilityResponse.gNMI_version', index=2,
      number=3, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=2627,
  serialized_end=2757,
)
# protoc-generated Descriptor for gnmi.ModelData: three string fields --
# name(1), organization(2), version(3).  Generated code: do not hand-edit.
_MODELDATA = _descriptor.Descriptor(
  name='ModelData',
  full_name='gnmi.ModelData',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='name', full_name='gnmi.ModelData.name', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='organization', full_name='gnmi.ModelData.organization', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='version', full_name='gnmi.ModelData.version', index=2,
      number=3, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=2759,
  serialized_end=2823,
)
# Post-construction linkage emitted by protoc.  The Descriptor() calls above
# were built with message_type/enum_type=None because the referenced
# descriptors did not yet exist; these statements patch in the real
# references, then register each oneof's member fields by appending the field
# to oneof.fields and setting the field's containing_oneof.  Statement order
# matters to the generated code -- do not hand-edit or reorder.
_NOTIFICATION.fields_by_name['prefix'].message_type = _PATH
_NOTIFICATION.fields_by_name['update'].message_type = _UPDATE
_NOTIFICATION.fields_by_name['delete'].message_type = _PATH
_UPDATE.fields_by_name['path'].message_type = _PATH
_UPDATE.fields_by_name['value'].message_type = _VALUE
_UPDATE.fields_by_name['val'].message_type = _TYPEDVALUE
_TYPEDVALUE.fields_by_name['decimal_val'].message_type = _DECIMAL64
_TYPEDVALUE.fields_by_name['leaflist_val'].message_type = _SCALARARRAY
_TYPEDVALUE.fields_by_name['any_val'].message_type = google_dot_protobuf_dot_any__pb2._ANY
# Register the twelve members of the TypedValue 'value' oneof.
_TYPEDVALUE.oneofs_by_name['value'].fields.append(
  _TYPEDVALUE.fields_by_name['string_val'])
_TYPEDVALUE.fields_by_name['string_val'].containing_oneof = _TYPEDVALUE.oneofs_by_name['value']
_TYPEDVALUE.oneofs_by_name['value'].fields.append(
  _TYPEDVALUE.fields_by_name['int_val'])
_TYPEDVALUE.fields_by_name['int_val'].containing_oneof = _TYPEDVALUE.oneofs_by_name['value']
_TYPEDVALUE.oneofs_by_name['value'].fields.append(
  _TYPEDVALUE.fields_by_name['uint_val'])
_TYPEDVALUE.fields_by_name['uint_val'].containing_oneof = _TYPEDVALUE.oneofs_by_name['value']
_TYPEDVALUE.oneofs_by_name['value'].fields.append(
  _TYPEDVALUE.fields_by_name['bool_val'])
_TYPEDVALUE.fields_by_name['bool_val'].containing_oneof = _TYPEDVALUE.oneofs_by_name['value']
_TYPEDVALUE.oneofs_by_name['value'].fields.append(
  _TYPEDVALUE.fields_by_name['bytes_val'])
_TYPEDVALUE.fields_by_name['bytes_val'].containing_oneof = _TYPEDVALUE.oneofs_by_name['value']
_TYPEDVALUE.oneofs_by_name['value'].fields.append(
  _TYPEDVALUE.fields_by_name['float_val'])
_TYPEDVALUE.fields_by_name['float_val'].containing_oneof = _TYPEDVALUE.oneofs_by_name['value']
_TYPEDVALUE.oneofs_by_name['value'].fields.append(
  _TYPEDVALUE.fields_by_name['decimal_val'])
_TYPEDVALUE.fields_by_name['decimal_val'].containing_oneof = _TYPEDVALUE.oneofs_by_name['value']
_TYPEDVALUE.oneofs_by_name['value'].fields.append(
  _TYPEDVALUE.fields_by_name['leaflist_val'])
_TYPEDVALUE.fields_by_name['leaflist_val'].containing_oneof = _TYPEDVALUE.oneofs_by_name['value']
_TYPEDVALUE.oneofs_by_name['value'].fields.append(
  _TYPEDVALUE.fields_by_name['any_val'])
_TYPEDVALUE.fields_by_name['any_val'].containing_oneof = _TYPEDVALUE.oneofs_by_name['value']
_TYPEDVALUE.oneofs_by_name['value'].fields.append(
  _TYPEDVALUE.fields_by_name['json_val'])
_TYPEDVALUE.fields_by_name['json_val'].containing_oneof = _TYPEDVALUE.oneofs_by_name['value']
_TYPEDVALUE.oneofs_by_name['value'].fields.append(
  _TYPEDVALUE.fields_by_name['json_ietf_val'])
_TYPEDVALUE.fields_by_name['json_ietf_val'].containing_oneof = _TYPEDVALUE.oneofs_by_name['value']
_TYPEDVALUE.oneofs_by_name['value'].fields.append(
  _TYPEDVALUE.fields_by_name['ascii_val'])
_TYPEDVALUE.fields_by_name['ascii_val'].containing_oneof = _TYPEDVALUE.oneofs_by_name['value']
_VALUE.fields_by_name['type'].enum_type = _ENCODING
_ERROR.fields_by_name['data'].message_type = google_dot_protobuf_dot_any__pb2._ANY
_SCALARARRAY.fields_by_name['element'].message_type = _TYPEDVALUE
_SUBSCRIBEREQUEST.fields_by_name['subscribe'].message_type = _SUBSCRIPTIONLIST
_SUBSCRIBEREQUEST.fields_by_name['poll'].message_type = _POLL
_SUBSCRIBEREQUEST.fields_by_name['aliases'].message_type = _ALIASLIST
# Register the members of the SubscribeRequest 'request' oneof.
_SUBSCRIBEREQUEST.oneofs_by_name['request'].fields.append(
  _SUBSCRIBEREQUEST.fields_by_name['subscribe'])
_SUBSCRIBEREQUEST.fields_by_name['subscribe'].containing_oneof = _SUBSCRIBEREQUEST.oneofs_by_name['request']
_SUBSCRIBEREQUEST.oneofs_by_name['request'].fields.append(
  _SUBSCRIBEREQUEST.fields_by_name['poll'])
_SUBSCRIBEREQUEST.fields_by_name['poll'].containing_oneof = _SUBSCRIBEREQUEST.oneofs_by_name['request']
_SUBSCRIBEREQUEST.oneofs_by_name['request'].fields.append(
  _SUBSCRIBEREQUEST.fields_by_name['aliases'])
_SUBSCRIBEREQUEST.fields_by_name['aliases'].containing_oneof = _SUBSCRIBEREQUEST.oneofs_by_name['request']
_SUBSCRIBERESPONSE.fields_by_name['update'].message_type = _NOTIFICATION
_SUBSCRIBERESPONSE.fields_by_name['error'].message_type = _ERROR
# Register the members of the SubscribeResponse 'response' oneof.
_SUBSCRIBERESPONSE.oneofs_by_name['response'].fields.append(
  _SUBSCRIBERESPONSE.fields_by_name['update'])
_SUBSCRIBERESPONSE.fields_by_name['update'].containing_oneof = _SUBSCRIBERESPONSE.oneofs_by_name['response']
_SUBSCRIBERESPONSE.oneofs_by_name['response'].fields.append(
  _SUBSCRIBERESPONSE.fields_by_name['sync_response'])
_SUBSCRIBERESPONSE.fields_by_name['sync_response'].containing_oneof = _SUBSCRIBERESPONSE.oneofs_by_name['response']
_SUBSCRIBERESPONSE.oneofs_by_name['response'].fields.append(
  _SUBSCRIBERESPONSE.fields_by_name['error'])
_SUBSCRIBERESPONSE.fields_by_name['error'].containing_oneof = _SUBSCRIBERESPONSE.oneofs_by_name['response']
_SUBSCRIPTIONLIST.fields_by_name['prefix'].message_type = _PATH
_SUBSCRIPTIONLIST.fields_by_name['subscription'].message_type = _SUBSCRIPTION
_SUBSCRIPTIONLIST.fields_by_name['qos'].message_type = _QOSMARKING
_SUBSCRIPTIONLIST.fields_by_name['mode'].enum_type = _SUBSCRIPTIONLIST_MODE
_SUBSCRIPTIONLIST.fields_by_name['use_models'].message_type = _MODELDATA
_SUBSCRIPTIONLIST.fields_by_name['encoding'].enum_type = _ENCODING
_SUBSCRIPTIONLIST_MODE.containing_type = _SUBSCRIPTIONLIST
_SUBSCRIPTION.fields_by_name['path'].message_type = _PATH
_SUBSCRIPTION.fields_by_name['mode'].enum_type = _SUBSCRIPTIONMODE
_ALIAS.fields_by_name['path'].message_type = _PATH
_ALIASLIST.fields_by_name['alias'].message_type = _ALIAS
_SETREQUEST.fields_by_name['prefix'].message_type = _PATH
_SETREQUEST.fields_by_name['delete'].message_type = _PATH
_SETREQUEST.fields_by_name['replace'].message_type = _UPDATE
_SETREQUEST.fields_by_name['update'].message_type = _UPDATE
_SETRESPONSE.fields_by_name['prefix'].message_type = _PATH
_SETRESPONSE.fields_by_name['response'].message_type = _UPDATERESULT
_SETRESPONSE.fields_by_name['message'].message_type = _ERROR
_UPDATERESULT.fields_by_name['path'].message_type = _PATH
_UPDATERESULT.fields_by_name['message'].message_type = _ERROR
_UPDATERESULT.fields_by_name['op'].enum_type = _UPDATERESULT_OPERATION
_UPDATERESULT_OPERATION.containing_type = _UPDATERESULT
_GETREQUEST.fields_by_name['prefix'].message_type = _PATH
_GETREQUEST.fields_by_name['path'].message_type = _PATH
_GETREQUEST.fields_by_name['type'].enum_type = _GETREQUEST_DATATYPE
_GETREQUEST.fields_by_name['encoding'].enum_type = _ENCODING
_GETREQUEST.fields_by_name['use_models'].message_type = _MODELDATA
_GETREQUEST_DATATYPE.containing_type = _GETREQUEST
_GETRESPONSE.fields_by_name['notification'].message_type = _NOTIFICATION
_GETRESPONSE.fields_by_name['error'].message_type = _ERROR
_CAPABILITYRESPONSE.fields_by_name['supported_models'].message_type = _MODELDATA
_CAPABILITYRESPONSE.fields_by_name['supported_encodings'].enum_type = _ENCODING
DESCRIPTOR.message_types_by_name['Notification'] = _NOTIFICATION
DESCRIPTOR.message_types_by_name['Update'] = _UPDATE
DESCRIPTOR.message_types_by_name['TypedValue'] = _TYPEDVALUE
DESCRIPTOR.message_types_by_name['Path'] = _PATH
DESCRIPTOR.message_types_by_name['Value'] = _VALUE
DESCRIPTOR.message_types_by_name['Error'] = _ERROR
DESCRIPTOR.message_types_by_name['Decimal64'] = _DECIMAL64
DESCRIPTOR.message_types_by_name['ScalarArray'] = _SCALARARRAY
DESCRIPTOR.message_types_by_name['SubscribeRequest'] = _SUBSCRIBEREQUEST
DESCRIPTOR.message_types_by_name['Poll'] = _POLL
DESCRIPTOR.message_types_by_name['SubscribeResponse'] = _SUBSCRIBERESPONSE
DESCRIPTOR.message_types_by_name['SubscriptionList'] = _SUBSCRIPTIONLIST
DESCRIPTOR.message_types_by_name['Subscription'] = _SUBSCRIPTION
DESCRIPTOR.message_types_by_name['QOSMarking'] = _QOSMARKING
DESCRIPTOR.message_types_by_name['Alias'] = _ALIAS
DESCRIPTOR.message_types_by_name['AliasList'] = _ALIASLIST
DESCRIPTOR.message_types_by_name['SetRequest'] = _SETREQUEST
DESCRIPTOR.message_types_by_name['SetResponse'] = _SETRESPONSE
DESCRIPTOR.message_types_by_name['UpdateResult'] = _UPDATERESULT
DESCRIPTOR.message_types_by_name['GetRequest'] = _GETREQUEST
DESCRIPTOR.message_types_by_name['GetResponse'] = _GETRESPONSE
DESCRIPTOR.message_types_by_name['CapabilityRequest'] = _CAPABILITYREQUEST
DESCRIPTOR.message_types_by_name['CapabilityResponse'] = _CAPABILITYRESPONSE
DESCRIPTOR.message_types_by_name['ModelData'] = _MODELDATA
DESCRIPTOR.enum_types_by_name['Encoding'] = _ENCODING
DESCRIPTOR.enum_types_by_name['SubscriptionMode'] = _SUBSCRIPTIONMODE
DESCRIPTOR.extensions_by_name['gnmi_service'] = gnmi_service
Notification = _reflection.GeneratedProtocolMessageType('Notification', (_message.Message,), dict(
DESCRIPTOR = _NOTIFICATION,
__module__ = 'gnmi_pb2'
# @@protoc_insertion_point(class_scope:gnmi.Notification)
))
_sym_db.RegisterMessage(Notification)
Update = _reflection.GeneratedProtocolMessageType('Update', (_message.Message,), dict(
DESCRIPTOR = _UPDATE,
__module__ = 'gnmi_pb2'
# @@protoc_insertion_point(class_scope:gnmi.Update)
))
_sym_db.RegisterMessage(Update)
TypedValue = _reflection.GeneratedProtocolMessageType('TypedValue', (_message.Message,), dict(
DESCRIPTOR = _TYPEDVALUE,
__module__ = 'gnmi_pb2'
# @@protoc_insertion_point(class_scope:gnmi.TypedValue)
))
_sym_db.RegisterMessage(TypedValue)
Path = _reflection.GeneratedProtocolMessageType('Path', (_message.Message,), dict(
DESCRIPTOR = _PATH,
__module__ = 'gnmi_pb2'
# @@protoc_insertion_point(class_scope:gnmi.Path)
))
_sym_db.RegisterMessage(Path)
Value = _reflection.GeneratedProtocolMessageType('Value', (_message.Message,), dict(
DESCRIPTOR = _VALUE,
__module__ = 'gnmi_pb2'
# @@protoc_insertion_point(class_scope:gnmi.Value)
))
_sym_db.RegisterMessage(Value)
Error = _reflection.GeneratedProtocolMessageType('Error', (_message.Message,), dict(
DESCRIPTOR = _ERROR,
__module__ = 'gnmi_pb2'
# @@protoc_insertion_point(class_scope:gnmi.Error)
))
_sym_db.RegisterMessage(Error)
Decimal64 = _reflection.GeneratedProtocolMessageType('Decimal64', (_message.Message,), dict(
DESCRIPTOR = _DECIMAL64,
__module__ = 'gnmi_pb2'
# @@protoc_insertion_point(class_scope:gnmi.Decimal64)
))
_sym_db.RegisterMessage(Decimal64)
ScalarArray = _reflection.GeneratedProtocolMessageType('ScalarArray', (_message.Message,), dict(
DESCRIPTOR = _SCALARARRAY,
__module__ = 'gnmi_pb2'
# @@protoc_insertion_point(class_scope:gnmi.ScalarArray)
))
_sym_db.RegisterMessage(ScalarArray)
SubscribeRequest = _reflection.GeneratedProtocolMessageType('SubscribeRequest', (_message.Message,), dict(
DESCRIPTOR = _SUBSCRIBEREQUEST,
__module__ = 'gnmi_pb2'
# @@protoc_insertion_point(class_scope:gnmi.SubscribeRequest)
))
_sym_db.RegisterMessage(SubscribeRequest)
Poll = _reflection.GeneratedProtocolMessageType('Poll', (_message.Message,), dict(
DESCRIPTOR = _POLL,
__module__ = 'gnmi_pb2'
# @@protoc_insertion_point(class_scope:gnmi.Poll)
))
_sym_db.RegisterMessage(Poll)
SubscribeResponse = _reflection.GeneratedProtocolMessageType('SubscribeResponse', (_message.Message,), dict(
DESCRIPTOR = _SUBSCRIBERESPONSE,
__module__ = 'gnmi_pb2'
# @@protoc_insertion_point(class_scope:gnmi.SubscribeResponse)
))
_sym_db.RegisterMessage(SubscribeResponse)
SubscriptionList = _reflection.GeneratedProtocolMessageType('SubscriptionList', (_message.Message,), dict(
DESCRIPTOR = _SUBSCRIPTIONLIST,
__module__ = 'gnmi_pb2'
# @@protoc_insertion_point(class_scope:gnmi.SubscriptionList)
))
_sym_db.RegisterMessage(SubscriptionList)
Subscription = _reflection.GeneratedProtocolMessageType('Subscription', (_message.Message,), dict(
DESCRIPTOR = _SUBSCRIPTION,
__module__ = 'gnmi_pb2'
# @@protoc_insertion_point(class_scope:gnmi.Subscription)
))
_sym_db.RegisterMessage(Subscription)
QOSMarking = _reflection.GeneratedProtocolMessageType('QOSMarking', (_message.Message,), dict(
DESCRIPTOR = _QOSMARKING,
__module__ = 'gnmi_pb2'
# @@protoc_insertion_point(class_scope:gnmi.QOSMarking)
))
_sym_db.RegisterMessage(QOSMarking)
Alias = _reflection.GeneratedProtocolMessageType('Alias', (_message.Message,), dict(
DESCRIPTOR = _ALIAS,
__module__ = 'gnmi_pb2'
# @@protoc_insertion_point(class_scope:gnmi.Alias)
))
_sym_db.RegisterMessage(Alias)
AliasList = _reflection.GeneratedProtocolMessageType('AliasList', (_message.Message,), dict(
DESCRIPTOR = _ALIASLIST,
__module__ = 'gnmi_pb2'
# @@protoc_insertion_point(class_scope:gnmi.AliasList)
))
_sym_db.RegisterMessage(AliasList)
SetRequest = _reflection.GeneratedProtocolMessageType('SetRequest', (_message.Message,), dict(
DESCRIPTOR = _SETREQUEST,
__module__ = 'gnmi_pb2'
# @@protoc_insertion_point(class_scope:gnmi.SetRequest)
))
_sym_db.RegisterMessage(SetRequest)
SetResponse = _reflection.GeneratedProtocolMessageType('SetResponse', (_message.Message,), dict(
DESCRIPTOR = _SETRESPONSE,
__module__ = 'gnmi_pb2'
# @@protoc_insertion_point(class_scope:gnmi.SetResponse)
))
_sym_db.RegisterMessage(SetResponse)
UpdateResult = _reflection.GeneratedProtocolMessageType('UpdateResult', (_message.Message,), dict(
DESCRIPTOR = _UPDATERESULT,
__module__ = 'gnmi_pb2'
# @@protoc_insertion_point(class_scope:gnmi.UpdateResult)
))
_sym_db.RegisterMessage(UpdateResult)
GetRequest = _reflection.GeneratedProtocolMessageType('GetRequest', (_message.Message,), dict(
DESCRIPTOR = _GETREQUEST,
__module__ = 'gnmi_pb2'
# @@protoc_insertion_point(class_scope:gnmi.GetRequest)
))
_sym_db.RegisterMessage(GetRequest)
GetResponse = _reflection.GeneratedProtocolMessageType('GetResponse', (_message.Message,), dict(
DESCRIPTOR = _GETRESPONSE,
__module__ = 'gnmi_pb2'
# @@protoc_insertion_point(class_scope:gnmi.GetResponse)
))
_sym_db.RegisterMessage(GetResponse)
CapabilityRequest = _reflection.GeneratedProtocolMessageType('CapabilityRequest', (_message.Message,), dict(
DESCRIPTOR = _CAPABILITYREQUEST,
__module__ = 'gnmi_pb2'
# @@protoc_insertion_point(class_scope:gnmi.CapabilityRequest)
))
_sym_db.RegisterMessage(CapabilityRequest)
CapabilityResponse = _reflection.GeneratedProtocolMessageType('CapabilityResponse', (_message.Message,), dict(
DESCRIPTOR = _CAPABILITYRESPONSE,
__module__ = 'gnmi_pb2'
# @@protoc_insertion_point(class_scope:gnmi.CapabilityResponse)
))
_sym_db.RegisterMessage(CapabilityResponse)
ModelData = _reflection.GeneratedProtocolMessageType('ModelData', (_message.Message,), dict(
DESCRIPTOR = _MODELDATA,
__module__ = 'gnmi_pb2'
# @@protoc_insertion_point(class_scope:gnmi.ModelData)
))
_sym_db.RegisterMessage(ModelData)
google_dot_protobuf_dot_descriptor__pb2.FileOptions.RegisterExtension(gnmi_service)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\312>\0050.3.1'))
_UPDATE.fields_by_name['value'].has_options = True
_UPDATE.fields_by_name['value']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\030\001'))
_VALUE.has_options = True
_VALUE._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('\030\001'))
_SETRESPONSE.fields_by_name['message'].has_options = True
_SETRESPONSE.fields_by_name['message']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\030\001'))
_UPDATERESULT.fields_by_name['timestamp'].has_options = True
_UPDATERESULT.fields_by_name['timestamp']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\030\001'))
try:
# THESE ELEMENTS WILL BE DEPRECATED.
# Please use the generated *_pb2_grpc.py files instead.
import grpc
from grpc.framework.common import cardinality
from grpc.framework.interfaces.face import utilities as face_utilities
from grpc.beta import implementations as beta_implementations
from grpc.beta import interfaces as beta_interfaces
class gNMIStub(object):
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.Capabilities = channel.unary_unary(
'/gnmi.gNMI/Capabilities',
request_serializer=CapabilityRequest.SerializeToString,
response_deserializer=CapabilityResponse.FromString,
)
self.Get = channel.unary_unary(
'/gnmi.gNMI/Get',
request_serializer=GetRequest.SerializeToString,
response_deserializer=GetResponse.FromString,
)
self.Set = channel.unary_unary(
'/gnmi.gNMI/Set',
request_serializer=SetRequest.SerializeToString,
response_deserializer=SetResponse.FromString,
)
self.Subscribe = channel.stream_stream(
'/gnmi.gNMI/Subscribe',
request_serializer=SubscribeRequest.SerializeToString,
response_deserializer=SubscribeResponse.FromString,
)
class gNMIServicer(object):
def Capabilities(self, request, context):
"""Capabilities allows the client to retrieve the set of capabilities that
is supported by the target. This allows the target to validate the
service version that is implemented and retrieve the set of models that
the target supports. The models can then be specified in subsequent RPCs
to restrict the set of data that is utilized.
Reference: gNMI Specification Section 3.2
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Get(self, request, context):
"""Retrieve a snapshot of data from the target. A Get RPC requests that the
target snapshots a subset of the data tree as specified by the paths
included in the message and serializes this to be returned to the
client using the specified encoding.
Reference: gNMI Specification Section 3.3
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Set(self, request, context):
"""Set allows the client to modify the state of data on the target. The
paths to modified along with the new values that the client wishes
to set the value to.
Reference: gNMI Specification Section 3.4
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Subscribe(self, request_iterator, context):
"""Subscribe allows a client to request the target to send it values
of particular paths within the data tree. These values may be streamed
at a particular cadence (STREAM), sent one off on a long-lived channel
(POLL), or sent as a one-off retrieval (ONCE).
Reference: gNMI Specification Section 3.5
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_gNMIServicer_to_server(servicer, server):
rpc_method_handlers = {
'Capabilities': grpc.unary_unary_rpc_method_handler(
servicer.Capabilities,
request_deserializer=CapabilityRequest.FromString,
response_serializer=CapabilityResponse.SerializeToString,
),
'Get': grpc.unary_unary_rpc_method_handler(
servicer.Get,
request_deserializer=GetRequest.FromString,
response_serializer=GetResponse.SerializeToString,
),
'Set': grpc.unary_unary_rpc_method_handler(
servicer.Set,
request_deserializer=SetRequest.FromString,
response_serializer=SetResponse.SerializeToString,
),
'Subscribe': grpc.stream_stream_rpc_method_handler(
servicer.Subscribe,
request_deserializer=SubscribeRequest.FromString,
response_serializer=SubscribeResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'gnmi.gNMI', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
class BetagNMIServicer(object):
"""The Beta API is deprecated for 0.15.0 and later.
It is recommended to use the GA API (classes and functions in this
file not marked beta) for all further purposes. This class was generated
only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0."""
def Capabilities(self, request, context):
"""Capabilities allows the client to retrieve the set of capabilities that
is supported by the target. This allows the target to validate the
service version that is implemented and retrieve the set of models that
the target supports. The models can then be specified in subsequent RPCs
to restrict the set of data that is utilized.
Reference: gNMI Specification Section 3.2
"""
context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
def Get(self, request, context):
"""Retrieve a snapshot of data from the target. A Get RPC requests that the
target snapshots a subset of the data tree as specified by the paths
included in the message and serializes this to be returned to the
client using the specified encoding.
Reference: gNMI Specification Section 3.3
"""
context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
def Set(self, request, context):
"""Set allows the client to modify the state of data on the target. The
paths to modified along with the new values that the client wishes
to set the value to.
Reference: gNMI Specification Section 3.4
"""
context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
def Subscribe(self, request_iterator, context):
"""Subscribe allows a client to request the target to send it values
of particular paths within the data tree. These values may be streamed
at a particular cadence (STREAM), sent one off on a long-lived channel
(POLL), or sent as a one-off retrieval (ONCE).
Reference: gNMI Specification Section 3.5
"""
context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
class BetagNMIStub(object):
"""The Beta API is deprecated for 0.15.0 and later.
It is recommended to use the GA API (classes and functions in this
file not marked beta) for all further purposes. This class was generated
only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0."""
def Capabilities(self, request, timeout, metadata=None, with_call=False, protocol_options=None):
"""Capabilities allows the client to retrieve the set of capabilities that
is supported by the target. This allows the target to validate the
service version that is implemented and retrieve the set of models that
the target supports. The models can then be specified in subsequent RPCs
to restrict the set of data that is utilized.
Reference: gNMI Specification Section 3.2
"""
raise NotImplementedError()
Capabilities.future = None
def Get(self, request, timeout, metadata=None, with_call=False, protocol_options=None):
"""Retrieve a snapshot of data from the target. A Get RPC requests that the
target snapshots a subset of the data tree as specified by the paths
included in the message and serializes this to be returned to the
client using the specified encoding.
Reference: gNMI Specification Section 3.3
"""
raise NotImplementedError()
Get.future = None
def Set(self, request, timeout, metadata=None, with_call=False, protocol_options=None):
"""Set allows the client to modify the state of data on the target. The
paths to modified along with the new values that the client wishes
to set the value to.
Reference: gNMI Specification Section 3.4
"""
raise NotImplementedError()
Set.future = None
def Subscribe(self, request_iterator, timeout, metadata=None, with_call=False, protocol_options=None):
"""Subscribe allows a client to request the target to send it values
of particular paths within the data tree. These values may be streamed
at a particular cadence (STREAM), sent one off on a long-lived channel
(POLL), or sent as a one-off retrieval (ONCE).
Reference: gNMI Specification Section 3.5
"""
raise NotImplementedError()
def beta_create_gNMI_server(servicer, pool=None, pool_size=None, default_timeout=None, maximum_timeout=None):
"""The Beta API is deprecated for 0.15.0 and later.
It is recommended to use the GA API (classes and functions in this
file not marked beta) for all further purposes. This function was
generated only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0"""
request_deserializers = {
('gnmi.gNMI', 'Capabilities'): CapabilityRequest.FromString,
('gnmi.gNMI', 'Get'): GetRequest.FromString,
('gnmi.gNMI', 'Set'): SetRequest.FromString,
('gnmi.gNMI', 'Subscribe'): SubscribeRequest.FromString,
}
response_serializers = {
('gnmi.gNMI', 'Capabilities'): CapabilityResponse.SerializeToString,
('gnmi.gNMI', 'Get'): GetResponse.SerializeToString,
('gnmi.gNMI', 'Set'): SetResponse.SerializeToString,
('gnmi.gNMI', 'Subscribe'): SubscribeResponse.SerializeToString,
}
method_implementations = {
('gnmi.gNMI', 'Capabilities'): face_utilities.unary_unary_inline(servicer.Capabilities),
('gnmi.gNMI', 'Get'): face_utilities.unary_unary_inline(servicer.Get),
('gnmi.gNMI', 'Set'): face_utilities.unary_unary_inline(servicer.Set),
('gnmi.gNMI', 'Subscribe'): face_utilities.stream_stream_inline(servicer.Subscribe),
}
server_options = beta_implementations.server_options(request_deserializers=request_deserializers, response_serializers=response_serializers, thread_pool=pool, thread_pool_size=pool_size, default_timeout=default_timeout, maximum_timeout=maximum_timeout)
return beta_implementations.server(method_implementations, options=server_options)
def beta_create_gNMI_stub(channel, host=None, metadata_transformer=None, pool=None, pool_size=None):
"""The Beta API is deprecated for 0.15.0 and later.
It is recommended to use the GA API (classes and functions in this
file not marked beta) for all further purposes. This function was
generated only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0"""
request_serializers = {
('gnmi.gNMI', 'Capabilities'): CapabilityRequest.SerializeToString,
('gnmi.gNMI', 'Get'): GetRequest.SerializeToString,
('gnmi.gNMI', 'Set'): SetRequest.SerializeToString,
('gnmi.gNMI', 'Subscribe'): SubscribeRequest.SerializeToString,
}
response_deserializers = {
('gnmi.gNMI', 'Capabilities'): CapabilityResponse.FromString,
('gnmi.gNMI', 'Get'): GetResponse.FromString,
('gnmi.gNMI', 'Set'): SetResponse.FromString,
('gnmi.gNMI', 'Subscribe'): SubscribeResponse.FromString,
}
cardinalities = {
'Capabilities': cardinality.Cardinality.UNARY_UNARY,
'Get': cardinality.Cardinality.UNARY_UNARY,
'Set': cardinality.Cardinality.UNARY_UNARY,
'Subscribe': cardinality.Cardinality.STREAM_STREAM,
}
stub_options = beta_implementations.stub_options(host=host, metadata_transformer=metadata_transformer, request_serializers=request_serializers, response_deserializers=response_deserializers, thread_pool=pool, thread_pool_size=pool_size)
return beta_implementations.dynamic_stub(channel, 'gnmi.gNMI', cardinalities, options=stub_options)
except ImportError:
pass
# @@protoc_insertion_point(module_scope)
| [
"liusheng198933@gmail.com"
] | liusheng198933@gmail.com |
d89ebcab0f970fc4bf36d21e6205b3316c25af4a | a8079efec61894fb6082986e66c4c146757fc895 | /src/__main__.py | 03f618d7b50266b668d1b471f5e62afc4edc2244 | [] | no_license | gsdlab/ClaferSMT | aaa5bd0c0c72f6a9b156529a871cced40e006cba | d8240b4503107641d62f7f913ebe50a88182d9a3 | refs/heads/master | 2021-01-16T21:23:22.838308 | 2015-08-20T00:24:54 | 2015-08-20T00:24:54 | 9,037,961 | 2 | 1 | null | 2018-08-21T13:48:02 | 2013-03-26T19:00:12 | TeX | UTF-8 | Python | false | false | 127 | py | '''
Created on Jul 10, 2014
@author: ezulkosk
'''
from front import ClaferRun
if __name__ == '__main__':
ClaferRun.main() | [
"ezulkosk@gsd.uwaterloo.ca"
] | ezulkosk@gsd.uwaterloo.ca |
39a174fca4efddf5f283fbbe9319ba79417cce5b | eac1fc1dcc4096ef2eaad920508b27c1bab4d5ea | /geotorch/pssdfixedrank.py | ef6ed7a635c49cb2a289dc23df629721006e5ba4 | [
"MIT"
] | permissive | hxj525279/geotorch | ddfa2ce32d5d6efa4a139d3ef7c4e2959ea67ed7 | 587edade84849b13498ff4137d16f164c9d5bffc | refs/heads/master | 2023-06-19T06:22:33.444612 | 2021-07-13T16:23:30 | 2021-07-13T16:23:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,426 | py | import torch
from .symmetric import SymF
from .fixedrank import softplus_epsilon, inv_softplus_epsilon
class PSSDFixedRank(SymF):
fs = {"softplus": (softplus_epsilon, inv_softplus_epsilon)}
def __init__(self, size, rank, f="softplus", triv="expm"):
r"""
Manifold of symmetric positive semidefinite matrices of rank :math:`r`.
Args:
size (torch.size): Size of the tensor to be parametrized
rank (int): Rank of the matrices.
It has to be less or equal to
:math:`\min(\texttt{size}[-1], \texttt{size}[-2])`
f (str or callable or pair of callables): Optional. Either:
- ``"softplus"``
- A callable that maps real numbers to the interval :math:`(0, \infty)`
- A pair of callables such that the first maps the real numbers to
:math:`(0, \infty)` and the second is a (right) inverse of the first
Default: ``"softplus"``
triv (str or callable): Optional.
A map that maps skew-symmetric matrices onto the orthogonal matrices
surjectively. This is used to optimize the :math:`Q` in the eigenvalue
decomposition. It can be one of ``["expm", "cayley"]`` or a custom
callable. Default: ``"expm"``
"""
super().__init__(size, rank, PSSDFixedRank.parse_f(f), triv)
@staticmethod
def parse_f(f):
if f in PSSDFixedRank.fs.keys():
return PSSDFixedRank.fs[f]
elif callable(f):
return f, None
elif isinstance(f, tuple) and callable(f[0]) and callable(f[1]):
return f
else:
raise ValueError(
"Argument f was not recognized and is "
"not callable or a pair of callables. "
"Should be one of {}. Found {}".format(list(PSSDFixedRank.fs.keys()), f)
)
def in_manifold_eigen(self, L, eps=1e-6):
r"""
Checks that an ordered vector of eigenvalues values is in the manifold.
For tensors with more than 1 dimension the first dimensions are
treated as batch dimensions.
Args:
L (torch.Tensor): Vector of eigenvalues
eps (float): Optional. Threshold at which the eigenvalues are
considered to be zero
Default: ``1e-6``
"""
return (
super().in_manifold_eigen(L, eps)
and (L[..., : self.rank] >= eps).all().item()
)
def sample(self, init_=torch.nn.init.xavier_normal_, factorized=True, eps=5e-6):
r"""
Returns a randomly sampled matrix on the manifold as
.. math::
WW^\intercal \qquad W_{i,j} \sim \texttt{init_}
If the sampled matrix has more than `self.rank` small singular values, the
smallest ones are clamped to be at least ``eps`` in absolute value.
The output of this method can be used to initialize a parametrized tensor as::
>>> layer = nn.Linear(20, 20)
>>> M = PSSD(layer.weight.size())
>>> geotorch.register_parametrization(layer, "weight", M)
>>> layer.weight = M.sample()
Args:
init\_ (callable): Optional.
A function that takes a tensor and fills it in place according
to some distribution. See
`torch.init <https://pytorch.org/docs/stable/nn.init.html>`_.
Default: ``torch.nn.init.xavier_normal_``
factorized (bool): Optional. Return the tuple :math:`(\Lambda, Q)` with an
eigenvalue decomposition of the sampled matrix. This can also be used
to initialize the layer.
Default: ``True``
eps (float): Optional. Minimum eigenvalue of the sampled matrix.
Default: ``5e-6``
"""
L, Q = super().sample(factorized=True, init_=init_)
with torch.no_grad():
# S >= 0, as given by torch.linalg.eigvalsh()
small = L < eps
L[small] = eps
if factorized:
return L, Q
else:
# Project onto the manifold
Qt = Q.transpose(-2, -1)
# Multiply the three of them as Q\LambdaQ^T
return Q @ (L.unsqueeze(-1).expand_as(Qt) * Qt)
| [
"lezcano-93@hotmail.com"
] | lezcano-93@hotmail.com |
f741345ccc1d0c73f7ba92302cd48c1d66f46f5a | 1c3609e3e66dfb68fd32d34aa9146168b1303fca | /portfolio/views.py | 6a17a4313648abd0a33307c8a48a887fa0300693 | [] | no_license | heyrun/django_portfolio | 7efea7d9ef6ee4679a706d5caa316ed3ed06256b | 62b67b558fd851e66ccad7dfc2c01d1e6935fc39 | refs/heads/master | 2021-05-23T09:59:52.669456 | 2020-04-05T12:41:54 | 2020-04-05T12:41:54 | 253,232,835 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 217 | py | from django.shortcuts import render
from .models import Project
# Create your views here.
def home(request):
project = Project.objects.all()
return render(request, 'project/home.html', {'projects':project})
| [
"ebakeaaron@gmail.com"
] | ebakeaaron@gmail.com |
c184acd04a38602b5ded60490a7282e7461d0389 | 0ffef863534c181f8f9c37263b3766a9c87d1838 | /src/python/setup.py | e4f33702531d69c7be7c5b75d06784cf66f2d1fa | [
"MIT"
] | permissive | michaellass/psychrolib | 35614e7ca4cbb9c09ccfcff32b84fc5043e6034a | bd71cdd8205a5644718ef4384b4f14db17f258a7 | refs/heads/master | 2020-12-01T14:48:27.273203 | 2019-11-21T17:54:03 | 2019-11-21T17:54:03 | 230,667,376 | 0 | 0 | null | 2019-12-28T21:07:56 | 2019-12-28T21:07:55 | null | UTF-8 | Python | false | false | 579 | py | #!/usr/bin/env python
from distutils.core import setup
setup(name='PsychroLib',
version='2.3.0',
maintainer = 'The PsychroLib Developers',
description='Library of psychrometric functions to calculate thermodynamic properties of air',
author='D. Thevenard and D. Meyer',
author_email='didierthevenard@users.noreply.github.com',
url='https://github.com/psychrometrics/psychrolib',
license='MIT',
platforms = ['Windows', 'Linux', 'Solaris', 'Mac OS-X', 'Unix'],
python_requires='>=3.6',
py_modules=['psychrolib'],
) | [
"noreply@github.com"
] | michaellass.noreply@github.com |
e9de6d9c4c93883d9ec63c2b7902f466a89f039d | 7cb060ca55ef26f8460dbfb732f5c54f4dd56d22 | /chapter01/fraction.py | b51ac5a97b4ea5f07001e0fbd43d4c3aead49c18 | [] | no_license | yoga24/py-ds-algo | de50719edba19a4aee0975b2f8f2d24d718c1a49 | fea506ea240ee7be95c88df1a17771746dca052c | refs/heads/master | 2020-04-13T03:00:27.005538 | 2019-01-31T00:19:32 | 2019-01-31T00:19:32 | 162,918,847 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,102 | py | def gcd(num1, num2):
while num1 % num2 != 0:
old_num1 = num1
old_num2 = num2
num1 = old_num2
num2 = old_num1 % old_num2
return num2
class Fraction:
def __init__(self, numerator, denominator) -> None:
if str(numerator).isdigit() and str(denominator).isdigit():
divisor = gcd(numerator, denominator)
self.num = numerator // divisor
self.den = denominator // divisor
else:
raise Exception('Numerator and Denominator should be int')
def __add__(self, other):
new_num = (self.num * other.den) + (self.den * other.num)
new_den = self.den * other.den
return Fraction(new_num, new_den)
def __sub__(self, other):
new_num = (self.num * other.den) - (self.den * other.num)
new_den = self.den * other.den
return Fraction(new_num, new_den)
def __mul__(self, other):
new_num = self.num * other.num
new_den = self.den * other.den
return Fraction(new_num, new_den)
def __truediv__(self, other):
new_num = self.num * other.den
new_den = self.den * other.num
return Fraction(new_num, new_den)
def __gt__(self, other):
return self.num/self.den > other.num/other.den
def __ge__(self, other):
return self.num/self.den >= other.num/other.den
def __lt__(self, other):
return self.num/self.den < other.num/other.den
def __le__(self, other):
return self.num/self.den <= other.num/other.den
def __ne__(self, other):
return not self.__eq__(other)
def __eq__(self, other):
return self.num == other.num and self.den == other.den
def __str__(self) -> str:
return str(self.num) + '/' + str(self.den)
def get_num(self):
return self.num
def get_den(self):
return self.den
f1 = Fraction(2, 3)
f2 = Fraction(2, 3)
print(f1.get_num())
print(f1.get_den())
print(f1 == f2)
print(f1 + f2)
print(f1 - f2)
print(f1 * f2)
print(f1 / f2)
print(Fraction(1, 2) / Fraction(2, 1))
f4 = Fraction(1, 'b')
| [
"yorganand@gmail.com"
] | yorganand@gmail.com |
2e92fd124092bb606c4af2b949bf986b8f0f39e0 | 33c6b5d9f1852ac878aada3eb646ac2eabc6b705 | /junk/users2/signals.py | 490388c9433b9bb4b0c0de06090ea7a91ecbb2b2 | [] | no_license | detalikota/trading2 | 3c0e4308f45a620fe4c034403143cf68dd16db9c | 1aaafb6bf7d304be1896efb5ea0963fc40893b23 | refs/heads/master | 2023-02-04T14:03:10.860410 | 2020-12-17T14:28:12 | 2020-12-17T14:28:12 | 313,858,784 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | from django.db.models.signals import post_save
from django.contrib.auth.models import User
from django.dispatch import receiver
from .models import Profile2
@receiver(post_save, sender=User) #when the user is saved - send this signal
def create_profile(sender, instance, created, **kwargs):
if created:
Profile2.objects.create(user=instance)
@receiver(post_save, sender=User) # save the profile
def save_profile(sender, instance, **kwargs):
instance.profile.save() | [
"detalikota@gmail.com"
] | detalikota@gmail.com |
7c853130aad5019099ef29c54968d5a106b81ca5 | 9c9813b15d6c99866b57cbef84ac70ae06b7a529 | /hyperparams.py | 66ab0e689274ec1cbc7304d924e8350ef4ec0c2f | [] | no_license | gr8joo/DSTC7-Keras | 9312af0ec53f320bcb03844aedb3ddf616a7fbbe | 07376f3b954017ba0091153ee6452b062e81ea0f | refs/heads/master | 2021-09-24T09:32:39.650183 | 2018-10-07T04:46:18 | 2018-10-07T04:46:18 | 141,435,136 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,513 | py | class hyper_parameters:
def __init__(self):
# General hyperparameters
self.vocab_size = 333456#326385#209825 ubuntu#4900 advising#321618
self.num_utterance_options = 100
# self.num_profile_options = 20
self.num_kb_options = 10
self.max_context_len = 600#1250 ubuntu #400 advising
self.max_utterance_len = 140#230 ubuntu #90 advising
self.max_kb_len = 200
# self.max_profile_len = 24
self.neg_inf = -9999
self.amplify_val = 5
#### Model speicific hyperparameters ####
# Dual encoder
self.de_rnn_dim = 64#256#300
self.de_embedding_dim = 300
# Memory network(n2n)
self.memn2n_rnn_dim = 234#256#64
self.memn2n_embedding_dim = 300
self.memn2n_drop_rate = 0.3
self.hops = 3
# CNN_1D
self.cnn_rnn_dim = 64
self.cnn_embedding_dim = 300
self.kernel_size = [2,3,4,5,6,7,8,9,10,11]
self.num_filters = 10
# self.cnn_drop_rate = 0.5
# Optimizer parameters
self.learning_rate = 0.001#0.0005
self.clip_norm = 1.0
self.batch_size = 32
# self.eval_batch_size = 10
self.num_epochs = 100
# Locations of vocab sets
self.embedding_path = '/ext2/dstc7/data/wiki_ubuntu_uKB_test_embedding_W.npy'
self.vocab_path = '/ext2/dstc7/data/ubuntu/ubuntu_uKB_test_lemma_vocab.txt'
# Locations of weights
# self.weights_path = '/tmp/weights/'
self.weights_path = '/home/hjhwang/Codes/dstc7-keras/weights/'
########## Data Variants ##########
# Advising
self.train_context_path = '/ext2/dstc7/train_data/train_advising_aKB_test12/train_context.npy'
self.train_context_speaker_path = '/ext2/dstc7/train_data/train_advising_aKB_test12/train_context_speaker.npy'
self.train_context_mask_path = '/ext2/dstc7/train_data/train_advising_aKB_test12/train_context_mask.npy'
self.train_context_len_path = '/ext2/dstc7/train_data/train_advising_aKB_test12/train_context_len.npy'
self.train_target_path = '/ext2/dstc7/train_data/train_advising/train_target.npy'
self.train_options_path = '/ext2/dstc7/train_data/train_advising_aKB_test12/train_options.npy'
self.train_options_len_path = '/ext2/dstc7/train_data/train_advising_aKB_test12/train_options_len.npy'
self.train_profile_path = '/ext2/dstc7/train_data/train_advising_aKB_test12/train_profile.npy'
self.train_context_profile_flag_path = '/ext2/dstc7/train_data/train_advising_aKB_test12/train_context_profile_flag.npy'
self.train_options_profile_flag_path = '/ext2/dstc7/train_data/train_advising_aKB_test12/train_options_profile_flag.npy'
self.train_kb_path = '/ext2/dstc7/train_data/train_advising_aKB_test12/train_KB.npy'
self.train_kb_flag_path = '/ext2/dstc7/train_data/train_advising_aKB_test12/train_KB_flag.npy'
self.train_kb_mask_path = '/ext2/dstc7/train_data/train_advising_aKB_test12/train_KB_mask.npy'
self.valid_context_path = '/ext2/dstc7/valid_data/valid_advising_aKB_test12/valid_context.npy'
self.valid_context_speaker_path = '/ext2/dstc7/valid_data/valid_advising_aKB_test12/valid_context_speaker.npy'
self.valid_context_mask_path = '/ext2/dstc7/valid_data/valid_advising_aKB_test12/valid_context_mask.npy'
self.valid_context_len_path = '/ext2/dstc7/valid_data/valid_advising_aKB_test12/valid_context_len.npy'
self.valid_target_path = '/ext2/dstc7/valid_data/valid_advising/valid_target.npy'
self.valid_options_path = '/ext2/dstc7/valid_data/valid_advising_aKB_test12/valid_options.npy'
self.valid_options_len_path = '/ext2/dstc7/valid_data/valid_advising_aKB_test12/valid_options_len.npy'
self.valid_profile_path = '/ext2/dstc7/valid_data/valid_advising_aKB_test12/valid_profile.npy'
self.valid_context_profile_flag_path = '/ext2/dstc7/valid_data/valid_advising_aKB_test12/valid_context_profile_flag.npy'
self.valid_options_profile_flag_path = '/ext2/dstc7/valid_data/valid_advising_aKB_test12/valid_options_profile_flag.npy'
self.valid_kb_path = '/ext2/dstc7/valid_data/valid_advising_aKB_test12/valid_KB.npy'
self.valid_kb_flag_path = '/ext2/dstc7/valid_data/valid_advising_aKB_test12/valid_KB_flag.npy'
self.valid_kb_mask_path = '/ext2/dstc7/valid_data/valid_advising_aKB_test12/valid_KB_mask.npy'
'''
# Ubuntu
self.train_context_path = '/ext2/dstc7/train_data/train_ubuntu_uKB_test/train_context.npy'# 'valid_data/valid_context.npy'#
self.train_context_speaker_path = '/ext2/dstc7/train_data/train_ubuntu_uKB_test/train_context_speaker.npy'# 'train_data/context_speaker.train'#
self.train_context_mask_path = '/ext2/dstc7/train_data/train_ubuntu_uKB_test/train_context_mask.npy'
self.train_context_len_path = '/ext2/dstc7/train_data/train_ubuntu_uKB_test/train_context_len.npy'# 'valid_data/valid_context_len.npy'#
self.train_target_path = '/ext2/dstc7/train_data/train_ubuntu/train_target.npy'# 'valid_data/valid_advising/valid_target.npy'#
self.train_options_path = '/ext2/dstc7/train_data/train_ubuntu_uKB_test/train_options.npy'# 'valid_data/valid_options.npy'#
self.train_options_len_path = '/ext2/dstc7/train_data/train_ubuntu_uKB_test/train_options_len.npy'# 'valid_data/valid_options_len.npy'#
self.train_profile_path = '/ext2/dstc7/train_data/train_ubuntu_uKB_test/train_profile.npy'
self.train_kb_path = '/ext2/dstc7/train_data/train_ubuntu_uKB_test/train_KB.npy'
self.train_kb_flag_path = '/ext2/dstc7/train_data/train_ubuntu_uKB_test/train_KB_flag.npy'#'train_data/train_ubuntu_uKB_test/train_kb_entity_flags_path'
self.train_kb_mask_path = '/ext2/dstc7/train_data/train_ubuntu_uKB_test/train_KB_mask.npy'
self.valid_context_path = '/ext2/dstc7/valid_data/valid_ubuntu_uKB_test/valid_context.npy'
self.valid_context_speaker_path = '/ext2/dstc7/valid_data/valid_ubuntu_uKB_test/valid_context_speaker.npy'# 'valid_data/context_speaker.valid'#
self.valid_context_mask_path = '/ext2/dstc7/valid_data/valid_ubuntu_uKB_test/valid_context_mask.npy'
self.valid_context_len_path = '/ext2/dstc7/valid_data/valid_ubuntu_uKB_test/valid_context_len.npy'
self.valid_target_path = '/ext2/dstc7/valid_data/valid_ubuntu/valid_target.npy'
self.valid_options_path = '/ext2/dstc7/valid_data/valid_ubuntu_uKB_test/valid_options.npy'
self.valid_options_len_path = '/ext2/dstc7/valid_data/valid_ubuntu_uKB_test/valid_options_len.npy'
self.valid_profile_path = '/ext2/dstc7/valid_data/valid_ubuntu_uKB_test/valid_profile.npy'
self.valid_kb_path = '/ext2/dstc7/valid_data/valid_ubuntu_uKB_test/valid_KB.npy'
self.valid_kb_flag_path = '/ext2/dstc7/valid_data/valid_ubuntu_uKB_test/valid_KB_flag.npy'#'valid_data/train_ubuntu_uKB_test/valid_kb_entity_flags_path'
self.valid_kb_mask_path = '/ext2/dstc7/valid_data/valid_ubuntu_uKB_test/valid_KB_mask.npy'
self.test_context_path = '/ext2/dstc7/test_data/test_ubuntu_uKB_test/test_context.npy'# 'valid_data/context.valid'#
self.test_context_speaker_path = '/ext2/dstc7/test_data/test_ubuntu_uKB_test/test_context_speaker.npy'# 'valid_data/context_speaker.valid'#
self.test_context_mask_path = '/ext2/dstc7/test_data/test_ubuntu_uKB_test/test_context_mask.npy'# 'valid_data/context_speaker.valid'#
self.test_context_len_path = '/ext2/dstc7/test_data/test_ubuntu_uKB_test/test_context_len.npy'
self.test_target_path = '/ext2/dstc7/test_data/test_ubuntu_uKB_test/test_target.npy'
self.test_options_path = '/ext2/dstc7/test_data/test_ubuntu_uKB_test/test_options.npy'# 'valid_data/options.valid'#
self.test_options_len_path = '/ext2/dstc7/test_data/test_ubuntu_uKB_test/test_options_len.npy'
self.test_profile_path = '/ext2/dstc7/test_data/test_ubuntu_uKB_test/test_profile.npy'
self.test_kb_path = '/ext2/dstc7/test_data/test_ubuntu_uKB_test/test_KB.npy'
self.test_kb_flag_path = '/ext2/dstc7/test_data/test_ubuntu_uKB_test/test_KB_flag.npy'#'valid_data/train_ubuntu_uKB_test/valid_kb_entity_flags_path'
self.test_kb_mask_path = '/ext2/dstc7/test_data/test_ubuntu_uKB_test/test_KB_mask.npy'
'''
def create_hyper_parameters():
return hyper_parameters()
| [
"hjhwang@ai.kaist.ac.kr"
] | hjhwang@ai.kaist.ac.kr |
ee0dce139dd669791b8ca2463966f8c75ed78303 | 78cfcb8b8c9179ac3ba51abe4f32b06e47107028 | /machine-learning-ex1/plotData.py | 76f8f96b7c19ef4986a2f6cc7dba911967934ec6 | [] | no_license | caiskdbk/Coursera-ML-in-Python | 060981c1d5ce6919c86ad9b0b52be32fa9f340c2 | 90346bb556596f7f7e0da1d2c31c0a79d040da70 | refs/heads/master | 2020-05-01T13:43:43.502488 | 2019-03-26T04:18:20 | 2019-03-26T04:18:20 | 137,133,329 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 817 | py |
'''
PLOTDATA Plots the data points x and y into a new figure
PLOTDATA(x,y) plots the data points and gives the figure axes labels of population and profit.
'''
import matplotlib.pyplot as plt
def plotData(X,Y):
# ====================== YOUR CODE HERE ======================
# Instructions: Plot the training data into a figure.
# Set the axes labels using the "xlabel" and "ylabel" .
# Assume the population and revenue data have been passed in
# as the x and y arguments of this function.
#
plt.scatter(X,Y,c='red',marker='X')
plt.xlabel("Population of City in 10,000s")
plt.ylabel("Profit in $10,000s")
plt.show(block=False)
plt.pause(5)
plt.close()
# ============================================================ | [
"caiskdbk@gmail.com"
] | caiskdbk@gmail.com |
0b34b5732a18165fbcd70164b4c2648ea5eaeeb0 | 661b6c3d0d2344f86ed126d9b4f6f10c0d9c630b | /track/admin.py | 1d2bcc9fdd717c1e4f90db9346add4da60c66ec6 | [] | no_license | Sababa123/activity_tracker | 69eae58dbbf7523dcc144d3f05f6952cc4e4225b | 7c5e2c83e5fc76f8c9a2c5f58569ed92c9eb4421 | refs/heads/master | 2020-06-25T17:43:14.321638 | 2019-08-11T21:45:16 | 2019-08-11T21:45:16 | 199,380,581 | 0 | 0 | null | 2019-07-29T04:53:25 | 2019-07-29T04:53:24 | null | UTF-8 | Python | false | false | 175 | py | from django.contrib import admin
from .models import Activity, ActivityTracker
# Register your models here.
admin.site.register(Activity)
admin.site.register(ActivityTracker) | [
"tahirs95@hotmail.com"
] | tahirs95@hotmail.com |
7b15c3a5d2060f2149607d1ee7ea040fb35c2eb7 | 913fb9ec1e709a5140676ba7b2371b1976afca72 | /seqPeaks/mirrorPeaks.py | db673bb1b55f332087c53b7b17d7dc5e614e6de3 | [] | no_license | cgreer/ResearchScripts | 171cfe9555ea06fdeb91084c12d07d1b45a2335c | 1107803bb1459d6b6e1dfb1a89679d2b6fd49062 | refs/heads/master | 2016-09-05T10:43:19.090247 | 2012-04-12T21:38:11 | 2012-04-12T21:38:11 | 1,673,080 | 0 | 3 | null | null | null | null | UTF-8 | Python | false | false | 840 | py | import cgPeaks
import compareData as compare
import math
import bioLibCG as cg
knowns = compare.tccFileToList('mouseKnownMirs.tcc', 0)
eLevels = []
for known in knowns:
chrom, strand, start, end = cg.tccSplit(known, True) #text...
if strand == '1':
strand = '-1'
else:
strand = '1'
oppTcc = cg.makeTcc(chrom, strand, start, end)
knownStretch = cgPeaks.stretch(known)
knownStretch.createPeaks(1,20)
kPos = knownStretch.getHighestPeak()
if kPos: eLevels.append(knownStretch.profile[kPos])
oppStretch = cgPeaks.stretch(oppTcc)
oppStretch.createPeaks(1,20)
oPos = oppStretch.getHighestPeak()
if oPos and kPos:
#determine if they are close enough to be considered mirrored...
if math.fabs(int(kPos) - int(oPos)) < 12:
print known, oPos, kPos, oppStretch.profile[oPos], knownStretch.profile[kPos]
print eLevels
| [
"chrisgreer88@gmail.com"
] | chrisgreer88@gmail.com |
6044f54eee3256ff3c1bfc8315828e03212e9021 | 278f6dac70d4f09a4e5e4a676e26ca27ef7556f0 | /Route_prediction/error.py | 27e4b1a69aa2447a370f615e32007f9f9ff6a65b | [
"Apache-2.0"
] | permissive | Saumya-Suvarna/machine-learning | 723b946e6d99f7fa6fe1ee3c92af01e8a1ab2f2a | d35cc1fc64d83d6d199461e296830e72daa75568 | refs/heads/master | 2020-06-02T19:34:56.916383 | 2017-10-22T21:04:31 | 2017-10-22T21:04:31 | 94,104,664 | 1 | 2 | null | 2017-10-22T21:04:32 | 2017-06-12T14:20:16 | Jupyter Notebook | UTF-8 | Python | false | false | 1,067 | py | from theano import tensor
import theano
import numpy
def const(v):
if theano.config.floatX == 'float32':
return numpy.float32(v)
else:
return numpy.float64(v)
rearth = const(6371)
deg2rad = const(3.141592653589793 / 180)
def hdist(a, b):
lat1 = a[:, 0] * deg2rad
lon1 = a[:, 1] * deg2rad
lat2 = b[:, 0] * deg2rad
lon2 = b[:, 1] * deg2rad
dlat = abs(lat1-lat2)
dlon = abs(lon1-lon2)
al = tensor.sin(dlat/2)**2 + tensor.cos(lat1) * tensor.cos(lat2) * (tensor.sin(dlon/2)**2)
d = tensor.arctan2(tensor.sqrt(al), tensor.sqrt(const(1)-al))
hd = const(2) * rearth * d
return tensor.switch(tensor.eq(hd, float('nan')), (a-b).norm(2, axis=1), hd)
def erdist(a, b):
lat1 = a[:, 0] * deg2rad
lon1 = a[:, 1] * deg2rad
lat2 = b[:, 0] * deg2rad
lon2 = b[:, 1] * deg2rad
x = (lon2-lon1) * tensor.cos((lat1+lat2)/2)
y = (lat2-lat1)
return tensor.sqrt(tensor.sqr(x) + tensor.sqr(y)) * rearth
def rmsle(a, b):
return tensor.sqrt( ( (tensor.log(a+1)-tensor.log(b+1)) ** 2 ).mean() )
| [
"saumya suvarna"
] | saumya suvarna |
b1090161823f1670f0cd235d50c84f2f4487efc5 | 2e5e7d1aea7b7cf735215de21ab82fdf013f8b6b | /main/migrations/0008_wordanalytics.py | 7a8a75356facadf1caa95898fdeb4f089554fd6c | [] | no_license | motionrus/bookmark | 67c01a7cf4da4b43fe4dd98e1bbb11572dbf23cd | 9f2009b3a05fd0c3e8b14cafc31c1a8ea4161297 | refs/heads/master | 2023-04-29T15:00:32.756740 | 2019-11-05T20:02:03 | 2019-11-05T20:02:03 | 114,532,804 | 0 | 0 | null | 2023-04-19T18:35:36 | 2017-12-17T12:03:16 | HTML | UTF-8 | Python | false | false | 734 | py | # Generated by Django 2.0 on 2018-01-18 07:32
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('main', '0007_auto_20180113_1744'),
]
operations = [
migrations.CreateModel(
name='WordAnalytics',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('pub_date', models.DateTimeField()),
('positions', models.TextField(default='', max_length=500)),
('bookmark', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='main.BookMark')),
],
),
]
| [
"stepanovkd@yandex.ru"
] | stepanovkd@yandex.ru |
27cdb855301a65a87fb5d1884fa1b8db76d7ac43 | ebacfd89347327f1800a8e33ce9654ec3541cc63 | /007/10001st-prime.py | 23ae8e02913925093362032ef13ba3df5ba1978e | [
"BSD-3-Clause"
] | permissive | ADWright18/Euler | 3ac0f684489f9d3ab6f3af9a1af51cd23ad38809 | 3f3dbbb60e89cd5ea5dfad560f796a763df0bbd8 | refs/heads/master | 2021-09-06T11:44:59.749544 | 2018-02-06T06:30:13 | 2018-02-06T06:30:13 | 115,306,521 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,036 | py | # Author: Adomous Wright
# Problem 007
# What is the 10001st prime number?
import math
primes = []
x = 0;
while len(primes) < 10001:
x += 1
if len(primes) == 0:
primes.append(1)
print('Added: ' + str(x))
print('Number of Primes; ' + str(len(primes)))
elif len(primes) == 1:
primes.append(2)
print('Added: ' + str(x))
print('Number of Primes; ' + str(len(primes)))
else:
for prime in primes:
if prime == 1:
continue
elif x % prime == 0:
break
elif prime > math.sqrt(x):
primes.append(x)
print('Added: ' + str(x))
print('Number of Primes; ' + str(len(primes)))
break
elif primes.index(prime) == len(primes) - 1:
primes.append(x)
print('Added: ' + str(x))
print('Number of Primes; ' + str(len(primes)))
break
print('10001st Prime Number: ' + str(x))
| [
"adomous.wright@uconn.edu"
] | adomous.wright@uconn.edu |
113f2aeb9ba582a085e977d64df0240587c81645 | 5c5e7b03c3373e6217665842f542ca89491290ff | /2016/day18.py | b5a92f12f811a0ccb9b0c88bae32c9802f1ce21c | [] | no_license | incnone/AdventOfCode | 9c35214e338e176b6252e52a25a0141a01e290c8 | 29eac5d42403141fccef3c3ddbb986e01c89a593 | refs/heads/master | 2022-12-21T21:54:02.058024 | 2022-12-15T17:33:58 | 2022-12-15T17:33:58 | 229,338,789 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 970 | py | from getinput import get_input
def parse_input(s):
return [True if c == '^' else False for c in s]
def next_trap_row(s):
next_row = [s[1]]
next_row += [(s1 and not s2) or (not s1 and s2) for s1, s2 in zip(s, s[2:])]
next_row += [s[-2]]
return next_row
def generate_traps(init_row, num_rows):
traps = [init_row]
for _ in range(num_rows - 1):
traps.append(next_trap_row(traps[-1]))
return traps
def trap_str(traps):
return '\n'.join(''.join('^' if c else '.' for c in line) for line in traps)
def part_1(trap_row):
traps = generate_traps(trap_row, 40)
return sum(sum(1 for x in line if not x) for line in traps)
def part_2(trap_row):
traps = generate_traps(trap_row, 400000)
return sum(sum(1 for x in line if not x) for line in traps)
if __name__ == "__main__":
the_trap_list = parse_input(get_input(18))
print('Part 1:', part_1(the_trap_list))
print('Part 2:', part_2(the_trap_list))
| [
"incohatus.none+git@gmail.com"
] | incohatus.none+git@gmail.com |
58ba1037fddf647624e53959b0ab45e4d4ee5099 | 0f1185636d97d0e9648025585b90f7fa7ca279f9 | /most-likely-patterns/utility.py | f1e3cdcc3ebffd33d73404b87382f69399203e75 | [] | no_license | wsgan001/myscope-sequential-analysis | 9eeca51b45b7c5bd33b4c0d24e058d20f46ac169 | 5901342695a998b56d2465ce69feab527b14f99b | refs/heads/master | 2020-03-31T02:02:52.848226 | 2018-09-18T18:34:17 | 2018-09-18T18:34:17 | 151,805,857 | 1 | 0 | null | 2018-10-06T04:48:53 | 2018-10-06T04:48:53 | null | UTF-8 | Python | false | false | 13,471 | py | import numpy as np
import random
import re
import operator
from hmmlearn import hmm
from keras.preprocessing.sequence import pad_sequences
from imblearn.over_sampling import SMOTE
from collections import Counter
from imblearn.under_sampling import ClusterCentroids
###################################################################
# read codebook from file
def loadCodeBook(codebook_filename):
codebook = []
with open(codebook_filename, "r") as filestream:
for line in filestream:
codebook.append(line.replace("\n", ""))
return codebook
###########################################################################################
def loadCodeBookFromTrainingFile(fileLocation):
all_code_map = {}
success_code_map = {}
unsuccess_code_map = {}
with open(fileLocation, "r") as filestream:
for line in filestream:
l = re.sub(r"\s+", "", line).split(",")
if l[len(l) - 1] == "500":
for i in range(0, len(l) - 1):
success_code_map[l[i]] = l[i]
all_code_map[l[i]] = l[i]
elif l[len(l) - 1] == "400":
for i in range(0, len(l) - 1):
unsuccess_code_map[l[i]] = l[i]
all_code_map[l[i]] = l[i]
success_sorted_map = sorted(success_code_map.items(), key=operator.itemgetter(0))
unsuccess_sorted_map = sorted(unsuccess_code_map.items(), key=operator.itemgetter(0))
all_sorted_map = sorted(all_code_map.items(), key=operator.itemgetter(0))
success_map = []
unsuccess_map = []
all_map = []
for key, value in all_sorted_map:
all_map.append(key)
for key, value in success_sorted_map:
success_map.append(key)
for key, value in unsuccess_sorted_map:
unsuccess_map.append(key)
return all_map, success_map, unsuccess_map
###########################################################################################
def loadData(fileLocation, codebook, flag):
code_to_int = dict((c, i) for i, c in enumerate(codebook))
seq = []
seq_label = []
lengths = []
map = {}
with open(fileLocation, "r") as filestream:
for line in filestream:
l = re.sub(r"\s+", "", line).split(",")
if (flag == 1 and l[len(l) - 1] == "500") or (flag == 0 and l[len(l) - 1] == "400"):
for i in range(0, len(l) - 1):
seq.append([code_to_int[l[i]]])
map[l[i]] = l[i]
lengths.append(len(l) - 1)
seq_label.append(l[len(l) - 1])
elif flag == 2:
var_len = 0
for i in range(0, len(l) - 1):
if l[i] in code_to_int.keys():
seq.append([code_to_int[l[i]]])
map[l[i]] = l[i]
var_len += 1
lengths.append(var_len)
seq_label.append(l[len(l) - 1])
# sorted_map = sorted(map.items(), key=operator.itemgetter(0))
# for key, value in sorted_map:
# print key
return np.array(seq), np.array(seq_label), np.array(lengths)
############################################################################################
def getHMMModel(n_states, n_observations, sequences, seq_lengths):
start_probability = np.ones(n_states)
start_probability = start_probability / n_states
transition_probability = np.ones(n_states * n_states).reshape(n_states, n_states)
transition_probability = transition_probability / n_states
emission_probability = np.ones(n_states * n_observations).reshape(n_states, n_observations)
emission_probability = emission_probability / n_observations
# create model and set initial values
model = hmm.MultinomialHMM(n_components=n_states, random_state=42)
model.startprob_ = start_probability
model.transmat_ = transition_probability
model.emissionprob_ = emission_probability
# fit model
model = model.fit(sequences, seq_lengths)
return model
#############################################################################################
def getMacroAveragePerformance(actual, predicted):
precision = 0.0
recall = 0.0
f_measure = 0.0
accuracy = 0.0
labels = ["500", "400", "400", "500"]
for k in [0, 2]:
tp = 0
fp = 0
tn = 0
fn = 0
for i in range(0, len(actual)):
if actual[i] == predicted[i] and actual[i] == labels[k]:
tp += 1
elif actual[i] != predicted[i] and actual[i] == labels[k]:
fn += 1
elif actual[i] == predicted[i] and actual[i] == labels[k + 1]:
tn += 1
elif actual[i] != predicted[i] and actual[i] == labels[k + 1]:
fp += 1
local_precision = (float(tp) / (tp + fp))
local_recall = (float(tp) / (tp + fn))
local_f_measure = (float(2 * local_precision * local_recall) / (local_precision + local_recall))
accuracy = (float(tp + tn) / (tp + fp + tn + fn))
# for checking calculation
# print tp, fp, tn, fn
# print local_accuracy, local_precision, local_recall, local_f_measure
precision += local_precision
recall += local_recall
f_measure += local_f_measure
return accuracy, precision / 2, recall / 2, f_measure / 2
#############################################################################################
def getMicroAveragePerformance(actual, predicted):
precision = 0.0
recall = 0.0
f_measure = 0.0
accuracy = 0.0
total_sample = len(actual)
labels = ["500", "400", "400", "500"]
for k in [0, 2]:
tp = 0
fp = 0
tn = 0
fn = 0
for i in range(0, len(actual)):
if actual[i] == predicted[i] and actual[i] == labels[k]:
tp += 1
elif actual[i] != predicted[i] and actual[i] == labels[k]:
fn += 1
elif actual[i] == predicted[i] and actual[i] == labels[k + 1]:
tn += 1
elif actual[i] != predicted[i] and actual[i] == labels[k + 1]:
fp += 1
local_precision = (float(tp) / (tp + fp))
local_recall = (float(tp) / (tp + fn))
local_f_measure = (float(2 * local_precision * local_recall) / (local_precision + local_recall))
accuracy = (float(tp + tn) / (tp + fp + tn + fn))
# for checking calculation
# print tp, fp, tn, fn
# print local_accuracy, local_precision, local_recall, local_f_measure
precision += (local_precision * (float(tp + fn) / total_sample))
recall += (local_recall * (float(tp + fn) / total_sample))
f_measure += (local_f_measure * (float(tp + fn) / total_sample))
return accuracy, precision, recall, f_measure
#############################################################################################
def createTrainAndTestFile(data, kFolds, training_filename, testing_filename):
foldSize = len(data) / kFolds
random.shuffle(data)
test = data[:foldSize]
train = data[foldSize:]
with open(training_filename, "w") as output:
for x in train:
output.write(x)
with open(testing_filename, "w") as output:
for x in test:
output.write(x)
#############################################################################################
def readAllData(data_filename):
data = []
with open(data_filename, "r") as filestream:
for line in filestream:
data.append(line)
return data
#############################################################################################
def AUC(y_true, y_pred):
not_y_pred = np.logical_not(y_pred)
y_int1 = y_true * y_pred
y_int0 = np.logical_not(y_true) * not_y_pred
TP = np.sum(y_pred * y_int1)
FP = np.sum(y_pred) - TP
TN = np.sum(not_y_pred * y_int0)
FN = np.sum(not_y_pred) - TN
TPR = np.float(TP) / (TP + FN)
FPR = np.float(FP) / (FP + TN)
return ((1 + TPR - FPR) / 2)
#######################################################################
# create startified folds for cross validation
def createStartifiedFolds(codebook, kFolds=10):
folds = []
success = []
unsuccess = []
max_len = 0
len_tmp = 0
code_to_int = dict((c, i + 1) for i, c in enumerate(codebook))
with open("data/successful.txt", "r") as filestream:
for line in filestream:
len_tmp = len(line.split(","))
if len_tmp > max_len:
max_len = len_tmp
currentline = line.replace("\n", "").split(",")
seq = []
for s in currentline:
if s in code_to_int.keys():
seq.append(int(code_to_int[s]))
else:
seq.append(500)
success.append(seq)
random.shuffle(success)
with open("data/unsuccessful.txt", "r") as fstream:
for line in fstream:
len_tmp = len(line.split(","))
if len_tmp > max_len:
max_len = len_tmp
currentline = line.replace("\n", "").split(",")
seq = []
for s in currentline:
if s in code_to_int.keys():
seq.append(int(code_to_int[s]))
else:
seq.append(400)
unsuccess.append(seq)
random.shuffle(unsuccess)
for i in range(0, kFolds):
foldSize_succ = int(float(len(success)) / kFolds)
foldSize_unsucc = int(float(len(unsuccess)) / kFolds)
idx_succ = range(i * foldSize_succ, i * foldSize_succ + foldSize_succ)
random.shuffle(idx_succ)
idx_unsucc = range(i * foldSize_unsucc, i * foldSize_unsucc + foldSize_unsucc)
random.shuffle(idx_unsucc)
test = [success[index] for index in idx_succ] + [unsuccess[index] for index in idx_unsucc]
train = [success[index] for index in range(0, len(success)) if index not in idx_succ] + \
[unsuccess[index] for index in range(0, len(unsuccess)) if index not in idx_unsucc]
random.shuffle(test)
random.shuffle(train)
folds.append([test, train])
return folds, max_len
###################################################################
def writeSampledSequences(X, y, codebook, outputdata_filename):
int_to_code = dict((i + 1, c) for i, c in enumerate(codebook))
f = open(outputdata_filename, "w")
for i in range(0, len(X)):
seq = []
for s in X[i]:
val = int(round(s))
if val > 0:
if val in int_to_code.keys():
seq.append(str(int_to_code[val]))
seq.append(str(y[i]))
f.write(",".join(seq) + "\n")
f.close()
#######################################################################
# create startified folds for cross validation
def createUnderOrOverSample(method, given_data, outputdata_filename, max_len, codebook):
dataX = []
dataY = []
for xx in given_data:
dataX.append(xx[0:-1])
dataY.append(xx[-1])
X = pad_sequences(dataX, maxlen=max_len, dtype='float32')
X_norm = X / (float(len(codebook)))
y_norm = np.array(dataY)
# perform over or under sampling
X_d = []
y_res = []
if method == "over":
sm = SMOTE(kind='borderline2')
X_res, y_res = sm.fit_sample(X_norm, y_norm)
else:
sm = ClusterCentroids()
X_res, y_res = sm.fit_sample(X_norm, y_norm)
X_d = X_res * (float(len(codebook)))
writeSampledSequences(X_d, y_res, codebook, outputdata_filename)
#######################################################################
# load transition dictionary
def loadDictionary(training_filename, n_order, label):
transition_dict = {}
with open(training_filename, "r") as file_stream:
for line in file_stream:
words = line.replace("\n", "").split(",")
actual_label = words[len(words) - 1]
if len(words) <= n_order:
continue
if label == actual_label:
for i in xrange(0, len(words) - n_order - 1):
current_tuple = tuple([words[j] for j in xrange(i, i + n_order + 1)])
if current_tuple in transition_dict.keys():
transition_dict[current_tuple] += 1
else:
transition_dict[current_tuple] = 1
return transition_dict
#######################################################################
# load transition dictionary
def loadTransitionDictionary(training_filename, n_order, label):
transition_dict = {}
with open(training_filename, "r") as file_stream:
for line in file_stream:
words = line.replace("\n", "").split(",")
actual_label = words[len(words) - 1]
if len(words) <= n_order:
continue
if label == actual_label:
for i in xrange(0, len(words) - n_order - 1):
current_tuple = tuple([words[j] for j in xrange(i, i + n_order)])
if current_tuple in transition_dict.keys():
transition_dict[current_tuple].append(words[i + n_order])
else:
transition_dict[current_tuple] = [words[i + n_order]]
return transition_dict
| [
"mehedi@wayne.edu"
] | mehedi@wayne.edu |
f1da8fb43bb78b4f502b576a1f67c671e6e1abed | 1a4bc1a11fdb3f714f22f5e0e826b47aa0569de2 | /lab/lab04/tests/q1_3.py | 79768f8de727ed291ba49dffc59b456e772584a8 | [] | no_license | taylorgibson/ma4110-fa21 | 201af7a044fd7d99140c68c48817306c18479610 | a306e1b6e7516def7de968781f6c8c21deebeaf5 | refs/heads/main | 2023-09-05T21:31:44.259079 | 2021-11-18T17:42:15 | 2021-11-18T17:42:15 | 395,439,687 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 227 | py | test = { 'name': 'q1_3',
'points': None,
'suites': [{'cases': [{'code': ">>> disemvowel('datasceince')\n'dtscnc'", 'hidden': False, 'locked': False}], 'scored': True, 'setup': '', 'teardown': '', 'type': 'doctest'}]}
| [
"taylorgibson@gmail.com"
] | taylorgibson@gmail.com |
af1ce0a5d138df4c7ce752ce6ffef31581f95c4b | 0a2432cb9e57f851eadc694fc6dba2f7233aa5fc | /szdgsadg.py | be5aaf0c71e7bb1d3728fd2eae31312c73b4fe0f | [] | no_license | Snehagovindg/sneha | dd7f39d6a4dceb877053ceacc4f3dfe9191c118c | ddde45e698d15d4ce45bf26f0ed68ab474bcce49 | refs/heads/master | 2020-04-20T05:14:06.901320 | 2019-02-15T05:46:05 | 2019-02-15T05:46:05 | 168,650,390 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 139 | py |
n=int(input())
temp=n
rev=0
while(n>0):
dig=n%10
rev=rev*10+dig
n=n//10
if(temp==rev):
print("yes")
else:
print("no")
| [
"noreply@github.com"
] | Snehagovindg.noreply@github.com |
8d0c62392f6d70fefb36a7c95a2490437b47f76d | 72667fd776fb0ab12f5973287361792d0e980365 | /Tugas Apl5B__20083000046_Garvasilus Mariano Gampur_2B.py | 2942b86519793254c477837a47877173908232d8 | [] | no_license | Sillus1902/Anogampurr | b40f9d32257393f1017f734d3cdda058bb60e396 | b25025196ba943b15dfe887131014315a539fc13 | refs/heads/main | 2023-06-01T15:54:27.470181 | 2021-06-13T17:51:10 | 2021-06-13T17:51:10 | 376,605,805 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 487 | py | """
@Author Kambing Geprek:)
"""
jwb = "y"
while jwb=="y":
print("CEK GOLONGAN USIA")
print("------------------")
n = input("Masukkan Umur = ")
u = int(n)
if u>60:
status = "Lansia"
elif u>=35:
status = "Dewasa"
elif u>=18:
status = "Pemuda"
elif u>=15:
status = "Remaja"
else:
status="Anak"
print (status)
jwb = input(">> Mau mengulangi ? y/t = ")
if jwb=="t":
break | [
"noreply@github.com"
] | Sillus1902.noreply@github.com |
7df36e9b7011ced02211104785a1f7ec7ac131b9 | 93baa2d8d518345fe5b4a7e4958c5b5673bcdc39 | /spider2.1/telecom2/oldcrawl.py | bb57dd2c5799345cb5389b75e0e83fb0b86483a0 | [] | no_license | sunbopython/huoyan | 2e1fec8e4ab0d3d670775e6d4f34b335f8619a16 | 90c3900f5500e67df4c8f1b2ac17b945a4e73847 | refs/heads/master | 2020-03-20T23:06:18.628736 | 2018-06-19T02:35:30 | 2018-06-19T02:35:30 | 137,829,656 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 20,339 | py | # coding=utf-8
import os
import re
import json
import time
import urllib
from datetime import datetime
from datetime import timedelta
import requests
from requests.packages.urllib3.exceptions import InsecureRequestWarning
# Suppress InsecureRequestWarning for unverified HTTPS requests
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
from crawl_base import CrawlBase
from constants_spider import CRAWL_SUCCESS
from constants_spider import CRAWL_FAIL
from constants_spider import NEED_MORE
from constants_spider import CRAWL_TIMEOUT
from constants_spider import INPUT_ERROR
from constants_spider import OTHER_INFO
from constants_spider import TYPEVALUE
from constants_spider import WAITTIME
import request_handler
class SpiderMain(CrawlBase):
# 测试地址 http://192.168.30.135:8080/9f_insight/telecom/crawlData?phoneNum=9a46be7e7b929b230f7cf0c0d488e0f4&password=666309b93e7c8cc0937b913437bfed74&userid=testuserid_12345&nocache=1
# 发送验证码 http://192.168.30.135:8080/9f_insight/telecom/crawlData?token=5751123e-d862-4a33-b166-4a930ddb6bad&smscode=625493
def __init__(self, dict_json):
super(SpiderMain, self).__init__(dict_json, key=['password'])
self.phoneNum = dict_json['phoneNum']
self.password = dict_json['password']
self.token = dict_json['token']
self.session = requests.session()
# 爬取数据月数
self.month = 6
def crawl(self):
# 登录
self.account = self.password_login()
if self.status == None:
self.logger.info('登录成功')
data = {}
data['pay_info'] = self.crawl_hisBill()
# print json.dumps(data['pay_info'], ensure_ascii= False).encode('utf-8')
self.verifycode_handler()
if self.status == None:
data['call_info'], data['sms_info'], data['net_info'] = self.get_detail()
if self.status == None:
data['per_info'] = self.get_per_info()
result = json.dumps([data], ensure_ascii= False).encode('utf-8')
self.redisUtils.setNotify(type=TYPEVALUE,
token=self.token,
val="1",
decs="抓取成功!",
result=result)
self.logger.info('爬取成功')
# return True
return self.rmuserdirFlag
self.logger.info(self.desc)
self.redisUtils.setNotify(token=self.token, val=self.status, decs=self.desc)
# return True
return self.rmuserdirFlag
def password_login(self):
# 用户名密码登录,登录成功返回账户信息
self.logger.info('开始登陆')
account = request_handler.password_login(self.phoneNum, self.password)
self.takePage('login.html',
json.dumps(account, ensure_ascii=False).encode('utf-8'),
'登陆返回')
if account:
self.logger.info('登录成功')
return account
self.status = INPUT_ERROR
self.desc = '用户名或密码错误'
def crawl_hisBill(self):
self.logger.info('获取历史账单')
daylist = self.get_date_list()
mset = set([d[:6] for d in daylist])
data = []
for m in mset:
hisBill = request_handler.fetch_hisBill(self.account, m)
self.logger.info('爬取%s的历史账单'%m)
self.takePage('hisBill_%s.html'%m,
json.dumps(hisBill, ensure_ascii=False).encode('utf-8'),
'历史账单')
# 每月一日至三日为出帐日,查不到上一月的账单,所以查询时会返回服务器发生异常
if hisBill['Response']['ResponseData']['ResultDesc'] == u'服务器发生异常':
self.logger.info('服务器发生异常,爬取月份%s'%m)
continue
if hisBill['Response']['ResponseData']['ResultDesc'] == u'未检索到数据':
self.logger.info('未检索到数据,爬取月份%s'%m)
continue
if hisBill['Response']['ResponseData']['ResultDesc'] == u'账单处于出账期,请出账期以后进行查询':
self.logger.info('账单处于出账期,请出账期以后进行查询,爬取月份%s'%m)
continue
i = {}
d = hisBill['Response']['ResponseData']['Data']
# 未付费用
i['charge_pay'] = d['ChargeShouldpay'] if d['ChargeShouldpay'] != u'null' else ''
# 已付费用
i['charge_paid'] = d['ChargePaid'] if d['ChargePaid'] != u'null' else ''
# 该月消费总额
i['charge_all'] = d['ChargeAll'] if d['ChargeAll'] != u'null' else ''
# 用户名(姓名)
i['acct_name'] = d['AcctName'] if d['AcctName'] != u'null' else ''
self.AcctName = i['acct_name']
# 账户信息
i['account_info'] = d['ChargeAccountInfo'] if d['ChargeAccountInfo'] != u'null' else ''
# 日期
i['pay_date'] = m
# 可用预存款及可用赠款抵扣
i['charge_discount'] = ''
data.append(i)
return data
def verifycode_handler(self):
res = request_handler.send_detail_verifycode(self.account)
self.takePage('verifycode.html',
json.dumps(res, ensure_ascii=False).encode('utf-8'),
'发送短信验证码返回')
if res:
self.logger.info(res)
self.status = OTHER_INFO
self.desc = res
return
self.redisUtils.setNotify(token = self.token,
val = NEED_MORE,
decs='需要短信验证码')
stime = datetime.now()
self.logger.info('等待用户输入')
while True:
time.sleep(0.2)
inputValue = self.redisUtils.getNotify(self.token, 'smscode')
if inputValue:
self.logger.info('用户输入短信验证码为%s'%inputValue)
self.verifycode = inputValue
break
else:
eclipseTimes = datetime.now() - stime
if eclipseTimes.total_seconds() > WAITTIME:
self.logger.info('接收用输入输入超时:%s' % self.token)
self.status = INPUT_ERROR
self.desc = '接收用输入输入超时'
time.sleep(1)
return
def get_detail(self):
day_list = self.get_date_list()
call_info = self.crawl_detail_list('1', '语音', day_list)
if call_info:
call_info = self.get_call(call_info)
sms_info = self.crawl_detail_list('2', '短信', day_list)
if sms_info:
sms_info = self.get_sms(sms_info)
net_info = self.crawl_detail_list('3', '流量', day_list)
if net_info:
net_info = self.get_net(net_info)
return call_info, sms_info, net_info
return None, None, None
def get_per_info(self):
return {
# 账号归属省份
'province' : '',
# 账号归属城市
'city' : '',
# 身份证
'id_card' : '',
# 地址
'addr' : '',
# 等级
'level' : '',
# 数据源
'user_source' : 'CHINA_TELECOM',
# 账号星级
'starLevel' : '',
# 账号状态
'state' : '',
# 姓名
'real_name' : self.AcctName,
# 用户手机号码
'phone' : self.phoneNum,
# 运营商标识
'flag' : 'China Telecom',
# 入网时间
'open_time' : '',
# 客户性别
'custsex' : '',
# 使用套餐
'packageName' : '',
# 注册地址
'certaddr' : '',
# 最近登陆时间
'lasttime' : '',
}
def get_net(self, net_info):
new_net_info = []
for m in net_info:
for d in m:
i = {}
# 上网类型
i['net_type'] = d['NetType'] if d['NetType'] != u'null' else ''
# 上网时间
i['net_time'] = d['NetTime'] if d['NetTime'] != u'null' else ''
# 上网流量/KB
i['net_flow'] = d['NetFlow'] if d['NetFlow'] != u'null' else ''
# 花费金额
i['net_fee'] = d['NetFee'] if d['NetFee'] != u'null' else ''
# 上网地区
i['net_area'] = d['NetArea'] if d['NetArea'] != u'null' else ''
# 网络业务
i['net_business'] = d['NetBusiness'] if d['NetBusiness'] != u'null' else ''
new_net_info.append(i)
return new_net_info
def get_sms(self, sms_info):
new_sms_info = []
for m in sms_info:
for d in m:
i = {}
# 起始时间
i['sms_time'] = d['SmsTime'] if d['SmsTime'] != u'null' else ''
# 对方号码
i['sms_mobile'] = d['SmsMobile'] if d['SmsMobile'] != u'null' else ''
# 通话费
i['sms_fee'] = d['SmsCost'] if d['SmsCost'] != u'null' else ''
# 发送地区
i['sms_area '] = d['SmsArea'] if d['SmsArea'] != u'null' else ''
# 传送方式,1为接收,2为发送
# 短信详单只有发送的短信才会有记录,接受短信没有记录
i['sms_type'] = ''
# 原数据中有SmsType,SmsStyle两个字段,但app页面上只展示了:时间、号码、话费三个字段,不清楚这两个字段的真实含义,以及SmsStyle是否为业务类型,所以下面的业务类型暂时设为空
# 业务类型,01为国内短信,02为国际短信
i['sms_style'] = ''
new_sms_info.append(i)
return new_sms_info
def get_call(self, call_info):
new_call_info = []
for m in call_info:
for d in m:
i = {}
# 呼叫类型
i['trade_type'] = d['CallStyle'] if d['CallStyle'] != u'null' else ''
# 拨打时间
i['call_time'] = d['CallTime'] if d['CallTime'] != u'null' else ''
# 对方号码
i['receive_phone'] = d['CallMobile'] if d['CallMobile'] != u'null' else ''
# 对方归属地
i['called_home'] = d['CallArea'] if d['CallArea'] != u'null' else ''
# 通话费
i['call_fee'] = d['CallFee'] if d['CallFee'] != u'null' else ''
# 通话时长
i['trade_time'] = d['CallTimeCost'] if d['CallTimeCost'] != u'null' else ''
# 本机通话地
i['trade_addr'] = ''
# 通话类型
if d['CallType'] == u'0':
CallType = u'主叫'
if d['CallType'] == u'1':
CallType = u'被叫'
if d['CallType'] == u'null':
CallType = ''
i['call_type'] = CallType
# 记录内容
i['call_data'] = ''
new_call_info.append(i)
return new_call_info
def crawl_detail_list(self, detail_type, m, day_list):
detail_list = []
for day in day_list:
# 如果是电信系统的内部错误,会在尝试5次,如果不成功则跳过
# 如果是验证码不正确,直接返回
error_falg = 0
while True:
# time.sleep(0.2)
self.logger.info('正在爬取详单:%s的%s记录'%(day, m))
day_data = self.get_detail_list(day, detail_type, m)
if day_data == None:
return
elif day_data == -1:
if error_falg == 5:
self.status = OTHER_INFO
self.desc = '电信系统内部错误'
self.logger.info('电信系统内部错误')
return
error_falg += 1
continue
else:
break
detail_list.append(day_data)
return detail_list
def get_detail_list(self, day, data_type, m):
"""
发送请求查询详单
短信详单只有发送的短信才会有记录,接受短信没有记录
"""
# 获取数据
detail = request_handler.fetch_detail(self.account,
self.verifycode,
day.decode('utf-8'),
data_type)
self.takePage('detail_list_%s_%s.html'%(day, data_type),
json.dumps(detail, ensure_ascii=False).encode('utf-8'),
'%s%s详单'%(day, m))
detail['Response']['ResponseData']['Date'] = day
# 这里判断验证码的异常问题
if detail['Response']['ResponseData']['ResultDesc'] == u'验证码不正确':
self.logger.info('详单验证码错误或已超时')
self.status = INPUT_ERROR
self.desc = '短信验证码不正确'
return
if detail['Response']['ResponseData']['ResultDesc'] == u'第3方系统业务失败':
return -1
if detail['Response']['ResponseData']['ResultDesc'] == u'服务器发生异常':
return -1
detail_list = []
date = detail['Response']['ResponseData']['Date']
if detail['Response']['ResponseData']['Data'] != None:
if detail['Response']['ResponseData']['Data']['Items'] != None:
detail_item = detail['Response']['ResponseData']['Data']['Items']['Item']
if isinstance(detail_item, dict):
detail_list.append(detail_item)
else:
for i in detail_item:
detail_list.append(i)
return detail_list
def get_date_list(self):
endday = datetime.now()
day = timedelta(days=-(self.month*30))
startday = (endday + day).strftime('%Y%m%d')
timespan = timedelta(days=1)
daylist = []
while True:
day = endday = (endday - timespan)
sday = day.strftime("%Y%m%d")
daylist.append(sday)
if sday == startday:
break
return daylist
# def data_handler(self, status, detail_list=None, hisBill=None):
# """
# 处理爬取到的数据,爬取不成功直接向redis发送爬取失败的通知直接结束爬虫,如果爬取成功向redis发送通知后会返回数据
# 密码错误,发送的是爬取失败,服务端暂未确定状态代码,后期需要完善
# """
# self.redisUtils.notifyComplete(constants.SUCCESS_SPIDER, self.token, '爬取成功')
# data = {
# 'status' : str(status),
# 'username': self.phoneNum,
# 'source_name' : u'中国电信',
# 'token' : self.token,
# 'data' : {
# 'detail_list' : detail_list,
# 'hisBill' : hisBill,
# },
# 'desc' : u'获取成功'
# }
# data = json.dumps(data, ensure_ascii = False).encode('utf-8')
# return data
# def safecode_login(self):
# """ 发送登录验证码,并获取验证码 """
# # 登录成功返回账户信息
# # 验证码错误时会向redis发送通知,通知redis发送的是需要短信验证码,因为服务端暂未确定验证码错误的状态代码,后期需要完善,然后会自动等待用户发送再次键入验证码或者刷新验证码的指令,刷新指令为''(空)
# # 用户发送验证码失败的情况暂未考虑
# request_handler.send_login_verifycode(self.phoneNum)
# self.redisUtils.notifyComplete(constants.NEED_SMSCODE_STATUS, self.token, '需要短信验证码')
# self.logger.info('登录验证码已发送')
# while True:
# verifycode = self.get_safecode()
# if verifycode == 'reset':
# self.redisUtils.notifyComplete(constants.NEED_SMSCODE_STATUS, self.token, '需要短信验证码')
# self.logger.info('用户刷新验证码')
# request_handler.send_login_verifycode(self.phoneNum)
# self.logger.info('验证码已发送')
# continue
# elif verifycode == -3:
# return verifycode
# account = request_handler.verifycode_login(self.phoneNum, verifycode)
# if account:
# return account
# self.redisUtils.notifyComplete(constants.NEED_SMSCODE_STATUS, self.token, '验证码不正确')
# logger.info('验证码错误')
# def get_detail(self, account, month):
# request_handler.send_detail_verifycode(account)
# logger.info('详单验证码已发送')
# self.redisUtils.notifyComplete(constants.NEED_SMSCODE_STATUS, self.token, '需要短信验证码')
# while True:
# verifycode = self.get_safecode()
# if verifycode == 'reset':
# self.redisUtils.notifyComplete(constants.NEED_SMSCODE_STATUS, self.token, '需要短信验证码')
# self.logger.info('用户刷新验证码')
# request_handler.send_login_verifycode(self.phoneNum)
# self.logger.info('验证码已发送')
# continue
# elif verifycode == -3:
# return verifycode
# detail_list = self.crawl_detail_list(month, verifycode, account)
# if detail_list == -4:
# self.redisUtils.notifyComplete(constants.NEED_SMSCODE_STATUS, self.token, '验证码不正确')
# logger.info('验证码错误')
# continue
# return detail_list
# def get_safecode(self):
# # 从redis中获取验证码,会等待5分钟,5分钟如果没有收到会会向redis发送通知,通知redis发送的是爬取失败,因为服务端暂未确定验证码超时的状态代码,后期需要完善
# out_time = int((time.time())) + (3*60)
# while True:
# dict_json = self.redisUtils.getCode('telecom:code:' + self.token)
# # print dict_json
# if dict_json != None:
# verifycode = dict_json['password']
# logger.info('输入验证码为%s'%verifycode)
# return verifycode
# else:
# print out_time
# print int((time.time()))
# if int((time.time())) > out_time:
# self.logger.info('等待用户超时')
# return -3
# self.logger.info('正在等待验证码')
if __name__ == '__main__':
username = '18101205290'
password = '121212'
token = '12343453451'
login_type = '1'
dianxin = Dianxin(username, password, login_type, token)
print dianxin.get_safecode('123')
# data, status = dianxin.crawl()
# print status
# # data = json.dumps(data, ensure_ascii = False).encode('utf-8')
# with open('data.json', 'w') as f:
# f.write(data + '\n')
# date_list = dianxin.get_date_list(6)
# dianxin.crawl_hisBill(6)
# month_list = dianxin.get_date_list(6)
# crawl_hisBill = dianxin.crawl_hisBill(month_list)
# crawl_hisBill = json.dumps(crawl_hisBill, ensure_ascii = False).encode('utf-8')
# print crawl_hisBill
| [
"2238359556@qq.com"
] | 2238359556@qq.com |
eb1af6d2163ec05242c3594329c21ca8ac6c76cf | 6b0dec7f320f343a8466c7ddfa36234405165c4e | /tutorial/settings.py | 12e0c87fcbb67c7b6e68dd964764deed988aac09 | [] | no_license | mooncalfskb/py_rest_tutorial | 11ee536d92f210049b801b7b34fe58e837d1d728 | 29812540358a1e12731d1a654eb1b67f37405f71 | refs/heads/master | 2021-01-20T14:40:30.376555 | 2017-02-24T06:48:53 | 2017-02-24T06:48:53 | 82,767,387 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,686 | py | """
Django settings for tutorial project.
Generated by 'django-admin startproject' using Django 1.10.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
import mysql.connector
import csv
# set up mysql auth
with open('/webroot_auth/mysql_auth_py.txt', 'r') as csvfile:
mysql_auth = csv.reader(csvfile, delimiter=',')
data = list(mysql_auth)
mysql_user = data[0][0]
mysql_password = data[0][1]
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'u5e@+7a#7eo4qpn=&#kk5u1z0&5_t!^c_t82s*yw75y-4b$6ma'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'rest_framework',
'snippets.apps.SnippetsConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
REST_FRAMEWORK = {
'PAGE_SIZE': 8
}
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'tutorial.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'tutorial.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'mysql.connector.django',
'NAME': 'rest_py',
'USER': mysql_user,
'PASSWORD': mysql_password,
'HOST': '127.0.0.1',
'STORAGE_ENGINE': 'MyISAM'
}
}
"""
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
"""
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = False
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
| [
"mooncalf@Mooncalf.local"
] | mooncalf@Mooncalf.local |
12e04a274aafe908ae093771908f1f49d75992b5 | f2e5944e9996cc74cc97bb6feb494f30d9040c0c | /repronim_support/directives.py | 20c22d848dfb59af216dbf88e08efa26aa26e5b1 | [
"MIT"
] | permissive | ReproNim/HowWouldReproNim | c348738c1e7cf8eed0cc7478f37cd158e2734d85 | 138f99c7931a6503e5762eff773648e44a9cbac1 | refs/heads/master | 2022-12-18T12:18:48.887914 | 2020-10-05T18:21:07 | 2020-10-05T18:21:07 | 271,655,315 | 8 | 1 | MIT | 2020-10-05T18:21:09 | 2020-06-11T22:05:15 | Python | UTF-8 | Python | false | false | 4,330 | py | # -*- coding: utf-8 -*-
from docutils import nodes
from docutils.parsers.rst.directives.admonitions import BaseAdmonition
class heresthegist(nodes.Admonition, nodes.Element):
"""Custom "heresthegist" admonition for summaries."""
def visit_heresthegist_html(self, node):
# it is a simple div with a dedicated CSS class assigned
self.body.append(
self.starttag(
node, 'div', CLASS=('admonition ' + 'heresthegist')))
node.insert(0, nodes.title(
'first',
"Here's the gist"))
def depart_heresthegist_html(self, node):
self.depart_admonition(node)
def visit_heresthegist_latex(self, node):
self.body.append("""
\\begin{tcolorbox}[
enhanced,
breakable,
drop lifted shadow,
sharp corners,
title=Here's the gist,
coltitle=dataladgray,
colbacktitle=dataladblue,
colframe=dataladblue!70!black,
fonttitle=\\bfseries]
""")
def depart_heresthegist_latex(self, node):
self.body.append('\n\n\\end{tcolorbox}\n')
class HeresTheGist(BaseAdmonition):
"""
An admonition summarizing the RepoNim lesson.
"""
node_class = heresthegist
class findoutmore(nodes.container):
"""Custom "findoutmore" container."""
pass
def visit_findoutmore_html(self, node):
self.visit_container(node)
def depart_findoutmore_html(self, node):
self.depart_container(node)
def visit_findoutmore_latex(self, node):
self.body.append("""
\\begin{tcolorbox}[
enhanced,
breakable,
drop lifted shadow,
sharp corners,
title=Find out more,
coltitle=dataladgray,
colbacktitle=dataladyellow,
colframe=dataladyellow!70!black,
fonttitle=\\bfseries]
""")
def depart_findoutmore_latex(self, node):
self.body.append('\n\n\\end{tcolorbox}\n')
class FindOutMore(BaseAdmonition):
"""findoutmore RST directive
The idea here is to use an admonition to parse the RST,
but actually fully replace it afterwards with a custom
node structure. This is done to be able to replace a
rather verbose custom markup that was used before in the
book. Eventually, it may be replaced (in here) with
something completely different -- without having to change
content and markup in the book sources.
"""
node_class = nodes.admonition
# empty is no allowed
has_content = True
# needs at least a one word titel
required_arguments = 1
def run(self):
# this uses the admonition code for RST parsion
docnodes = super(FindOutMore, self).run()
# but we throw away the title, because we want to mark
# it up as a 'header' further down
del docnodes[0][0]
# now put the entire admonition structure into a container
# that we assign the necessary class to make it 'toggle-able'
# in HTML
# outer container
toggle = findoutmore(
'toogle',
# header line with 'Find out more' prefix
nodes.paragraph(
# place actual admonition title we removed
# above
'title', self.arguments[0],
# add (CSS) class
classes=['header'],
),
# place the rest of the admonition structure after the header,
# but still inside the container
*docnodes[0].children,
# properly identify as 'findoutmore' to enable easy custom
# styling, and also tag with 'toggle'. The later is actually
# not 100% necessary, as 'findoutmore' could get that
# functional assigned in CSS instead (maybe streamline later)
classes=['toggle', 'findoutmore'],
)
return [toggle]
def setup(app):
app.add_node(
heresthegist,
html=(visit_heresthegist_html, depart_heresthegist_html),
latex=(visit_heresthegist_latex, depart_heresthegist_latex),
)
app.add_directive('heresthegist', HeresTheGist)
app.add_node(
findoutmore,
html=(visit_findoutmore_html, depart_findoutmore_html),
latex=(visit_findoutmore_latex, depart_findoutmore_latex),
)
app.add_directive('findoutmore', FindOutMore)
# vim: set expandtab shiftwidth=4 softtabstop=4 :
| [
"adina.wagner@t-online.de"
] | adina.wagner@t-online.de |
e670efdc533eb3c816b2a66880377cd5c4515fbe | cce805df23c9560a778f46da127e974f74d2ff24 | /water-meter/upImage.py | c861ab878e3f719cc41d32f770faf860fcf60c24 | [] | no_license | Devumpe/Water-meter-raspberrypi-old | 5784780a9b8da0ca1eb6dcd1506160777cd2c91a | e610cf4c4bbb78c851f78cb898a200509392310c | refs/heads/master | 2022-12-09T22:45:21.328432 | 2019-05-26T12:23:08 | 2019-05-26T12:23:08 | 182,488,924 | 0 | 0 | null | 2022-12-08T05:00:32 | 2019-04-21T04:20:57 | Python | UTF-8 | Python | false | false | 3,106 | py | #-*- coding: utf-8 -*-
from google.cloud import storage
import pyrebase
import time
from tkinter import *
from PIL import ImageTk, Image
from subprocess import Popen
import os
import urllib.request
def readFile():
text_file = open('nwdata.txt','r')
line = text_file.read().splitlines()
print (len(line))
for i in range(0,len(line)):
if(i%3==0):
textdate = line[i]
textrfid = line[i+1]
textimage = line[i+2]
upimage(textdate , textrfid , textimage)
#return (textdate , textrfid , textimage)
def internet_on():
try:
urllib.request.urlopen("http://www.google.com/")
#Popen(["python", "upimage.py"])
readFile()
except urllib.error.URLError as err:
print ("Please check your internet.")
def upimage(date , rfid , image):
# ไฟล์ที่จะอัพ
config = {
"apiKey": "AIzaSyB_jnpsPaxKs3xEhs-AbknZJXjcK-M4IeU",
"authDomain": "water-meter-235712.firebaseapp.com",
"databaseURL": "https://water-meter-235712.firebaseio.com",
"projectId": "water-meter-235712",
"storageBucket": "water-meter-235712.appspot.com",
"messagingSenderId": "67042893322"
}
# data = readFile()
# date = data[0]
# rfid = data[1]
# image = data[2]
filename = image
credential_path = "/home/pi/water-meter/water-meter-235712-firebase-adminsdk-ebgws-8362ef71ab.json"
os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = credential_path
# ที่อยู่ bucket
client = storage.Client()
bucket = client.get_bucket('water-meter-235712.appspot.com')
# อัพไฟล์ storage
blob = bucket.blob('image/'+image)
with open(filename, "rb") as fp:
blob.upload_from_file(fp)
print(blob.public_url)
#อัพไฟล์ database
firebase = pyrebase.initialize_app(config)
db = firebase.database()
db.child("room").push({"image": {"rfid":rfid,"url": blob.public_url,"date": date}})
open("nwdata.txt", 'w').close()
if __name__ == "__main__":
#internet_on()
root = Tk()
canv = Canvas(root, width=1900, height=1000, bg='white')
canv.grid(row=2, column=3)
labeltexttop = Label(root,text="Water Meter Camera", font='Helvetica 35 bold',background='DeepSkyblue2',foreground='white')
labeltexttop.place(relx=0.5, rely=0,relwidth=1, relheight=0.2,anchor='n')
photo = Image.open("correct.jpg")
photo.resize((5, 5), Image.ANTIALIAS)
img = ImageTk.PhotoImage(photo) # PIL solution
background_label = Label(root,background='white', image=img)
background_label.place(relx=0.5, rely=0.25,relwidth=0.4, relheight=0.4,anchor='n')
textlabel = Label(root,background='white',foreground='DeepSkyblue3',text='Upload Done!',font='Helvetica 22 bold')
textlabel.place(relx=0.5, rely=0.75,anchor='n')
button = Button(root,text="NEXT", font='Helvetica 30' , background='DeepSkyblue3',foreground='white',command=quit)
button.place(relx=0.3, rely=0.83, relheight=0.15, relwidth=0.4)
root.mainloop()
| [
"noreply@github.com"
] | Devumpe.noreply@github.com |
514b8a77adedb3725ee02b5b4f716985cab6ddf1 | a7328706cf219fcb398f51b268bb1884962f69a4 | /src/build.py | 91592fcd495e9cfa4a904d942c5562d8652cfd0e | [] | no_license | simplifies/unique-scripts | 2bd80800ad680087cb92e2b28877d3e918d3b517 | b1d8aee106b6b5600e40d02f1ee255e1174c7f6e | refs/heads/master | 2023-07-14T13:26:24.535126 | 2021-08-23T22:16:56 | 2021-08-23T22:16:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,365 | py | #!/usr/bin/python3
import os
import shutil
import sys
from subprocess import run, CalledProcessError
import tw
def build(mdir, git, ref=None, make=None, cmake=None):
try:
os.chdir(tw.srcdir)
if not os.path.isdir(mdir):
run(['git', 'clone', git, mdir], check=True)
os.chdir(mdir)
if ref:
run(['git', 'fetch'], check=True)
run(['git', 'checkout', ref], check=True)
else:
run(['git', 'pull'], check=True)
if cmake:
if not os.path.isdir('build'):
os.mkdir('build')
os.chdir('build')
run(['cmake'] + cmake + ['..'], check=True)
if not make:
make = ['../bam/bam', 'server_release']
if cmake and make[0] != 'make':
make = ['make'] + make
if make[0] == 'bam':
make[0] = '../bam/bam'
run(make, check=True)
print('\033[1;92mSuccessfully built {}\033[0m'.format(mdir))
return True
except CalledProcessError:
print('\033[1;91mFailed to build {}\033[0m'.format(mdir))
return False
success = build('bam', 'git@github.com:matricks/bam.git', ref='v0.4.0', make='./make_unix.sh')
if not success:
sys.exit()
mods = tw.select_items(tw.srcmods.keys(), sys.argv[1:])
for mod in mods:
build(mod, **tw.srcmods[mod])
| [
"tim@timakro.de"
] | tim@timakro.de |
76a9437eb573e51c11d448e66f62f540f777f582 | d109f5bcaa06b2133ed3dafc9b4de8a0a16e6716 | /Modules/GridFormDemo.py | aae2fa6a2e0e8177ceca0bbaf88751477587f236 | [] | no_license | DezeZhao/PyQt5 | fd7974771d51654bbf27d3dd3358c43fcd3f6e36 | 56bd3befa237d269676db2f0ef83653ba85e366b | refs/heads/master | 2021-02-08T02:28:47.848330 | 2020-04-05T05:55:27 | 2020-04-05T05:55:27 | 244,098,840 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,848 | py | # -*- coding: utf-8 -*-
# @Time : 2020/3/9 15:38
# @Software: PyCharm
# @File : GridFormDemo.py
# @Author : DezeZhao
"""
跨行跨列设计表单
某个控件占用多列或多行
"""
import sys
from PyQt5.QtWidgets import *
class GridFromDemo(QWidget):
def __init__(self):
super(GridFromDemo, self).__init__()
self.setWindowTitle('栅格布局:表单设计')
# self.resize(500, 500)
wg = QWidget(self) # 整个窗口为wg 绝对布局
vlayout = QVBoxLayout(wg) # 将垂直布局放在窗口中
grid = QGridLayout()
form = QFormLayout()
titleLabel = QLabel('标题')
authorLabel = QLabel('作者')
contentsLabel = QLabel('内容')
titleEdit = QLineEdit()
authorEdit = QLineEdit()
contentsEdit = QTextEdit()
plan1 = QLabel('QGridLayout achieve')
titleLabel1 = QLabel('标题')
authorLabel1 = QLabel('作者')
contentsLabel1 = QLabel('内容')
titleEdit1 = QLineEdit()
authorEdit1 = QLineEdit()
contentsEdit1 = QTextEdit()
plan2 = QLabel('QFormLayout achieve')
grid.addWidget(plan1, 0, 0, 1, 2)
grid.addWidget(titleLabel, 1, 0)
grid.addWidget(titleEdit, 1, 1)
grid.addWidget(authorLabel, 2, 0)
grid.addWidget(authorEdit, 2, 1)
grid.addWidget(contentsLabel, 3, 0)
grid.addWidget(contentsEdit, 3, 1, 5, 1)
form.addRow(plan2)
form.addRow(titleLabel1, titleEdit1)
form.addRow(authorLabel1, authorEdit1)
form.addRow(contentsLabel1, contentsEdit1)
vlayout.addLayout(grid)
vlayout.addLayout(form)
self.setLayout(vlayout)
if __name__ == '__main__':
app = QApplication(sys.argv)
main = GridFromDemo()
main.show()
sys.exit(app.exec_())
| [
"1360536767@qq.com"
] | 1360536767@qq.com |
22d35c0c4abf74c77910597730fe81254d43af7e | fdd1742cdf6cbc1c846414af8f31b3ab4796e79a | /evaluation_normalized_without_shuffle.py | 1c2a2405a0d79069413efdc694bf2a8d02026020 | [] | no_license | KarimMibrahim/user_based_contexts_tagging | fe5a704277eb2908a4ab0a00a64eda238cb85c45 | d24e1bec92bc0ac43a3916116a7811fa538ded46 | refs/heads/master | 2021-04-04T21:53:11.610065 | 2020-07-13T13:50:02 | 2020-07-13T13:50:02 | 248,492,062 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 33,899 | py | # General Imports
import os
import numpy as np
import pandas as pd
from time import strftime, localtime
import matplotlib.pyplot as plt
import seaborn as sn
# Deep Learning
import tensorflow as tf
from sklearn.metrics import cohen_kappa_score,f1_score,accuracy_score, precision_score, recall_score, classification_report, roc_auc_score, \
hamming_loss
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MultiLabelBinarizer
from sklearn.utils import check_random_state
# Use a larger default font size for every matplotlib figure produced below.
plt.rcParams.update({'font.size': 22})
# Make only GPUs 1 and 2 visible to TensorFlow on this machine.
os.environ["CUDA_VISIBLE_DEVICES"]="1,2"
#TODO: fix directories
# Project root and data locations (absolute paths on the experiment machine).
SOURCE_PATH = "/srv/workspace/research/user_based_contexts_tagging/"
# Directory holding the precomputed mel-spectrogram .npz files (see load_spectrogram).
SPECTROGRAMS_PATH = "/srv/workspace/research/user_based_contexts_tagging/dataset/"
OUTPUT_PATH = "/srv/workspace/research/user_based_contexts_tagging/experiments_results/"
EXTRA_OUTPUTS = "/srv/workspace/research/extra_experiment_results"
EXPERIMENTNAME = "single_label_deeper_weighted_normalized"
# Spectrogram input shape fed to the model; matches the (1, 646, 96) arrays
# checked in load_spectrogram plus a trailing channel axis.
# Presumably (time frames, mel bins, channels) -- TODO confirm.
INPUT_SHAPE = (646, 96, 1)
EMBEDDINGS_DIM = 256
#TODO: fix labels
# The ten listening-context labels, in the column order used by the label csvs.
LABELS_LIST = ['car', 'gym', 'happy', 'night', 'relax',
               'running', 'sad', 'summer', 'work', 'workout']
# Ground-truth tables loaded once at import time and read by the *_py helpers below.
global_user_embeddings = pd.read_pickle("/srv/workspace/research/user_based_contexts_tagging/GroundTruth/user_embeddings.pkl")
global_labels = pd.read_csv("/srv/workspace/research/user_based_contexts_tagging/GroundTruth/all_labels_clipped.csv")
train_partial = pd.read_csv("/srv/workspace/research/user_based_contexts_tagging/GroundTruth/train_single.csv")
# Per-label positive-class weights: dataset size divided by the number of
# positives per label (the columns after the two id columns).
POS_WEIGHTS = len(train_partial)/train_partial.sum()[2:]
POS_WEIGHTS = [np.float32(x) for x in POS_WEIGHTS]
BATCH_SIZE = 32
from tensorflow.keras.backend import set_session
def limit_memory_usage(gpu_memory_fraction=0.1):
    """Cap the fraction of GPU memory TensorFlow may allocate.

    Builds a session whose GPU options restrict per-process memory to
    ``gpu_memory_fraction`` and installs it as the Keras backend session.
    """
    session_config = tf.ConfigProto()
    session_config.gpu_options.per_process_gpu_memory_fraction = gpu_memory_fraction
    limited_session = tf.Session(config=session_config)
    set_session(limited_session)
limit_memory_usage(0.3)
def dataset_from_csv(csv_path, **kwargs):
    """Build a tf.data.Dataset from the rows of a csv file.

    Extra keyword arguments are forwarded to ``pandas.read_csv``.
    Each csv column becomes one component of the dataset's element dict.
    """
    frame = pd.read_csv(csv_path, **kwargs)
    column_arrays = {column: frame[column].values for column in frame}
    return tf.data.Dataset.from_tensor_slices(column_arrays)
def set_tensor_shape(tensor, tensor_shape):
    """Assign a static shape to ``tensor`` and hand the same tensor back.

    Wraps the in-place ``Tensor.set_shape`` call in an expression-friendly
    form (unlike ``tf.set_shape``, which returns nothing) so it can be used
    inside dataset ``map`` lambdas.
    """
    shaped = tensor
    shaped.set_shape(tensor_shape)
    return shaped
def check_tensor_shape(tensor_tf, target_shape):
    """Return a boolean TF graph node: does ``tensor_tf`` match ``target_shape``?

    Entries of ``target_shape`` that are falsy (e.g. None) act as wildcards
    and are not checked.
    """
    actual_shape = tf.shape(tensor_tf)
    matches = tf.constant(True)
    for axis, expected_length in enumerate(target_shape):
        if not expected_length:
            continue
        axis_ok = tf.equal(tf.constant(expected_length), actual_shape[axis])
        matches = tf.logical_and(matches, axis_ok)
    return matches
def load_spectrogram(*args):
    """Load the mel-spectrogram of one song, reporting failures via a flag.

    args: (song_id, path) -- the path argument is ignored; the module-level
    SPECTROGRAMS_PATH directory is used instead.

    Returns:
        (spectrogram, False) on success, where the spectrogram is the array
        stored under 'arr_0' in mels<song_id>.npz (expected shape (1, 646, 96));
        (np.float32(0.0), True) if the file is missing, unreadable, or has
        an unexpected shape.
    """
    base_dir = SPECTROGRAMS_PATH
    song_id, _ignored_path = args
    try:
        npz_file = os.path.join(base_dir, "mels" + str(song_id) + '.npz')
        spectrogram = np.load(npz_file)['arr_0']
        if spectrogram.shape != (1, 646, 96):
            return np.float32(0.0), True
        return spectrogram, False
    except Exception:
        # Any failure is reported through the boolean flag so the tf.data
        # pipeline can filter the sample out instead of crashing.
        return np.float32(0.0), True
def load_spectrogram_tf(sample, identifier_key="song_id",
                        path="/my_data/MelSpectograms_top20/", device="/cpu:0",
                        features_key="features"):
    """Graph-mode wrapper around ``load_spectrogram``.

    Pins the py_func to ``device`` and returns a copy of ``sample`` extended
    with the loaded spectrogram under ``features_key`` plus an "error" flag.
    """
    with tf.device(device):
        spectrogram, error = tf.py_func(
            load_spectrogram,
            [sample[identifier_key], tf.constant(path)],
            (tf.float32, tf.bool),
            stateful=False)
        enriched = dict(sample)
        enriched[features_key] = spectrogram
        enriched["error"] = error
    return enriched
# Dataset pipelines
def get_embeddings_py(sample_user_id):
    """Look up one user's embedding from the module-global `global_user_embeddings`
    dataframe and return it as a flat float32 numpy array.

    The first dataframe column is skipped by `iloc[:, 1:]` (presumably the
    user_id column — confirm against how global_user_embeddings is loaded).
    """
    user_embeddings = global_user_embeddings[global_user_embeddings.user_id == sample_user_id]
    samples_user_embeddings = user_embeddings.iloc[:, 1:].values.flatten()
    # The single remaining cell holds the embedding sequence; unwrap it to an array.
    samples_user_embeddings = np.asarray(samples_user_embeddings[0])
    samples_user_embeddings = samples_user_embeddings.astype(np.float32)
    return samples_user_embeddings
def tf_get_embeddings_py(sample, device="/cpu:0"):
    """Graph wrapper around get_embeddings_py: adds a "user_embeddings"
    entry (float32) to the sample dict via tf.py_func."""
    with tf.device(device):
        embeddings = tf.py_func(get_embeddings_py,
                                [sample["user_id"]],
                                [tf.float32],
                                stateful=False)
        enriched = dict(sample)
        enriched["user_embeddings"] = embeddings
    return enriched
# Dataset pipelines
def get_labels_py(song_id,user_id):
    """Return the binary label vector for one (song_id, user_id) pair as a
    flat float32 numpy array, read from the module-global `global_labels`.

    Fix: the original chained two boolean indexers
    (`global_labels[m1][m2]`), which pandas must reindex (it emits a
    "Boolean Series key will be reindexed" UserWarning) and which filters
    the frame twice. A single combined mask is equivalent and warning-free.
    """
    mask = (global_labels.song_id == song_id) & (global_labels.user_id == user_id)
    labels = global_labels[mask]
    labels = labels.iloc[:, 2:].values.flatten() # TODO: fix this shift in dataframe columns when read
    labels = labels.astype(np.float32)
    return labels
def tf_get_labels_py(sample, device="/cpu:0"):
    """Graph wrapper around get_labels_py: adds a "binary_label" entry
    (float32) to the sample dict via tf.py_func."""
    with tf.device(device):
        label_tensor = tf.py_func(get_labels_py,
                                  [sample["song_id"], sample["user_id"]],
                                  [tf.float32],
                                  stateful=False)
        enriched = dict(sample)
        enriched["binary_label"] = label_tensor
    return enriched
def get_dataset(input_csv, input_shape=INPUT_SHAPE, batch_size=32, shuffle=True,
                infinite_generator=True, random_crop=False, cache_dir=os.path.join(OUTPUT_PATH, "tmp/tf_cache/"),
                num_parallel_calls=32):
    """Build the full tf.data input pipeline from a CSV of (song, user) rows.

    Pipeline: CSV -> (optional shuffle) -> load spectrogram (py_func) ->
    drop load errors -> log(1 + 100*x) compression -> transpose to HWC ->
    drop badly shaped tensors -> attach labels and user embeddings ->
    (optional infinite repeat) -> batch -> select output tuple
    (features, binary_label, user_embeddings, song_id, user_id).

    Note: `random_crop` is never referenced in this body, and `cache_dir`
    is only used by the commented-out cache step below — both are
    currently inert.
    """
    # build dataset from csv file
    dataset = dataset_from_csv(input_csv)
    # Shuffle data
    if shuffle:
        dataset = dataset.shuffle(buffer_size=100, seed=1, reshuffle_each_iteration=True)
    # compute mel spectrogram (py_func is not thread-safe here, hence num_parallel_calls=1)
    dataset = dataset.map(lambda sample: load_spectrogram_tf(sample), num_parallel_calls=1)
    # filter out errors
    dataset = dataset.filter(lambda sample: tf.logical_not(sample["error"]))
    # map dynamic compression: log(1 + C*x)
    C = 100
    dataset = dataset.map(lambda sample: dict(sample, features=tf.log(1 + C * sample["features"])),
                          num_parallel_calls=num_parallel_calls)
    # Apply permute dimensions: (1, 646, 96) -> (646, 96, 1), i.e. channels-last
    dataset = dataset.map(lambda sample: dict(sample, features=tf.transpose(sample["features"], perm=[1, 2, 0])),
                          num_parallel_calls=num_parallel_calls)
    # Filter by shape (remove badly shaped tensors)
    dataset = dataset.filter(lambda sample: check_tensor_shape(sample["features"], input_shape))
    # set features shape
    dataset = dataset.map(lambda sample: dict(sample,
                                              features=set_tensor_shape(sample["features"], input_shape)))
    # if cache_dir:
    #     os.makedirs(cache_dir, exist_ok=True)
    #     dataset = dataset.cache(cache_dir)
    dataset = dataset.map(lambda sample: tf_get_labels_py(sample), num_parallel_calls=1)
    # set output shape
    dataset = dataset.map(lambda sample: dict(sample, binary_label=set_tensor_shape(
        sample["binary_label"], (len(LABELS_LIST)))))
    # load embeddings
    dataset = dataset.map(lambda sample: tf_get_embeddings_py(sample), num_parallel_calls=1)
    # set weights shape
    dataset = dataset.map(lambda sample: dict(sample, user_embeddings=set_tensor_shape(
        sample["user_embeddings"], EMBEDDINGS_DIM)))
    if infinite_generator:
        # Repeat indefinitly
        dataset = dataset.repeat(count=-1)
    # Make batch
    dataset = dataset.batch(batch_size)
    # Select only features and annotation
    dataset = dataset.map(lambda sample: (
        sample["features"], sample["binary_label"], sample["user_embeddings"],sample["song_id"],sample["user_id"]))
    return dataset
def get_training_dataset(path):
    """Training pipeline: shuffled, infinite, default batch size (see get_dataset)."""
    return get_dataset(path, shuffle=True,
                       cache_dir=os.path.join(OUTPUT_PATH, "tmp/tf_cache/training/"))
def get_validation_dataset(path):
    """Validation pipeline: deterministic order (no shuffle), batch size 32."""
    return get_dataset(path, batch_size=32, shuffle=False,
                       random_crop=False, cache_dir=os.path.join(OUTPUT_PATH, "tmp/tf_cache/validation/"))
def get_test_dataset(path):
    """Test pipeline: single pass (finite), no shuffle, batch size 50."""
    return get_dataset(path, batch_size=50, shuffle=False,
                       infinite_generator=False, cache_dir=os.path.join(OUTPUT_PATH, "tmp/tf_cache/test/"))
def load_test_set_raw(LOADING_PATH=os.path.join(SOURCE_PATH, "GroundTruth/"),
                      SPECTROGRAM_PATH=SPECTROGRAMS_PATH):
    """Load the whole test set into memory.

    Reads the binarized test ground truth, aligns it with the balanced
    ground-truth table, and loads one spectrogram per test song; songs
    whose spectrogram is missing or mis-shaped are left as all-zero rows.

    Returns:
        spectrograms: float ndarray (n_test, 646, 96, 1), log(1+100x) compressed
        test_classes: int ndarray (n_test, n_labels)

    NOTE(review): the spectrogram filename here is str(song_id)+'.npz',
    while load_spectrogram() uses "mels"+str(song_id)+'.npz' — confirm
    which naming scheme the dataset on disk actually uses.

    Fix: the per-file load used a bare `except:`, which also swallowed
    KeyboardInterrupt/SystemExit; narrowed to `except Exception`.
    """
    # Loading testset groundtruth
    test_ground_truth = pd.read_csv(os.path.join(LOADING_PATH, "test_ground_truth_binarized.csv"))
    all_ground_truth = pd.read_csv(os.path.join(LOADING_PATH, "balanced_ground_truth_hot_vector.csv"))
    # all_ground_truth.drop("playlists_count", axis=1, inplace=True);
    # Keep only rows for songs present in the test set, ordered like the test set.
    all_ground_truth = all_ground_truth[all_ground_truth.song_id.isin(test_ground_truth.song_id)]
    all_ground_truth = all_ground_truth.set_index('song_id')
    all_ground_truth = all_ground_truth.loc[test_ground_truth.song_id]
    test_classes = all_ground_truth.values
    test_classes = test_classes.astype(int)
    spectrograms = np.zeros([len(test_ground_truth), 646, 96])
    songs_ID = np.zeros([len(test_ground_truth), 1])  # collected but not returned
    for idx, filename in enumerate(list(test_ground_truth.song_id)):
        try:
            spect = np.load(os.path.join(SPECTROGRAM_PATH, str(filename) + '.npz'))['arr_0']
        except Exception:
            # Missing/corrupt spectrogram: leave the all-zero row in place.
            continue
        if (spect.shape == (1, 646, 96)):
            spectrograms[idx] = spect
            songs_ID[idx] = filename
    # Apply same transformation as trianing [ALWAYS DOUBLE CHECK TRAINING PARAMETERS]
    C = 100
    spectrograms = np.log(1 + C * spectrograms)
    spectrograms = np.expand_dims(spectrograms, axis=3)
    return spectrograms, test_classes
def get_weights(shape):
    """Create a weight Variable initialized from a truncated normal (stddev 0.1)."""
    initial = tf.truncated_normal(shape, stddev=0.1)
    # variable_summaries(...) intentionally left disabled, as elsewhere in this file.
    return tf.Variable(initial)
def bias_variable(shape):
    """Create a bias Variable initialized to the constant 0.1."""
    # variable_summaries(...) intentionally left disabled, as elsewhere in this file.
    return tf.Variable(tf.constant(0.1, shape=shape))
def conv_2d(x, W, name=""):
    """2-D convolution with stride 1 in every dimension and SAME padding."""
    unit_strides = [1, 1, 1, 1]
    return tf.nn.conv2d(x, W, unit_strides, padding="SAME", name=name)
def max_pooling(x, shape, name=""):
    """Max-pool with window `shape`, 2x2 spatial stride, SAME padding."""
    spatial_stride = [1, 2, 2, 1]
    return tf.nn.max_pool(x, shape, strides=spatial_stride, padding="SAME", name=name)
def conv_layer_with_relu(input, shape, name=""):
    """Conv (SAME, stride 1) + bias + ReLU; `shape` is [kh, kw, in_ch, out_ch]."""
    kernel = get_weights(shape)
    bias = bias_variable([shape[3]])
    conv = conv_2d(input, kernel, name)
    return tf.nn.relu(conv + bias)
def full_layer(input, size):
    """Dense layer: input @ W + b, with W ~ truncated normal and b = 0.1.

    The input's second static dimension must be known (used to size W).
    """
    in_size = int(input.get_shape()[1])
    weights = get_weights([in_size, size])
    bias = bias_variable([size])
    return tf.matmul(input, weights) + bias
def get_model(x_input,user_embeddings, current_keep_prob, train_phase):
    """Build the CNN + user-embedding tagging network ("C4" architecture).

    Args:
        x_input: spectrogram batch; the rest of this file feeds [None, 646, 96, 1].
        user_embeddings: per-user embedding batch, batch-normalized and projected
            to 128 dims before concatenation with the CNN features.
        current_keep_prob: dropout keep probability placeholder (1.0 at test time).
        train_phase: bool placeholder controlling batch-norm training mode.

    Returns:
        (logits, output) over len(LABELS_LIST) classes.
        NOTE(review): `output` is a softmax, which implies mutually exclusive
        labels, while the commented-out loss uses sigmoid_cross_entropy —
        confirm which is intended for this multi-label setup.
    """
    # Define model architecture
    # C4_model
    x_norm = tf.layers.batch_normalization(x_input, training=train_phase)
    embeds_norm = tf.layers.batch_normalization(user_embeddings, training=train_phase)
    # Four conv blocks, each halving the spatial resolution (-> 41 x 6 x 256).
    with tf.name_scope('CNN_1'):
        conv1 = conv_layer_with_relu(x_norm, [3, 3, 1, 32], name="conv_1")
        max1 = max_pooling(conv1, shape=[1, 2, 2, 1], name="max_pool_1")
    with tf.name_scope('CNN_2'):
        conv2 = conv_layer_with_relu(max1, [3, 3, 32, 64], name="conv_2")
        max2 = max_pooling(conv2, shape=[1, 2, 2, 1], name="max_pool_2")
    with tf.name_scope('CNN_3'):
        conv3 = conv_layer_with_relu(max2, [3, 3, 64, 128], name="conv_3")
        max3 = max_pooling(conv3, shape=[1, 2, 2, 1], name="max_pool_3")
    with tf.name_scope('CNN_4'):
        conv4 = conv_layer_with_relu(max3, [3, 3, 128, 256], name="conv_4")
        max4 = max_pooling(conv4, shape=[1, 2, 2, 1], name="max_pool_4")
    # Disabled deeper variant (CNN_5/CNN_6), kept for reference.
    """
    with tf.name_scope('CNN_5'):
        conv5 = conv_layer_with_relu(max4, [3, 3, 256, 256], name="conv_5")
        max5 = max_pooling(conv5, shape=[1, 2, 2, 1], name="max_pool_5")
    with tf.name_scope('CNN_6'):
        conv6 = conv_layer_with_relu(max5, [3, 3, 256, 256], name="conv_6")
        max6 = max_pooling(conv6, shape=[1, 2, 2, 1], name="max_pool_6")
    with tf.name_scope('Fully_connected_1'):
        flattened = tf.reshape(max6, [-1, 11 * 2 * 256])
        fully1 = tf.nn.sigmoid(full_layer(flattened, 256))
    """
    with tf.name_scope('embedding_layer_1'):
        embeddings_1 = full_layer(embeds_norm, 128)
        embeds_1_norm = tf.layers.batch_normalization(embeddings_1, training=train_phase)
    # Disabled deeper spectrogram-branch MLP, kept for reference.
    """
    with tf.name_scope('flattened_layer_1'):
        flattened = tf.reshape(max4, [-1, 41 * 6 * 256])
        spect_1 = tf.nn.sigmoid(full_layer(flattened, 2048))
        spect_2 = tf.nn.sigmoid(full_layer(spect_1, 256))
        spect_3 = tf.nn.sigmoid(full_layer(spect_2, 128))
    """
    with tf.name_scope('Fully_connected_1'):
        # Fuse CNN features with the projected user embedding.
        flattened = tf.reshape(max4, [-1, 41 * 6 * 256])
        flattened_norm = tf.layers.batch_normalization(flattened, training=train_phase)
        concatenated = tf.concat([flattened_norm,embeds_1_norm],1)
        fully1 = tf.nn.sigmoid(full_layer(concatenated, 128))
    with tf.name_scope('Fully_connected_2'):
        dropped = tf.nn.dropout(fully1, keep_prob=current_keep_prob)
        logits = full_layer(dropped, len(LABELS_LIST))
        output = tf.nn.softmax(logits)
        tf.summary.histogram('outputs', output)
    return logits, output
def evaluate_model(test_pred_prob, test_classes, saving_path, evaluation_file_path):
    """
    Evaluates predictions using exact-match accuracy, ROC AUC (macro/micro/weighted)
    and Hamming loss; prints the metrics, writes them to a text file, and dumps
    the raw predictions and ground truth to disk.

    :param test_pred_prob: per-class prediction probabilities, shape (n_samples, n_labels)
    :param test_classes: binary ground-truth labels, same shape
    :param saving_path: directory receiving predictions.out / test_ground_truth_classes.txt
    :param evaluation_file_path: text file receiving the metric summary
    :return: accuracy, auc_roc (macro), hamming_error
    """
    # Threshold probabilities at 0.5.
    test_pred = np.round(test_pred_prob)
    # Accuracy
    accuracy = 100 * accuracy_score(test_classes, test_pred)
    print("Exact match accuracy is: " + str(accuracy) + "%")
    # Area Under the Receiver Operating Characteristic Curve (ROC AUC)
    auc_roc = roc_auc_score(test_classes, test_pred_prob)
    print("Macro Area Under the Curve (AUC) is: " + str(auc_roc))
    auc_roc_micro = roc_auc_score(test_classes, test_pred_prob, average="micro")
    print("Micro Area Under the Curve (AUC) is: " + str(auc_roc_micro))
    auc_roc_weighted = roc_auc_score(test_classes, test_pred_prob, average="weighted")
    print("Weighted Area Under the Curve (AUC) is: " + str(auc_roc_weighted))
    # Hamming loss is the fraction of labels that are incorrectly predicted.
    hamming_error = hamming_loss(test_classes, test_pred)
    print("Hamming Loss (ratio of incorrect tags) is: " + str(hamming_error))
    with open(evaluation_file_path, "w") as f:
        f.write("Exact match accuracy is: " + str(accuracy) + "%\n" + "Area Under the Curve (AUC) is: " + str(auc_roc)
                + "\nMicro AUC is:" + str(auc_roc_micro) + "\nWeighted AUC is:" + str(auc_roc_weighted)
                + "\nHamming Loss (ratio of incorrect tags) is: " + str(hamming_error))
    print("saving prediction to disk")
    np.savetxt(os.path.join(saving_path, 'predictions.out'), test_pred_prob, delimiter=',')
    np.savetxt(os.path.join(saving_path, 'test_ground_truth_classes.txt'), test_classes, delimiter=',')
    return accuracy, auc_roc, hamming_error
def plot_loss_acuracy(epoch_losses_history, epoch_accurcies_history, val_losses_history, val_accuracies_history, path):
    """Save train/validation accuracy and loss curves as PNG and PDF under *path*.

    Each history argument is a per-epoch sequence; the two figures are written
    to model_accuracy.{png,pdf} and model_loss.{png,pdf}.
    """
    # Plot training & validation accuracy values
    plt.figure(figsize=(10, 10))
    plt.plot(epoch_accurcies_history)
    plt.plot(val_accuracies_history)
    plt.title('Model accuracy')
    plt.ylabel('Accuracy')
    plt.xlabel('Epoch')
    plt.legend(['Train', 'Validation'], loc='upper left')
    plt.savefig(os.path.join(path, "model_accuracy.png"))
    plt.savefig(os.path.join(path, "model_accuracy.pdf"), format='pdf')
    # plt.savefig(os.path.join(path,label + "_model_accuracy.eps"), format='eps', dpi=900)
    # Plot training & validation loss values
    plt.figure(figsize=(10, 10))
    plt.plot(epoch_losses_history)
    plt.plot(val_losses_history)
    plt.title('Model loss (Cross Entropy without weighting)')
    plt.ylabel('Loss')
    plt.xlabel('Epoch')
    plt.legend(['Train', 'Validation'], loc='upper left')
    plt.savefig(os.path.join(path, "model_loss.png"))
    plt.savefig(os.path.join(path, "model_loss.pdf"), format='pdf')
    # plt.savefig(os.path.join(path,label + "_model_loss.eps"), format='eps', dpi=900)
def variable_summaries(var):
    """Attach mean/stddev/min/max/histogram TensorBoard summaries to *var*.

    Currently only referenced from commented-out calls in get_weights /
    bias_variable; kept for debugging runs.
    """
    with tf.name_scope('summaries'):
        mean = tf.reduce_mean(var)
        tf.summary.scalar('mean', mean)
        with tf.name_scope('stddev'):
            stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
        tf.summary.scalar('stddev', stddev)
        tf.summary.scalar('max', tf.reduce_max(var))
        tf.summary.scalar('min', tf.reduce_min(var))
        tf.summary.histogram('histogram', var)
def load_predictions_groundtruth(predictions_path, groundtruth_path):
    """Read back previously saved model predictions and ground-truth labels.

    Both files are comma-delimited text (as written by np.savetxt); the two
    arrays are returned as (predictions, groundtruth).
    """
    predictions = np.loadtxt(predictions_path, delimiter=',')
    groundtruth = np.loadtxt(groundtruth_path, delimiter=',')
    return predictions, groundtruth
def plot_output_coocurances(model_output_rounded, output_path, LABELS_LIST):
    """Plot a heatmap counting, for each label pair, how often both are
    predicted positive together; saved to <output_path>.pdf and .png."""
    # Getting coocuarances
    test_pred_df = pd.DataFrame(model_output_rounded, columns=LABELS_LIST)
    coocurrances = pd.DataFrame(columns=test_pred_df.columns)
    for column in test_pred_df.columns:
        # Column sums restricted to rows where `column` itself was predicted positive.
        coocurrances[column] = test_pred_df[test_pred_df[column] == 1].sum()
    coocurrances = coocurrances.T
    # Plotting coocurances
    plt.figure(figsize=(30, 30));
    sn.set(font_scale=2)  # for label size
    cmap = 'PuRd'
    plt.axes([.1, .1, .8, .7])
    plt.figtext(.5, .83, 'Number of track coocurances in model output', fontsize=34, ha='center')
    sn.heatmap(coocurrances, annot=True, annot_kws={"size": 24}, fmt='.0f', cmap=cmap);
    plt.savefig(output_path + ".pdf", format="pdf")
    plt.savefig(output_path + ".png")
def plot_false_netgatives_confusion_matrix(model_output_rounded, groundtruth, output_path, LABELS_LIST):
    """Heatmap of false-negative co-occurrences: for each label, count the
    predicted-positive labels among samples where that label was a false
    negative (predicted 0 but truly 1); saved to <output_path>.pdf/.png."""
    # Getting false negatives coocuarances
    test_pred_df = pd.DataFrame(model_output_rounded, columns=LABELS_LIST)
    test_classes_df = pd.DataFrame(groundtruth, columns=LABELS_LIST)
    FN_coocurrances = pd.DataFrame(columns=test_pred_df.columns)
    for column in test_pred_df.columns:
        # Rows where `column` is a false negative: prediction 0, ground truth 1.
        FN_coocurrances[column] = test_pred_df[[negative_prediction and positive_sample
                                                for negative_prediction, positive_sample in
                                                zip(test_pred_df[column] == 0, test_classes_df[column] == 1)]].sum()
    FN_coocurrances = FN_coocurrances.T
    # Plotting coocurances
    plt.figure(figsize=(30, 30));
    sn.set(font_scale=2)  # for label size
    cmap = 'PuRd'
    plt.axes([.1, .1, .8, .7])
    plt.figtext(.5, .83, 'False negatives confusion matrix', fontsize=34, ha='center')
    sn.heatmap(FN_coocurrances, annot=True, annot_kws={"size": 24}, fmt='.0f', cmap=cmap);
    plt.savefig(output_path + ".pdf", format="pdf")
    plt.savefig(output_path + ".png")
def plot_true_poisitve_vs_all_positives(model_output_rounded, groundtruth, output_path, LABELS_LIST):
    """Bar chart comparing, per class, the number of true positives against the
    total number of positive samples; per-class recall percentages are added to
    the x labels. Saved to <output_path>.pdf and .png."""
    # Creating a plot of true positives vs all positives
    true_positives_perclass = sum((model_output_rounded == groundtruth) * (groundtruth == 1))
    true_positives_df = pd.DataFrame(columns=LABELS_LIST)
    true_positives_df.index.astype(str, copy=False)
    true_positives_df.loc[0] = true_positives_perclass
    percentage_of_positives_perclass = sum(groundtruth)
    true_positives_df.loc[1] = percentage_of_positives_perclass
    true_positives_df.index = ['True Positives', 'Positive Samples']
    # Per-class true-positive rate (recall), used to annotate the labels below.
    true_positives_ratio_perclass = sum((model_output_rounded == groundtruth) * (groundtruth == 1)) / sum(groundtruth)
    # Plot the figure
    labels = [label + " (" + "{:.1f}".format(true_positives_ratio_perclass[idx] * 100) + "%) " for idx, label in
              enumerate(LABELS_LIST)]
    true_positives_df.columns = labels
    true_positives_df.T.plot.bar(figsize=(32, 22), fontsize=28)
    plt.xticks(rotation=45)
    plt.title(
        "Number of true positive per class compared to the total number of positive samples \n Average true positive rate: " + "{:.2f}".format(
            true_positives_ratio_perclass.mean()))
    plt.savefig(output_path + ".pdf", format="pdf")
    plt.savefig(output_path + ".png")
def create_analysis_report(model_output, groundtruth, output_path, LABELS_LIST, validation_output=None,
                           validation_groundtruth=None):
    """
    Create a report of all the different evaluation metrics, including optimizing the
    prediction threshold per class on the validation set if one is passed.

    :param model_output: per-class probabilities, shape (n_samples, n_labels)
    :param groundtruth: binary labels, same shape
    :param output_path: directory receiving plots and results_report.csv
    :param LABELS_LIST: class names, in column order
    :param validation_output/validation_groundtruth: optional arrays used to
        pick, per class, the threshold maximizing f1; when given, "Optimized"
        metric rows are appended to the report.
    :return: the results dataframe (metrics as rows, classes as columns,
        plus an 'average' column)

    Fix: in the validation branch the combined row index labeled rows 5-6 as
    "Precision","Recall", but those rows were appended in the order
    [auc, RECALL, PRECISION, f1, kappa] above — the two report rows were
    swapped relative to their contents. The index now matches the data
    (same order as the non-validation path).
    """
    # Round the probabilities at 0.5
    model_output_rounded = np.round(model_output)
    model_output_rounded = np.clip(model_output_rounded, 0, 1)
    # Create a dataframe where we keep all the evaluations, starting by prediction accuracy
    accuracies_perclass = sum(model_output_rounded == groundtruth) / len(groundtruth)
    results_df = pd.DataFrame(columns=LABELS_LIST)
    results_df.index.astype(str, copy=False)
    percentage_of_positives_perclass = sum(groundtruth) / len(groundtruth)
    results_df.loc[0] = percentage_of_positives_perclass
    results_df.loc[1] = accuracies_perclass
    results_df.index = ['Ratio of positive samples', 'Model accuracy']
    # plot the accuracies per class
    results_df.T.plot.bar(figsize=(22, 12), fontsize=18)
    plt.title('Model accuracy vs the ratio of positive samples per class')
    plt.xticks(rotation=45)
    plt.savefig(os.path.join(output_path, "accuracies_vs_positiveRate.pdf"), format="pdf")
    plt.savefig(os.path.join(output_path, "accuracies_vs_positiveRate.png"))
    # Getting the true positive rate perclass
    true_positives_ratio_perclass = sum((model_output_rounded == groundtruth) * (groundtruth == 1)) / sum(groundtruth)
    results_df.loc[2] = true_positives_ratio_perclass
    # Get true negative ratio
    true_negative_ratio_perclass = sum((model_output_rounded == groundtruth)
                                       * (groundtruth == 0)) / (len(groundtruth) - sum(groundtruth))
    results_df.loc[3] = true_negative_ratio_perclass
    # compute additional metrics (AUC,f1,recall,precision)
    auc_roc_per_label = roc_auc_score(groundtruth, model_output, average=None)
    precision_perlabel = precision_score(groundtruth, model_output_rounded, average=None)
    recall_perlabel = recall_score(groundtruth, model_output_rounded, average=None)
    f1_perlabel = f1_score(groundtruth, model_output_rounded, average=None)
    kappa_perlabel = [cohen_kappa_score(groundtruth[:, x], model_output_rounded[:, x]) for x in range(len(LABELS_LIST))]
    results_df = results_df.append(
        pd.DataFrame([auc_roc_per_label, recall_perlabel, precision_perlabel, f1_perlabel, kappa_perlabel], columns=LABELS_LIST))
    results_df.index = ['Ratio of positive samples', 'Model accuracy', 'True positives ratio',
                        'True negatives ratio', "AUC", "Recall", "Precision", "f1-score", "Kappa score"]
    # Creating evaluation plots
    plot_true_poisitve_vs_all_positives(model_output_rounded, groundtruth,
                                        os.path.join(output_path, 'TruePositive_vs_allPositives'), LABELS_LIST)
    plot_output_coocurances(model_output_rounded, os.path.join(output_path, 'output_coocurances'), LABELS_LIST)
    plot_false_netgatives_confusion_matrix(model_output_rounded, groundtruth,
                                           os.path.join(output_path, 'false_negative_coocurances'), LABELS_LIST)
    # Adjusting threshold based on validation set
    if (validation_groundtruth is not None and validation_output is not None):
        np.savetxt(os.path.join(output_path, 'validation_predictions.out'), validation_output, delimiter=',')
        np.savetxt(os.path.join(output_path, 'valid_ground_truth_classes.txt'), validation_groundtruth, delimiter=',')
        # Sweep thresholds in [0, 1) and keep, per class, the one maximizing f1.
        thresholds = np.arange(0, 1, 0.01)
        f1_array = np.zeros((len(LABELS_LIST), len(thresholds)))
        for idx, label in enumerate(LABELS_LIST):
            f1_array[idx, :] = [
                f1_score(validation_groundtruth[:, idx], np.clip(np.round(validation_output[:, idx] - threshold + 0.5), 0, 1))
                for threshold in thresholds]
        threshold_arg = np.argmax(f1_array, axis=1)
        threshold_per_class = thresholds[threshold_arg]
        # plot the f1 score across thresholds
        plt.figure(figsize=(20, 20))
        for idx, x in enumerate(LABELS_LIST):
            plt.plot(thresholds, f1_array[idx, :], linewidth=5)
        plt.legend(LABELS_LIST, loc='best')
        plt.title("F1 Score vs different prediction threshold values for each class")
        plt.savefig(os.path.join(output_path, "f1_score_vs_thresholds.pdf"), format="pdf")
        plt.savefig(os.path.join(output_path, "f1_score_vs_thresholds.png"))
        # Applying thresholds optimized per class
        model_output_rounded = np.zeros_like(model_output)
        for idx, label in enumerate(LABELS_LIST):
            model_output_rounded[:, idx] = np.clip(np.round(model_output[:, idx] - threshold_per_class[idx] + 0.5), 0, 1)
        accuracies_perclass = sum(model_output_rounded == groundtruth) / len(groundtruth)
        # Getting the true positive rate perclass
        true_positives_ratio_perclass = sum((model_output_rounded == groundtruth) * (groundtruth == 1)) / sum(
            groundtruth)
        # Get true negative ratio
        true_negative_ratio_perclass = sum((model_output_rounded == groundtruth)
                                           * (groundtruth == 0)) / (len(groundtruth) - sum(groundtruth))
        results_df = results_df.append(
            pd.DataFrame([accuracies_perclass, true_positives_ratio_perclass,
                          true_negative_ratio_perclass], columns=LABELS_LIST))
        # compute additional metrics (AUC,f1,recall,precision)
        # NOTE: AUC is threshold-independent, so "Optimized AUC" equals the AUC row above.
        auc_roc_per_label = roc_auc_score(groundtruth, model_output, average=None)
        precision_perlabel = precision_score(groundtruth, model_output_rounded, average=None)
        recall_perlabel = recall_score(groundtruth, model_output_rounded, average=None)
        f1_perlabel = f1_score(groundtruth, model_output_rounded, average=None)
        kappa_perlabel = [cohen_kappa_score(groundtruth[:, x], model_output_rounded[:, x]) for x in
                          range(len(LABELS_LIST))]
        results_df = results_df.append(
            pd.DataFrame([auc_roc_per_label, precision_perlabel, recall_perlabel, f1_perlabel, kappa_perlabel],
                         columns=LABELS_LIST))
        # Rows 5-6 are "Recall","Precision" (matching the first append's order);
        # the optimized block was appended as precision-then-recall, so its
        # labels keep that order.
        results_df.index = ['Ratio of positive samples', 'Model accuracy', 'True positives ratio',
                            'True negatives ratio', "AUC", "Recall", "Precision", "f1-score", "Kappa score",
                            'Optimized model accuracy', 'Optimized true positives ratio',
                            'Optimized true negatives ratio', "Optimized AUC",
                            "Optimized precision", "Optimized recall", "Optimized f1-score", "Optimized Kappa score"]
        # Creating evaluation plots
        plot_true_poisitve_vs_all_positives(model_output_rounded, groundtruth,
                                            os.path.join(output_path, 'TruePositive_vs_allPositives[optimized]'),
                                            LABELS_LIST)
        plot_output_coocurances(model_output_rounded, os.path.join(output_path, 'output_coocurances[optimized]'),
                                LABELS_LIST)
        plot_false_netgatives_confusion_matrix(model_output_rounded, groundtruth,
                                               os.path.join(output_path, 'false_negative_coocurances[optimized]'),
                                               LABELS_LIST)
    results_df['average'] = results_df.mean(numeric_only=True, axis=1)
    results_df.T.to_csv(os.path.join(output_path, "results_report.csv"), float_format="%.2f")
    return results_df
def main():
    """Restore the best-validation checkpoint and evaluate it on the test split.

    Builds the graph, restores weights from a hard-coded past run directory,
    streams the test dataset batch by batch, and writes predictions, song/user
    ids and the evaluation report under OUTPUT_PATH/EXPERIMENTNAME/<timestamp>/.
    """
    print("Current Experiment: " + EXPERIMENTNAME + "\n\n\n")
    # Loading datasets
    # TODO: fix directories
    # Setting up model
    # `y`, `weights` and `update_ops` are built but not used in this
    # evaluation-only path (they belong to the training graph).
    y = tf.placeholder(tf.float32, [None, len(LABELS_LIST)], name="true_labels")
    x_input = tf.placeholder(tf.float32, [None, 646, 96, 1], name="input")
    embeddings_input = tf.placeholder(tf.float32, [None, EMBEDDINGS_DIM], name="input_embeddings")
    current_keep_prob = tf.placeholder(tf.float32, name="dropout_rate")
    weights = tf.constant(POS_WEIGHTS)
    train_phase = tf.placeholder(tf.bool, name="is_training")
    logits, model_output = get_model(x_input, embeddings_input, current_keep_prob, train_phase)
    # Defining loss and metrics
    #loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=y))
    '''
    These following lines are needed for batch normalization to work properly
    check https://timodenk.com/blog/tensorflow-batch-normalization/
    '''
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    # Setting up saving directory
    experiment_name = strftime("%Y-%m-%d_%H-%M-%S", localtime())
    exp_dir = os.path.join(OUTPUT_PATH, EXPERIMENTNAME, experiment_name)
    # NOTE(review): checkpoint dir is pinned to one specific past run — confirm before reuse.
    extra_exp_dir = os.path.join(EXTRA_OUTPUTS, EXPERIMENTNAME, "2020-05-06_21-35-34/")
    os.makedirs(exp_dir, exist_ok=True)
    saver = tf.train.Saver()
    with tf.Session() as sess:
        # Write summaries to LOG_DIR -- used by TensorBoard
        sess.run(tf.global_variables_initializer())
        # Loading model with best validation
        saver.restore(sess, os.path.join(extra_exp_dir, "best_validation.ckpt"))
        print("Model with best validation restored before testing.")
        test_labels = pd.read_csv(os.path.join(SOURCE_PATH, "GroundTruth/test_single.csv"))
        test_dataset = get_dataset(os.path.join(SOURCE_PATH, "GroundTruth/test_single.csv"),shuffle = False)
        test_classes = np.zeros_like(test_labels.iloc[:, 2:].values, dtype=float)
        # test_images, test_classes = load_test_set_raw(test_split)
        # Number of full batches; any trailing partial batch is dropped.
        TEST_NUM_STEPS = int(np.floor((len(test_classes) / 32)))
        # split_size = int(len(test_classes) / TEST_NUM_STEPS)
        test_pred_prob = np.zeros_like(test_classes, dtype=float)
        test_iterator = test_dataset.make_one_shot_iterator()
        test_next_element = test_iterator.get_next()
        test_song_ids = np.zeros([test_classes.shape[0],1])
        test_user_ids = np.zeros([test_classes.shape[0],1])
        for test_batch_counter in range(TEST_NUM_STEPS):
            start_idx = (test_batch_counter * BATCH_SIZE)
            end_idx = (test_batch_counter * BATCH_SIZE) + BATCH_SIZE
            # Each element is (features, binary_label, user_embeddings, song_id, user_id)
            # in the order produced by get_dataset's final map.
            test_batch = sess.run(test_next_element)
            test_batch_images = test_batch[0]
            test_batch_labels = np.squeeze(test_batch[1])
            test_embeddings = np.squeeze(test_batch[2])
            test_song_ids[start_idx:end_idx] = test_batch[3].reshape([-1, 1])
            test_user_ids[start_idx:end_idx] = test_batch[4].reshape([-1, 1])
            test_classes[start_idx:end_idx, :] = test_batch_labels
            test_pred_prob[start_idx:end_idx, :] = sess.run(model_output,
                                                            feed_dict={x_input: test_batch_images,
                                                                       embeddings_input: test_embeddings,
                                                                       current_keep_prob: 1.0,
                                                                       train_phase: False})
        np.savetxt(os.path.join(exp_dir, 'tracks_ids.txt'), test_song_ids, delimiter=',')
        np.savetxt(os.path.join(exp_dir, 'user_ids.txt'), test_user_ids, delimiter=',')
        accuracy_out, auc_roc, hamming_error = evaluate_model(test_pred_prob, test_classes,
                                                              saving_path=exp_dir,
                                                              evaluation_file_path= \
                                                                  os.path.join(exp_dir, "evaluation_results.txt"))
        results = create_analysis_report(test_pred_prob, test_classes, exp_dir, LABELS_LIST)
if __name__ == "__main__":
main()
| [
"karim.m.ibraheem@gmail.com"
] | karim.m.ibraheem@gmail.com |
2d40a7cc4ebc24dcddff212a03f10a3cfc651379 | b92ad73d6598e56163bf3fc9876b77f7665928f2 | /recipes/migrations/0002_recipe_category.py | a7e10858719b537de41696dc7f8e62b92aa344b0 | [] | no_license | HubLen1/Project | 9cf96491c67293352e3183d355f09371ae35803d | f2eb2ea1f415ff66b2fa89274d3505060c82e9ca | refs/heads/master | 2022-12-14T11:02:16.095144 | 2019-08-12T10:38:28 | 2019-08-12T10:38:28 | 201,909,550 | 1 | 0 | null | 2022-12-08T06:00:29 | 2019-08-12T10:34:10 | JavaScript | UTF-8 | Python | false | false | 427 | py | # Generated by Django 2.2.2 on 2019-07-15 13:10
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add a required `category` CharField to the Recipe model.

    `default='abc'` together with `preserve_default=False` only back-fills
    existing rows during this migration; the default is not kept on the field.
    """

    dependencies = [
        ('recipes', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='recipe',
            name='category',
            field=models.CharField(default='abc', max_length=100),
            preserve_default=False,
        ),
    ]
| [
"hublenda@aol.com"
] | hublenda@aol.com |
def check_fifo(q1, q2, out):
    """Return True iff `out` is a valid interleaving of `q1` and `q2`
    that preserves each queue's FIFO order and consumes both completely.

    Greedy single pass; assumes order ids are unique across the two queues.
    """
    i = j = 0
    for order in out:
        if i < len(q1) and q1[i] == order:
            i += 1
        elif j < len(q2) and q2[j] == order:
            j += 1
        else:
            # `order` matches neither queue front: not a valid interleaving.
            return False
    # Valid only if every queued order was served.
    return i == len(q1) and j == len(q2)
# Ad-hoc smoke checks: the first three interleavings break FIFO order
# (expected output: False); the last is a valid interleaving (True).
# fail:
print(check_fifo([1, 3, 5], [2, 4, 6],[1, 2, 4, 6, 5, 3]))
print(check_fifo([17, 8, 24], [12, 19, 2], [17, 12, 19, 24, 2]))
print(check_fifo([17, 24], [12, 19, 2], [17, 8, 12, 19, 24, 2]))
# pass
print(check_fifo([17, 8, 24], [12, 19, 2], [17, 8, 12, 19, 24, 2]))
| [
"west221b@gmail.com"
] | west221b@gmail.com |
7e2a67db930a6ea17d2bd8cc7a88bf10d46aef32 | 0fde82bf8a529c1045595b0f451dc7005a9d90cf | /generate/test.py | 23d7846f745c166c7e2e9d872e50a0103ee75942 | [
"MIT"
] | permissive | cosinekitty/astronomy | 18ccec1d62bbae24a8424b3eb807f4b3b97c04a1 | 056076b85aa591ba756ef8a24fc017ddb3b3029b | refs/heads/master | 2023-09-04T18:00:34.676849 | 2023-08-28T19:20:08 | 2023-08-28T19:20:08 | 180,022,816 | 305 | 56 | MIT | 2023-08-28T19:20:10 | 2019-04-07T20:55:20 | C | UTF-8 | Python | false | false | 159,205 | py | #!/usr/bin/env python3
import sys
import math
import re
import os
from itertools import chain
sys.path.append('../source/python')
import astronomy
#-----------------------------------------------------------------------------------------------------------
# Global verbosity flag; Debug() output is suppressed unless this is True.
Verbose = False
# Time-unit conversion constants.
SECONDS_PER_DAY = 86400.0
MINUTES_PER_DAY = 1440.0

def Debug(text):
    """Print *text* only when the module-level Verbose flag is enabled."""
    if not Verbose:
        return
    print(text)
def Pass(funcname):
    """Report a passing test by name and return the success exit code (0)."""
    message = 'PY {}: PASS'.format(funcname)
    print(message)
    return 0
def Fail(funcname, reason):
    """Report a failing test with its reason and return the failure exit code (1)."""
    message = 'PY {} FAIL: {}'.format(funcname, reason)
    print(message)
    return 1
def v(x):
    """Validate that *x* is a finite int or float and return it unchanged.

    Raises Exception for non-numeric types and for NaN/inf values.
    """
    if isinstance(x, (int, float)):
        if math.isfinite(x):
            return x
        raise Exception('Not a finite numeric value: {}'.format(x))
    raise Exception('Not a numeric type: {}'.format(x))
def vabs(x):
    """Absolute value of a validated (finite numeric) input."""
    value = v(x)
    return abs(value)
def vmax(a, b):
    """Maximum of two validated (finite numeric) values."""
    va = v(a)
    vb = v(b)
    return max(va, vb)
def vmin(a, b):
    """Minimum of two validated (finite numeric) values."""
    va = v(a)
    vb = v(b)
    return min(va, vb)
def sqrt(x):
    """Square root with validation of both the input and the result."""
    root = math.sqrt(v(x))
    return v(root)
def AssertGoodTime(text, correct):
    """Parse *text* with astronomy.Time.Parse and require its round-tripped
    string form to equal *correct*; exits the whole process on mismatch
    (this file is a test harness)."""
    time = astronomy.Time.Parse(text)
    check = str(time)
    if check != correct:
        print('Python AssertGoodTime FAILURE: parsed "{}", got "{}", expected "{}"'.format(text, check, correct))
        sys.exit(1)
    Debug('PY AssertGoodTime: "{}" OK'.format(text))
def AssertBadTime(text):
    """Require that parsing *text* raises astronomy.DateTimeFormatError;
    exits the whole process if the parse unexpectedly succeeds."""
    try:
        astronomy.Time.Parse(text)
    except astronomy.DateTimeFormatError:
        Debug('PY AssertBadTime: "{}" OK'.format(text))
    else:
        print('PY AssertBadTime FAILURE: should not have parsed "{}"'.format(text))
        sys.exit(1)
def CalendarCase(year, month, day, hour, minute, second):
    """Round-trip one calendar date through astronomy.Time and back.

    The (year, month, day) must survive exactly; the time of day may drift
    by at most 4 ms. Returns 0 on success, 1 on failure (via Fail).
    """
    # Convert to Astronomy Engine Time object.
    time = astronomy.Time.Make(year, month, day, hour, minute, second)
    # Convert to back calendar date tuple.
    (cyear, cmonth, cday, chour, cminute, csecond) = time.Calendar()
    if (cyear, cmonth, cday) != (year, month, day):
        return Fail('CalendarCase', 'Expected {:06d}-{:02d}-{:02d} but found {:06d}-{:02d}-{:02d}'.format(
            year, month, day,
            cyear, cmonth, cday
        ))
    # Compare time-of-day in milliseconds to tolerate floating-point rounding.
    expectedMillis = 1000.0*(second + 60.0*(minute + 60.0*hour))
    calcMillis = 1000.0*(csecond + 60.0*(cminute + 60.0*chour))
    diffMillis = vabs(calcMillis - expectedMillis)
    if diffMillis > 4.0:
        return Fail('CalendarCase', 'EXCESSIVE millisecond error = {:0.6f} for {:06d}-{:02d}-{:02d}'.format(
            diffMillis, year, month, day
        ))
    return 0
def AstroTime():
    """Self-test of astronomy.Time: UT/TT conversion, string formatting,
    ISO parsing (including extreme year ranges) and calendar round-trips
    near every potential leap day. Returns 0 on success; bad parses exit
    the process via the Assert* helpers.
    """
    # Reference UT/TT day values for 2018-12-02 18:30:12.543 (days since J2000).
    expected_ut = 6910.270978506945
    expected_tt = 6910.271800214368
    time = astronomy.Time.Make(2018, 12, 2, 18, 30, 12.543)
    diff = time.ut - expected_ut
    if vabs(diff) > 1.0e-12:
        print('PY AstroTime: excessive UT error {}'.format(diff))
        return 1
    diff = time.tt - expected_tt
    if vabs(diff) > 1.0e-12:
        print('PY AstroTime: excessive TT error {}'.format(diff))
        return 1
    s = str(time.Utc())
    if s != '2018-12-02 18:30:12.543000':
        print('PY AstroTime: Utc() returned incorrect string "{}"'.format(s))
        return 1
    # Formatting truncates (not rounds) to milliseconds.
    time = astronomy.Time.Make(2018, 12, 31, 23, 59, 59.9999)
    expected = '2018-12-31T23:59:59.999Z'
    s = str(time)
    if s != expected:
        print('PY AstroTime: expected {} but found {}'.format(expected, s))
        return 1
    print('PY Current time =', astronomy.Time.Now())
    AssertGoodTime('2015-12-31T23:45Z', '2015-12-31T23:45:00.000Z')
    AssertGoodTime('2015-01-02T23:45:17Z', '2015-01-02T23:45:17.000Z')
    AssertGoodTime('1971-03-17T03:30:55.976Z', '1971-03-17T03:30:55.976Z')
    AssertBadTime('')
    AssertBadTime('1971-13-01')
    AssertBadTime('1971-12-32')
    AssertBadTime('1971-12-31T24:00:00Z')
    AssertBadTime('1971-12-31T23:60:00Z')
    AssertBadTime('1971-12-31T23:00:60Z')
    AssertBadTime('1971-03-17T03:30:55.976')
    # Extreme year values...
    AssertGoodTime('-4172-12-02T14:30:45.123Z', '-004172-12-02T14:30:45.123Z')
    AssertGoodTime('-4173-12-02T14:30:45.123Z', '-004173-12-02T14:30:45.123Z')
    AssertGoodTime('-4174-12-02T14:30:45.123Z', '-004174-12-02T14:30:45.123Z')
    AssertGoodTime('-4175-12-02T14:30:45.123Z', '-004175-12-02T14:30:45.123Z')
    AssertGoodTime('-4176-12-02T14:30:45.123Z', '-004176-12-02T14:30:45.123Z')
    AssertGoodTime('-2300-12-19T16:22:26.325Z', '-002300-12-19T16:22:26.325Z')
    AssertGoodTime('-2300-12-19T16:22:26.325Z', '-002300-12-19T16:22:26.325Z')
    AssertGoodTime('+12345-12-11T13:30:10.041Z', '+012345-12-11T13:30:10.040Z')
    AssertGoodTime('+12346-12-11T13:30:10.041Z', '+012346-12-11T13:30:10.040Z')
    AssertGoodTime('+12347-12-11T13:30:10.041Z', '+012347-12-11T13:30:10.040Z')
    AssertGoodTime('+12348-12-11T13:30:10.041Z', '+012348-12-11T13:30:10.040Z')
    AssertGoodTime('-123456-01-14T22:55:12.000Z', '-123456-01-14T22:55:11.999Z')
    AssertGoodTime('+123456-01-14T22:55:12.000Z', '+123456-01-14T22:55:11.999Z')
    AssertGoodTime('-999995-01-14T22:55:12.297Z', '-999995-01-14T22:55:12.297Z')
    AssertGoodTime('-999996-01-14T22:55:12.297Z', '-999996-01-14T22:55:12.297Z')
    AssertGoodTime('-999997-01-14T22:55:12.297Z', '-999997-01-14T22:55:12.297Z')
    AssertGoodTime('-999998-01-14T22:55:12.297Z', '-999998-01-14T22:55:12.297Z')
    AssertGoodTime('-999999-01-14T22:55:12.000Z', '-999999-01-14T22:55:11.998Z')
    AssertGoodTime('+999999-01-14T22:55:12.000Z', '+999999-01-14T22:55:11.998Z')
    nyears = 0
    for year in chain(range(-999999, -995999), range(-3000, 3001), range(+996000, +1000000)):
        # Check just before and after each potential leap day.
        if CalendarCase(year, 2, 28, 14, 45, 28.321):
            return 1
        if CalendarCase(year, 3, 1, 14, 45, 28.321):
            return 1
        nyears += 1
    return Pass('AstroTime({} calendar years)'.format(nyears))
#-----------------------------------------------------------------------------------------------------------
def GeoMoon():
    """Regression-check the geocentric Moon vector against known-good C output.

    Returns 0 on success, 1 if the vector differs excessively from the
    reference values produced by the C implementation.
    """
    time = astronomy.Time.Make(2019, 6, 24, 15, 45, 37)
    vec = astronomy.GeoMoon(time)
    print('PY GeoMoon: vec = {:0.16f}, {:0.16f}, {:0.16f}'.format(vec.x, vec.y, vec.z))
    # Correct values obtained from C version of GeoMoon calculation
    correct = (+0.002674037026701135, -0.0001531610316600666, -0.0003150159927069429)
    delta = (vec.x - correct[0], vec.y - correct[1], vec.z - correct[2])
    diff = sqrt(delta[0]*delta[0] + delta[1]*delta[1] + delta[2]*delta[2])
    print('PY GeoMoon: diff = {}'.format(diff))
    if diff > 4.34e-19:
        print('PY GeoMoon: EXCESSIVE ERROR')
        return 1
    return 0
#-----------------------------------------------------------------------------------------------------------
def SelectJupiterMoon(jm, mindex):
    """Return the state vector for one of Jupiter's Galilean moons.

    mindex selects the moon: 0=Io, 1=Europa, 2=Ganymede, 3=Callisto.
    """
    return (jm.io, jm.europa, jm.ganymede, jm.callisto)[mindex]
def AstroCheck(printflag):
    """Sweep 1700..2200 computing positions for all bodies, optionally printing them.

    Steps through time in ~10.03-day increments, computing heliocentric vectors,
    topocentric equatorial/horizontal coordinates, the geocentric Moon, Jupiter's
    moons, nutation angles, and the ecliptic Moon. When printflag is true, each
    result is printed in a compact line format ('o', 'v', 's', 'j', 'n', 'm'
    records) for comparison against other language implementations.
    Always returns 0.
    """
    time = astronomy.Time.Make(1700, 1, 1, 0, 0, 0)
    stop = astronomy.Time.Make(2200, 1, 1, 0, 0, 0)
    observer = astronomy.Observer(29, -81, 10)
    if printflag:
        print('o {:0.6f} {:0.6f} {:0.6f}'.format(observer.latitude, observer.longitude, observer.height))
    # Slightly irrational step (10 + pi/100 days) avoids resonating with calendar cycles.
    dt = 10 + math.pi/100
    bodylist = [
        astronomy.Body.Sun, astronomy.Body.Moon, astronomy.Body.Mercury, astronomy.Body.Venus,
        astronomy.Body.Earth, astronomy.Body.Mars, astronomy.Body.Jupiter, astronomy.Body.Saturn,
        astronomy.Body.Uranus, astronomy.Body.Neptune, astronomy.Body.Pluto,
        astronomy.Body.SSB, astronomy.Body.EMB
    ]
    while time.tt < stop.tt:
        for body in bodylist:
            name = body.name
            # The Moon is handled separately below via GeoMoon.
            if body != astronomy.Body.Moon:
                pos = astronomy.HelioVector(body, time)
                if printflag:
                    print('v {} {:0.18e} {:0.18e} {:0.18e} {:0.18e}'.format(name, pos.t.tt, pos.x, pos.y, pos.z))
                # Topocentric coordinates make no sense for Earth itself or for barycenters.
                if body != astronomy.Body.Earth and body != astronomy.Body.EMB and body != astronomy.Body.SSB:
                    j2000 = astronomy.Equator(body, time, observer, False, False)
                    ofdate = astronomy.Equator(body, time, observer, True, True)
                    hor = astronomy.Horizon(time, observer, ofdate.ra, ofdate.dec, astronomy.Refraction.Airless)
                    if printflag:
                        print('s {} {:0.18e} {:0.18e} {:0.18e} {:0.18e} {:0.18e} {:0.18e} {:0.18e}'.format(name, time.tt, time.ut, j2000.ra, j2000.dec, j2000.dist, hor.azimuth, hor.altitude))
        # Geocentric Moon vector and its topocentric coordinates.
        pos = astronomy.GeoMoon(time)
        if printflag:
            print('v GM {:0.18e} {:0.18e} {:0.18e} {:0.18e}'.format(pos.t.tt, pos.x, pos.y, pos.z))
        j2000 = astronomy.Equator(astronomy.Body.Moon, time, observer, False, False)
        ofdate = astronomy.Equator(astronomy.Body.Moon, time, observer, True, True)
        hor = astronomy.Horizon(time, observer, ofdate.ra, ofdate.dec, astronomy.Refraction.Airless)
        if printflag:
            print('s GM {:0.18e} {:0.18e} {:0.18e} {:0.18e} {:0.18e} {:0.18e} {:0.18e}'.format(time.tt, time.ut, j2000.ra, j2000.dec, j2000.dist, hor.azimuth, hor.altitude))
        # Jupiter's four Galilean moons: position and velocity for each.
        jm = astronomy.JupiterMoons(time)
        if printflag:
            for mindex in range(4):
                moon = SelectJupiterMoon(jm, mindex)
                print('j {:d} {:0.18e} {:0.18e} {:0.18e} {:0.18e} {:0.18e} {:0.18e} {:0.18e} {:0.18e}'.format(mindex, time.tt, time.ut, moon.x, moon.y, moon.z, moon.vx, moon.vy, moon.vz))
        if printflag:
            # Nutation calculations
            print('n {:0.18e} {:0.18e}'.format(time._et.dpsi, time._et.deps))
        sphere = astronomy.EclipticGeoMoon(time)
        if printflag:
            print('m {:0.18f} {:0.18f} {:0.18f}'.format(sphere.lat, sphere.lon, sphere.dist))
        time = time.AddDays(dt)
    return 0
#-----------------------------------------------------------------------------------------------------------
def Seasons(filename = 'seasons/seasons.txt'):
    """Verify equinox/solstice calculations against a reference data file.

    Each line of the file holds a UTC timestamp and an event name
    (Equinox, Solstice, Aphelion, Perihelion). Aphelion/Perihelion rows are
    skipped. The calculated event time must match the file's time to within
    2.37 minutes. Returns 0 on success, 1 on any parse failure or excessive error.
    """
    with open(filename, 'rt') as infile:
        lnum = 0
        current_year = 0
        mar_count = sep_count = jun_count = dec_count = 0
        max_minutes = 0.0
        for line in infile:
            lnum += 1
            line = line.strip()
            m = re.match(r'^(\d+)-(\d+)-(\d+)T(\d+):(\d+)Z\s+([A-Za-z]+)$', line)
            if not m:
                print('PY Seasons: Invalid data on line {} of file {}'.format(lnum, filename))
                return 1
            year = int(m.group(1))
            month = int(m.group(2))
            day = int(m.group(3))
            hour = int(m.group(4))
            minute = int(m.group(5))
            name = m.group(6)
            # The file groups events by year; recompute the seasons only when the year changes.
            if year != current_year:
                current_year = year
                seasons = astronomy.Seasons(year)
            correct_time = astronomy.Time.Make(year, month, day, hour, minute, 0)
            if name == 'Equinox':
                if month == 3:
                    calc_time = seasons.mar_equinox
                    mar_count += 1
                elif month == 9:
                    calc_time = seasons.sep_equinox
                    sep_count += 1
                else:
                    print('PY Seasons: {} line {}: Invalid equinox date in test data'.format(filename, lnum))
                    return 1
            elif name == 'Solstice':
                if month == 6:
                    calc_time = seasons.jun_solstice
                    jun_count += 1
                elif month == 12:
                    calc_time = seasons.dec_solstice
                    dec_count += 1
                else:
                    print('PY Seasons: {} line {}: Invalid solstice date in test data'.format(filename, lnum))
                    return 1
            elif name == 'Aphelion':
                continue # not yet calculated
            elif name == 'Perihelion':
                continue # not yet calculated
            else:
                print('PY Seasons: {} line {}: unknown event type {}'.format(filename, lnum, name))
                return 1
            # Verify that the calculated time matches the correct time for this event.
            diff_minutes = (24.0 * 60.0) * vabs(calc_time.tt - correct_time.tt)
            if diff_minutes > max_minutes:
                max_minutes = diff_minutes
            if diff_minutes > 2.37:
                print('PY Seasons: {} line {}: excessive error ({}): {} minutes.'.format(filename, lnum, name, diff_minutes))
                return 1
    print('PY Seasons: verified {} lines from file {} : max error minutes = {:0.3f}'.format(lnum, filename, max_minutes))
    print('PY Seasons: Event counts: mar={}, jun={}, sep={}, dec={}'.format(mar_count, jun_count, sep_count, dec_count))
    return 0
def SeasonsIssue187():
    """Regression test for https://github.com/cosinekitty/astronomy/issues/187

    For years far from the present, the seasons search was sometimes failing
    with an InternalError. Sweep every year 1..9998 and confirm none raises.
    Returns 0 on success, 1 on failure.
    """
    year = 1
    while year < 9999:
        try:
            astronomy.Seasons(year)
        except astronomy.InternalError:
            print('PY SeasonsIssue187: FAIL - internal error for year {}'.format(year))
            return 1
        year += 1
    print('PY SeasonsIssue187: PASS')
    return 0
#-----------------------------------------------------------------------------------------------------------
def MoonPhase(filename = 'moonphase/moonphases.txt'):
    """Verify MoonPhase/SearchMoonQuarter/NextMoonQuarter against reference data.

    Each file line holds a quarter index (0..3) and the UTC time of that lunar
    quarter. Checks that MoonPhase at the expected time yields the expected
    elongation (within 1 arcmin) and that the chained quarter search finds the
    same times (within 90 seconds). Returns 0 on success, 1 on failure.
    """
    threshold_seconds = 90.0 # max tolerable prediction error in seconds
    max_arcmin = 0.0
    maxdiff = 0.0
    quarter_count = 0
    with open(filename, 'rt') as infile:
        lnum = 0
        prev_year = 0
        for line in infile:
            lnum += 1
            line = line.strip()
            m = re.match(r'^([0-3]) (\d+)-(\d+)-(\d+)T(\d+):(\d+):(\d+\.\d+)Z$', line)
            if not m:
                print('PY MoonPhase: invalid data format in {} line {}'.format(filename, lnum))
                return 1
            quarter = int(m.group(1))
            year = int(m.group(2))
            month = int(m.group(3))
            day = int(m.group(4))
            hour = int(m.group(5))
            minute = int(m.group(6))
            second = float(m.group(7))
            # Quarter 0 = new moon (elongation 0), 1 = first quarter (90), etc.
            expected_elong = 90.0 * quarter
            expected_time = astronomy.Time.Make(year, month, day, hour, minute, second)
            angle = astronomy.MoonPhase(expected_time)
            degree_error = vabs(angle - expected_elong)
            # Angles wrap at 360 degrees; take the smaller arc.
            if degree_error > 180.0:
                degree_error = 360.0 - degree_error
            arcmin = 60.0 * degree_error
            if arcmin > 1.0:
                print('PY MoonPhase({} line {}): EXCESSIVE ANGULAR ERROR: {} arcmin'.format(filename, lnum, arcmin))
                return 1
            max_arcmin = vmax(max_arcmin, arcmin)
            if year != prev_year:
                prev_year = year
                # The test data contains a single year's worth of data for every 10 years.
                # Every time we see the year value change, it breaks continuity of the phases.
                # Start the search over again.
                start_time = astronomy.Time.Make(year, 1, 1, 0, 0, 0.0)
                mq = astronomy.SearchMoonQuarter(start_time)
            else:
                # Yet another lunar quarter in the same year.
                expected_quarter = (1 + mq.quarter) % 4
                mq = astronomy.NextMoonQuarter(mq)
                # Expect the next consecutive quarter.
                if expected_quarter != mq.quarter:
                    print('PY MoonPhase({} line {}): SearchMoonQuarter returned quarter {}, but expected {}.'.format(filename, lnum, mq.quarter, expected_quarter))
                    return 1
            quarter_count += 1
            # Make sure the time matches what we expect.
            diff_seconds = vabs(mq.time.tt - expected_time.tt) * SECONDS_PER_DAY
            if diff_seconds > threshold_seconds:
                print('PY MoonPhase({} line {}): excessive time error {:0.3f} seconds.'.format(filename, lnum, diff_seconds))
                return 1
            maxdiff = vmax(maxdiff, diff_seconds)
    print('PY MoonPhase: passed {} lines for file {} : max_arcmin = {:0.6f}, maxdiff = {:0.3f} seconds, {} quarters.'
        .format(lnum, filename, max_arcmin, maxdiff, quarter_count))
    return 0
#-----------------------------------------------------------------------------------------------------------
def TestElongFile(filename, targetRelLon):
    """Verify SearchRelativeLongitude against a file of expected event times.

    Each file line holds a UTC timestamp and a body name. For each row, searches
    from the start of that year for the body reaching targetRelLon relative
    longitude, and requires the found time to match within 6.8 minutes.
    Returns 0 on success, 1 on any parse failure or excessive error.
    """
    with open(filename, 'rt') as infile:
        lnum = 0
        for line in infile:
            lnum += 1
            line = line.strip()
            m = re.match(r'^(\d+)-(\d+)-(\d+)T(\d+):(\d+)Z ([A-Za-z]+)$', line)
            if not m:
                print('PY TestElongFile({} line {}): invalid data format'.format(filename, lnum))
                return 1
            year = int(m.group(1))
            month = int(m.group(2))
            day = int(m.group(3))
            hour = int(m.group(4))
            minute = int(m.group(5))
            name = m.group(6)
            body = astronomy.BodyCode(name)
            # BUG FIX: compare the enum member itself, not body.value (an int),
            # against Body.Invalid -- the old int-vs-enum comparison could never
            # be true, so invalid body names were not detected. This now matches
            # the equivalent check in RiseSet().
            if body == astronomy.Body.Invalid:
                print('PY TestElongFile({} line {}): invalid body name "{}"'.format(filename, lnum, name))
                return 1
            # Begin the search at the start of the event's calendar year.
            search_time = astronomy.Time.Make(year, 1, 1, 0, 0, 0)
            expected_time = astronomy.Time.Make(year, month, day, hour, minute, 0)
            found_time = astronomy.SearchRelativeLongitude(body, targetRelLon, search_time)
            if found_time is None:
                print('PY TestElongFile({} line {}): SearchRelativeLongitude failed.'.format(filename, lnum))
                return 1
            diff_minutes = (24.0 * 60.0) * (found_time.tt - expected_time.tt)
            Debug('PY TestElongFile: {:<7s} error = {:6.3} minutes'.format(name, diff_minutes))
            if vabs(diff_minutes) > 6.8:
                print('PY TestElongFile({} line {}): EXCESSIVE ERROR.'.format(filename, lnum))
                return 1
    print('PY TestElongFile: passed {} rows of data'.format(lnum))
    return 0
def TestPlanetLongitudes(body, outFileName, zeroLonEventName):
    """Find alternating 0/180-degree relative-longitude events for a planet, 1700-2200.

    Writes each found event to outFileName and checks that the ratio of the
    longest to shortest interval between events stays below a per-planet
    threshold (so no event was skipped). zeroLonEventName labels the
    rlon==0 events ('inf' for inferior planets, 'opp' for superior ones).
    Returns 0 on success, 1 on failure.
    """
    startYear = 1700
    stopYear = 2200
    rlon = 0.0
    sum_diff = 0.0
    count = 0
    name = body.name
    with open(outFileName, 'wt') as outfile:
        time = astronomy.Time.Make(startYear, 1, 1, 0, 0, 0)
        stopTime = astronomy.Time.Make(stopYear, 1, 1, 0, 0, 0)
        while time.tt < stopTime.tt:
            count += 1
            event = zeroLonEventName if rlon == 0.0 else 'sup'
            found_time = astronomy.SearchRelativeLongitude(body, rlon, time)
            if found_time is None:
                print('PY TestPlanetLongitudes({}): SearchRelativeLongitudes failed'.format(name))
                return 1
            if count >= 2:
                # Check for consistent intervals.
                # Mainly I don't want to skip over an event!
                day_diff = found_time.tt - time.tt
                sum_diff += day_diff
                if count == 2:
                    min_diff = max_diff = day_diff
                else:
                    min_diff = vmin(min_diff, day_diff)
                    max_diff = vmax(max_diff, day_diff)
            geo = astronomy.GeoVector(body, found_time, True)
            dist = geo.Length()
            outfile.write('e {} {} {:0.16f} {:0.16f}\n'.format(name, event, found_time.tt, dist))
            # Search for the opposite longitude event next time.
            time = found_time
            rlon = 180.0 - rlon
    # Per-planet threshold for max/min interval ratio (inner planets vary more).
    if body == astronomy.Body.Mercury:
        thresh = 1.65
    elif body == astronomy.Body.Mars:
        thresh = 1.30
    else:
        thresh = 1.07
    # NOTE(review): min_diff/max_diff are only assigned once count reaches 2;
    # if the search loop found fewer than 2 events this would raise
    # UnboundLocalError here. Presumably the 500-year span guarantees many
    # events for every supported body -- confirm if new bodies are added.
    ratio = max_diff / min_diff
    Debug('PY TestPlanetLongitudes({:<7s}): {:5d} events, ratio={:5.3f}, file: {}'.format(name, count, ratio, outFileName))
    if ratio > thresh:
        print('PY TestPlanetLongitudes({}): EXCESSIVE EVENT INTERVAL RATIO'.format(name))
        return 1
    return 0
ElongTestData = [
    # Max elongation data obtained from:
    # http://www.skycaramba.com/greatest_elongations.shtml
    # Each tuple: (body, search start time, expected event time,
    #              expected elongation in degrees, visibility string).
    # The search start time is 10 days before the expected event time.
    ( astronomy.Body.Mercury, "2010-01-17T05:22Z", "2010-01-27T05:22Z", 24.80, 'morning' ),
    ( astronomy.Body.Mercury, "2010-05-16T02:15Z", "2010-05-26T02:15Z", 25.10, 'morning' ),
    ( astronomy.Body.Mercury, "2010-09-09T17:24Z", "2010-09-19T17:24Z", 17.90, 'morning' ),
    ( astronomy.Body.Mercury, "2010-12-30T14:33Z", "2011-01-09T14:33Z", 23.30, 'morning' ),
    ( astronomy.Body.Mercury, "2011-04-27T19:03Z", "2011-05-07T19:03Z", 26.60, 'morning' ),
    ( astronomy.Body.Mercury, "2011-08-24T05:52Z", "2011-09-03T05:52Z", 18.10, 'morning' ),
    ( astronomy.Body.Mercury, "2011-12-13T02:56Z", "2011-12-23T02:56Z", 21.80, 'morning' ),
    ( astronomy.Body.Mercury, "2012-04-08T17:22Z", "2012-04-18T17:22Z", 27.50, 'morning' ),
    ( astronomy.Body.Mercury, "2012-08-06T12:04Z", "2012-08-16T12:04Z", 18.70, 'morning' ),
    ( astronomy.Body.Mercury, "2012-11-24T22:55Z", "2012-12-04T22:55Z", 20.60, 'morning' ),
    ( astronomy.Body.Mercury, "2013-03-21T22:02Z", "2013-03-31T22:02Z", 27.80, 'morning' ),
    ( astronomy.Body.Mercury, "2013-07-20T08:51Z", "2013-07-30T08:51Z", 19.60, 'morning' ),
    ( astronomy.Body.Mercury, "2013-11-08T02:28Z", "2013-11-18T02:28Z", 19.50, 'morning' ),
    ( astronomy.Body.Mercury, "2014-03-04T06:38Z", "2014-03-14T06:38Z", 27.60, 'morning' ),
    ( astronomy.Body.Mercury, "2014-07-02T18:22Z", "2014-07-12T18:22Z", 20.90, 'morning' ),
    ( astronomy.Body.Mercury, "2014-10-22T12:36Z", "2014-11-01T12:36Z", 18.70, 'morning' ),
    ( astronomy.Body.Mercury, "2015-02-14T16:20Z", "2015-02-24T16:20Z", 26.70, 'morning' ),
    ( astronomy.Body.Mercury, "2015-06-14T17:10Z", "2015-06-24T17:10Z", 22.50, 'morning' ),
    ( astronomy.Body.Mercury, "2015-10-06T03:20Z", "2015-10-16T03:20Z", 18.10, 'morning' ),
    ( astronomy.Body.Mercury, "2016-01-28T01:22Z", "2016-02-07T01:22Z", 25.60, 'morning' ),
    ( astronomy.Body.Mercury, "2016-05-26T08:45Z", "2016-06-05T08:45Z", 24.20, 'morning' ),
    ( astronomy.Body.Mercury, "2016-09-18T19:27Z", "2016-09-28T19:27Z", 17.90, 'morning' ),
    ( astronomy.Body.Mercury, "2017-01-09T09:42Z", "2017-01-19T09:42Z", 24.10, 'morning' ),
    ( astronomy.Body.Mercury, "2017-05-07T23:19Z", "2017-05-17T23:19Z", 25.80, 'morning' ),
    ( astronomy.Body.Mercury, "2017-09-02T10:14Z", "2017-09-12T10:14Z", 17.90, 'morning' ),
    ( astronomy.Body.Mercury, "2017-12-22T19:48Z", "2018-01-01T19:48Z", 22.70, 'morning' ),
    ( astronomy.Body.Mercury, "2018-04-19T18:17Z", "2018-04-29T18:17Z", 27.00, 'morning' ),
    ( astronomy.Body.Mercury, "2018-08-16T20:35Z", "2018-08-26T20:35Z", 18.30, 'morning' ),
    ( astronomy.Body.Mercury, "2018-12-05T11:34Z", "2018-12-15T11:34Z", 21.30, 'morning' ),
    ( astronomy.Body.Mercury, "2019-04-01T19:40Z", "2019-04-11T19:40Z", 27.70, 'morning' ),
    ( astronomy.Body.Mercury, "2019-07-30T23:08Z", "2019-08-09T23:08Z", 19.00, 'morning' ),
    ( astronomy.Body.Mercury, "2019-11-18T10:31Z", "2019-11-28T10:31Z", 20.10, 'morning' ),
    ( astronomy.Body.Mercury, "2010-03-29T23:32Z", "2010-04-08T23:32Z", 19.40, 'evening' ),
    ( astronomy.Body.Mercury, "2010-07-28T01:03Z", "2010-08-07T01:03Z", 27.40, 'evening' ),
    ( astronomy.Body.Mercury, "2010-11-21T15:42Z", "2010-12-01T15:42Z", 21.50, 'evening' ),
    ( astronomy.Body.Mercury, "2011-03-13T01:07Z", "2011-03-23T01:07Z", 18.60, 'evening' ),
    ( astronomy.Body.Mercury, "2011-07-10T04:56Z", "2011-07-20T04:56Z", 26.80, 'evening' ),
    ( astronomy.Body.Mercury, "2011-11-04T08:40Z", "2011-11-14T08:40Z", 22.70, 'evening' ),
    ( astronomy.Body.Mercury, "2012-02-24T09:39Z", "2012-03-05T09:39Z", 18.20, 'evening' ),
    ( astronomy.Body.Mercury, "2012-06-21T02:00Z", "2012-07-01T02:00Z", 25.70, 'evening' ),
    ( astronomy.Body.Mercury, "2012-10-16T21:59Z", "2012-10-26T21:59Z", 24.10, 'evening' ),
    ( astronomy.Body.Mercury, "2013-02-06T21:24Z", "2013-02-16T21:24Z", 18.10, 'evening' ),
    ( astronomy.Body.Mercury, "2013-06-02T16:45Z", "2013-06-12T16:45Z", 24.30, 'evening' ),
    ( astronomy.Body.Mercury, "2013-09-29T09:59Z", "2013-10-09T09:59Z", 25.30, 'evening' ),
    ( astronomy.Body.Mercury, "2014-01-21T10:00Z", "2014-01-31T10:00Z", 18.40, 'evening' ),
    ( astronomy.Body.Mercury, "2014-05-15T07:06Z", "2014-05-25T07:06Z", 22.70, 'evening' ),
    ( astronomy.Body.Mercury, "2014-09-11T22:20Z", "2014-09-21T22:20Z", 26.40, 'evening' ),
    ( astronomy.Body.Mercury, "2015-01-04T20:26Z", "2015-01-14T20:26Z", 18.90, 'evening' ),
    ( astronomy.Body.Mercury, "2015-04-27T04:46Z", "2015-05-07T04:46Z", 21.20, 'evening' ),
    ( astronomy.Body.Mercury, "2015-08-25T10:20Z", "2015-09-04T10:20Z", 27.10, 'evening' ),
    ( astronomy.Body.Mercury, "2015-12-19T03:11Z", "2015-12-29T03:11Z", 19.70, 'evening' ),
    ( astronomy.Body.Mercury, "2016-04-08T14:00Z", "2016-04-18T14:00Z", 19.90, 'evening' ),
    ( astronomy.Body.Mercury, "2016-08-06T21:24Z", "2016-08-16T21:24Z", 27.40, 'evening' ),
    ( astronomy.Body.Mercury, "2016-12-01T04:36Z", "2016-12-11T04:36Z", 20.80, 'evening' ),
    ( astronomy.Body.Mercury, "2017-03-22T10:24Z", "2017-04-01T10:24Z", 19.00, 'evening' ),
    ( astronomy.Body.Mercury, "2017-07-20T04:34Z", "2017-07-30T04:34Z", 27.20, 'evening' ),
    ( astronomy.Body.Mercury, "2017-11-14T00:32Z", "2017-11-24T00:32Z", 22.00, 'evening' ),
    ( astronomy.Body.Mercury, "2018-03-05T15:07Z", "2018-03-15T15:07Z", 18.40, 'evening' ),
    ( astronomy.Body.Mercury, "2018-07-02T05:24Z", "2018-07-12T05:24Z", 26.40, 'evening' ),
    ( astronomy.Body.Mercury, "2018-10-27T15:25Z", "2018-11-06T15:25Z", 23.30, 'evening' ),
    ( astronomy.Body.Mercury, "2019-02-17T01:23Z", "2019-02-27T01:23Z", 18.10, 'evening' ),
    ( astronomy.Body.Mercury, "2019-06-13T23:14Z", "2019-06-23T23:14Z", 25.20, 'evening' ),
    ( astronomy.Body.Mercury, "2019-10-10T04:00Z", "2019-10-20T04:00Z", 24.60, 'evening' ),
    ( astronomy.Body.Venus, "2010-12-29T15:57Z", "2011-01-08T15:57Z", 47.00, 'morning' ),
    ( astronomy.Body.Venus, "2012-08-05T08:59Z", "2012-08-15T08:59Z", 45.80, 'morning' ),
    ( astronomy.Body.Venus, "2014-03-12T19:25Z", "2014-03-22T19:25Z", 46.60, 'morning' ),
    ( astronomy.Body.Venus, "2015-10-16T06:57Z", "2015-10-26T06:57Z", 46.40, 'morning' ),
    ( astronomy.Body.Venus, "2017-05-24T13:09Z", "2017-06-03T13:09Z", 45.90, 'morning' ),
    ( astronomy.Body.Venus, "2018-12-27T04:24Z", "2019-01-06T04:24Z", 47.00, 'morning' ),
    ( astronomy.Body.Venus, "2010-08-10T03:19Z", "2010-08-20T03:19Z", 46.00, 'evening' ),
    ( astronomy.Body.Venus, "2012-03-17T08:03Z", "2012-03-27T08:03Z", 46.00, 'evening' ),
    ( astronomy.Body.Venus, "2013-10-22T08:00Z", "2013-11-01T08:00Z", 47.10, 'evening' ),
    ( astronomy.Body.Venus, "2015-05-27T18:46Z", "2015-06-06T18:46Z", 45.40, 'evening' ),
    ( astronomy.Body.Venus, "2017-01-02T13:19Z", "2017-01-12T13:19Z", 47.10, 'evening' ),
    ( astronomy.Body.Venus, "2018-08-07T17:02Z", "2018-08-17T17:02Z", 45.90, 'evening' )
]
def TestMaxElong(body, searchText, eventText, angle, visibility):
    """Verify one maximum-elongation search against its expected event.

    Searches from searchText onward, then requires the found event to match
    eventText within 0.6 hours, 'angle' within 3.4 arcmin, and 'visibility'
    exactly. Returns 0 on success, 1 on failure.
    """
    name = body.name
    t_search = astronomy.Time.Parse(searchText)
    t_event = astronomy.Time.Parse(eventText)
    found = astronomy.SearchMaxElongation(body, t_search)
    if found is None:
        print('PY TestMaxElong({} {}): SearchMaxElongation failed.'.format(name, searchText))
        return 1
    if found.visibility != visibility:
        print('PY TestMaxElong({} {}): SearchMaxElongation returned visibility {}, but expected {}'.format(name, searchText, found.visibility.name, visibility.name))
        return 1
    diff_hours = 24.0 * vabs(found.time.tt - t_event.tt)
    diff_arcmin = 60.0 * vabs(found.elongation - angle)
    Debug('PY TestMaxElong: {:<7s} {:<7s} elong={:5.2f} ({:4.2f} arcmin, {:5.3f} hours)'.format(name, visibility.name, found.elongation, diff_arcmin, diff_hours))
    if diff_hours > 0.6:
        print('PY TestMaxElong({} {}): EXCESSIVE HOUR ERROR.'.format(name, searchText))
        return 1
    if diff_arcmin > 3.4:
        print('PY TestMaxElong({} {}): EXCESSIVE ARCMIN ERROR.'.format(name, searchText))
        return 1
    return 0
def SearchElongTest():
    """Run every row of ElongTestData through TestMaxElong.

    Returns 1 on the first failing row, otherwise 0.
    """
    for row in ElongTestData:
        (body, searchText, eventText, angle, visibility) = row
        # Convert the visibility string ('morning'/'evening') to the enum member.
        if TestMaxElong(body, searchText, eventText, angle, astronomy.Visibility[visibility.title()]) != 0:
            return 1
    return 0
def Elongation():
    """Run all relative-longitude and maximum-elongation tests.

    Returns a truthy failure code from the first failing sub-test,
    otherwise the result of Pass('Elongation').
    """
    if TestElongFile('longitude/opposition_2018.txt', 0.0):
        return 1
    # (body, output file, label for the rlon==0 events: inferior conjunction
    # for inner planets, opposition for outer planets)
    longitude_cases = [
        (astronomy.Body.Mercury, "temp/py_longitude_Mercury.txt", "inf"),
        (astronomy.Body.Venus, "temp/py_longitude_Venus.txt", "inf"),
        (astronomy.Body.Mars, "temp/py_longitude_Mars.txt", "opp"),
        (astronomy.Body.Jupiter, "temp/py_longitude_Jupiter.txt", "opp"),
        (astronomy.Body.Saturn, "temp/py_longitude_Saturn.txt", "opp"),
        (astronomy.Body.Uranus, "temp/py_longitude_Uranus.txt", "opp"),
        (astronomy.Body.Neptune, "temp/py_longitude_Neptune.txt", "opp"),
        (astronomy.Body.Pluto, "temp/py_longitude_Pluto.txt", "opp"),
    ]
    for (body, outFileName, zeroLonEventName) in longitude_cases:
        if TestPlanetLongitudes(body, outFileName, zeroLonEventName):
            return 1
    if SearchElongTest():
        return 1
    return Pass('Elongation')
#-----------------------------------------------------------------------------------------------------------
def MonthNumber(mtext):
    """Convert a three-letter English month abbreviation ('Jan'..'Dec') to 1..12.

    Raises ValueError if mtext is not a recognized abbreviation.
    """
    abbreviations = ('Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec')
    return abbreviations.index(mtext) + 1
def ParseJplHorizonsDateTime(line):
    """Parse the leading 'YYYY-Mon-DD HH:MM' timestamp of a JPL Horizons data line.

    Returns (time, rest) where 'time' is an astronomy.Time for the timestamp and
    'rest' is the remainder of the line; returns (None, None) if the line does
    not start with a timestamp in that format.
    """
    m = re.match(r'^\s*(\d{4})-(Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec)-(\d{2})\s(\d{2}):(\d{2})\s+(.*)$', line)
    if not m:
        return None, None
    year = int(m.group(1))
    month = MonthNumber(m.group(2))
    day = int(m.group(3))
    hour = int(m.group(4))
    minute = int(m.group(5))
    rest = m.group(6)
    time = astronomy.Time.Make(year, month, day, hour, minute, 0)
    return time, rest
def CheckMagnitudeData(body, filename):
    """Compare calculated visual magnitudes against a JPL Horizons data file.

    Lines without a parseable timestamp or containing 'n.a.' are skipped.
    Each valid row must match the calculated magnitude within 0.012.
    Returns 0 on success, 1 on failure (excessive error or no usable rows).
    """
    limit = 0.012
    sum_squared_diff = 0.0
    with open(filename, 'rt') as infile:
        count = lnum = 0
        for line in infile:
            lnum += 1
            line = line.strip()
            (time, rest) = ParseJplHorizonsDateTime(line)
            # Only process rows with a valid timestamp and no missing values.
            if (time is not None) and (rest is not None) and not ('n.a.' in rest):
                data = [float(t) for t in rest.split()]
                if len(data) != 7:
                    print('PY CheckMagnitudeData({} line {}): invalid data format'.format(filename, lnum))
                    return 1
                (mag, sbrt, dist, rdot, delta, deldot, phase_angle) = data
                illum = astronomy.Illumination(body, time)
                diff = illum.mag - mag
                if vabs(diff) > limit:
                    print('PY CheckMagnitudeData({} line {}): EXCESSIVE ERROR: correct mag={}, calc mag={}'.format(filename, lnum, mag, illum.mag))
                    return 1
                sum_squared_diff += diff * diff
                # Track the min/max signed error across all rows.
                if count == 0:
                    diff_lo = diff_hi = diff
                else:
                    diff_lo = vmin(diff_lo, diff)
                    diff_hi = vmax(diff_hi, diff)
                count += 1
    if count == 0:
        print('PY CheckMagnitudeData: Did not find any data in file: {}'.format(filename))
        return 1
    rms = sqrt(sum_squared_diff / count)
    Debug('PY CheckMagnitudeData: {:<21s} {:5d} rows diff_lo={:0.4f} diff_hi={:0.4f} rms={:0.4f}'.format(filename, count, diff_lo, diff_hi, rms))
    return 0
def CheckSaturn():
    """Consistency-check Saturn's magnitude and ring tilt against stored values.

    Returns 0 on success, 1 if any data point exceeds the tolerances
    (1.0e-4 magnitude, 3.0e-5 degrees of ring tilt).
    """
    # JPL Horizons does not include Saturn's rings in its magnitude models.
    # I still don't have authoritative test data for Saturn's magnitude.
    # For now, I just test for consistency with Paul Schlyter's formulas at:
    # http://www.stjarnhimlen.se/comp/ppcomp.html#15
    # Each tuple: (UTC time, expected magnitude, expected ring tilt in degrees).
    data = [
        ( "1972-01-01T00:00Z", -0.31725492, +24.43386475 ),
        ( "1980-01-01T00:00Z", +0.85796177, -1.72627324 ),
        ( "2009-09-04T00:00Z", +1.01932560, +0.01834451 ),
        ( "2017-06-15T00:00Z", -0.12303373, -26.60068380 ),
        ( "2019-05-01T00:00Z", +0.33124502, -23.47173574 ),
        ( "2025-09-25T00:00Z", +0.50543708, +1.69118986 ),
        ( "2032-05-15T00:00Z", -0.04649573, +26.95238680 )
    ]
    error = 0
    for (dtext, mag, tilt) in data:
        time = astronomy.Time.Parse(dtext)
        illum = astronomy.Illumination(astronomy.Body.Saturn, time)
        Debug('PY Saturn: date={} calc mag={:12.8f} ring_tilt={:12.8f}'.format(dtext, illum.mag, illum.ring_tilt))
        mag_diff = vabs(illum.mag - mag)
        if mag_diff > 1.0e-4:
            print('PY CheckSaturn: Excessive magnitude error {}'.format(mag_diff))
            error = 1
        tilt_diff = vabs(illum.ring_tilt - tilt)
        if (tilt_diff > 3.0e-5):
            print('PY CheckSaturn: Excessive ring tilt error {}'.format(tilt_diff))
            error = 1
    return error
def TestMaxMag(body, filename):
    """Verify SearchPeakMagnitude against JPL Horizons peak-brightness intervals.

    Returns 0 on success, 1 if any found peak deviates more than 7.1 hours
    in time or 0.005 in magnitude from the file's data.
    """
    # Example of input data:
    #
    # 2001-02-21T08:00Z 2001-02-27T08:00Z 23.17 19.53 -4.84
    #
    # JPL Horizons test data has limited floating point precision in the magnitude values.
    # There is a pair of dates for the beginning and end of the max magnitude period,
    # given the limited precision. We pick the point halfway between as the supposed max magnitude time.
    with open(filename, 'rt') as infile:
        lnum = 0
        search_time = astronomy.Time.Make(2001, 1, 1, 0, 0, 0)
        for line in infile:
            lnum += 1
            line = line.strip()
            tokenlist = line.split()
            # Silently skip any line that doesn't have the expected 5 tokens.
            if len(tokenlist) == 5:
                time1 = astronomy.Time.Parse(tokenlist[0])
                time2 = astronomy.Time.Parse(tokenlist[1])
                if time1 and time2:
                    # Midpoint of the reported max-magnitude interval.
                    center_time = time1.AddDays(0.5*(time2.ut - time1.ut))
                    correct_mag = float(tokenlist[4])
                    illum = astronomy.SearchPeakMagnitude(body, search_time)
                    mag_diff = vabs(illum.mag - correct_mag)
                    hours_diff = 24.0 * vabs(illum.time.ut - center_time.ut)
                    Debug('PY TestMaxMag: mag_diff={:0.3f}, hours_diff={:0.3f}'.format(mag_diff, hours_diff))
                    if hours_diff > 7.1:
                        print('PY TestMaxMag({} line {}): EXCESSIVE TIME DIFFERENCE.'.format(filename, lnum))
                        return 1
                    if mag_diff > 0.005:
                        print('PY TestMaxMag({} line {}): EXCESSIVE MAGNITUDE DIFFERENCE.'.format(filename, lnum))
                        return 1
                    # Continue the next search after the end of this interval.
                    search_time = time2
    Debug('PY TestMaxMag: processed {} lines from file {}'.format(lnum, filename))
    return 0
def Magnitude():
    """Run all magnitude tests against the JPL Horizons data files.

    Returns 0 if every check passes, otherwise 1.
    """
    nfailed = 0
    for (body, datafile) in [
        (astronomy.Body.Sun, 'magnitude/Sun.txt'),
        (astronomy.Body.Moon, 'magnitude/Moon.txt'),
        (astronomy.Body.Mercury, 'magnitude/Mercury.txt'),
        (astronomy.Body.Venus, 'magnitude/Venus.txt'),
        (astronomy.Body.Mars, 'magnitude/Mars.txt'),
        (astronomy.Body.Jupiter, 'magnitude/Jupiter.txt'),
    ]:
        nfailed += CheckMagnitudeData(body, datafile)
    # Saturn gets a dedicated consistency check (rings affect its magnitude).
    nfailed += CheckSaturn()
    for (body, datafile) in [
        (astronomy.Body.Uranus, 'magnitude/Uranus.txt'),
        (astronomy.Body.Neptune, 'magnitude/Neptune.txt'),
        (astronomy.Body.Pluto, 'magnitude/Pluto.txt'),
    ]:
        nfailed += CheckMagnitudeData(body, datafile)
    nfailed += TestMaxMag(astronomy.Body.Venus, 'magnitude/maxmag_Venus.txt')
    if nfailed != 0:
        print('PY Magnitude: failed {} test(s).'.format(nfailed))
        return 1
    print('PY Magnitude: PASS')
    return 0
#-----------------------------------------------------------------------------------------------------------
def ToggleDir(dir):
    """Flip a rise/set search direction to its opposite (Rise <-> Set)."""
    # The Direction enum values negate to give the opposite direction.
    flipped = -dir.value
    return astronomy.Direction(flipped)
def RiseSetSlot(ut1, ut2, direction, observer):
    """Verify rise/set searches resolve to the correct neighboring event.

    For 99 time slots strictly between consecutive same-direction events at
    ut1 and ut2, a backward search must find the event at ut1 and a forward
    search must find the event at ut2, each within 0.13 seconds.
    Returns 0 on success, 1 on failure.
    """
    maxDiff = 0.0
    nslots = 100
    for i in range(1, nslots):
        # Interpolate a time strictly between the two known events.
        ut = ut1 + (i / nslots)*(ut2 - ut1)
        time = astronomy.Time(ut)
        # Backward search (limitDays = -1.0) should land on the event at ut1.
        result = astronomy.SearchRiseSet(astronomy.Body.Sun, observer, direction, time, -1.0)
        if not result:
            print('PY RiseSetSlot: backward slot search failed for {} before {}'.format(direction, time))
            return 1
        diff = SECONDS_PER_DAY * vabs(result.ut - ut1)
        maxDiff = max(maxDiff, diff)
        # Forward search (limitDays = +1.0) should land on the event at ut2.
        result = astronomy.SearchRiseSet(astronomy.Body.Sun, observer, direction, time, +1.0)
        if not result:
            print('PY RiseSetSlot: forward slot search failed for {} after {}'.format(direction, time))
            return 1
        diff = SECONDS_PER_DAY * vabs(result.ut - ut2)
        maxDiff = max(maxDiff, diff)
    if maxDiff > 0.13:
        print('PY RiseSetSlot: EXCESSIVE {} slot-test discrepancy = {:0.6f} seconds.'.format(direction, maxDiff))
        return 1
    Debug('PY RiseSetSlot: {} slot-test discrepancy = {:0.6f} seconds.'.format(direction, maxDiff))
    return 0
def RiseSetReverse():
    """Verify sunrise/sunset searches are consistent forward and backward in time.

    Finds 5000 alternating rise/set events going forward from 2022-01-01, checks
    the intervals between them are sane, then walks the same events in reverse
    and requires both search directions to agree within 0.1 seconds. Finally
    runs slot tests between consecutive sunrises and consecutive sunsets.
    Returns 0 on success, 1 on failure.
    """
    nsamples = 5000
    nudge = 0.1   # days to step past each found event before searching for the next
    utList = []
    observer = astronomy.Observer(30.5, -90.7, 0.0)
    dtMin = +1000.0
    dtMax = -1000.0
    maxDiff = 0.0
    # Find alternating sunrise/sunset events in forward chronological order.
    dir = astronomy.Direction.Rise
    time = astronomy.Time.Make(2022, 1, 1, 0, 0, 0)
    for i in range(nsamples):
        result = astronomy.SearchRiseSet(astronomy.Body.Sun, observer, dir, time, +1.0)
        if not result:
            print('PY RiseSetReverse: cannot find {} event after {}'.format(dir, time))
            return 1
        utList.append(result.ut)
        if i > 0:
            # Check the time between consecutive sunrise/sunset events.
            # These will vary considerably with the seasons, so just make sure we don't miss any entirely.
            dt = v(utList[i] - utList[i-1])
            dtMin = min(dtMin, dt)
            dtMax = max(dtMax, dt)
        dir = ToggleDir(dir)
        time = result.AddDays(+nudge)
    Debug('PY RiseSetReverse: dtMin={:0.6f} days, dtMax={:0.6f} days.'.format(dtMin, dtMax))
    if (dtMin < 0.411) or (dtMax > 0.589):
        print('PY RiseSetReverse: Invalid intervals between sunrise/sunset.')
        return 1
    # Perform the same search in reverse. Verify we get consistent rise/set times.
    for i in range(nsamples-1, -1, -1):
        dir = ToggleDir(dir)
        result = astronomy.SearchRiseSet(astronomy.Body.Sun, observer, dir, time, -1.0)
        if not result:
            # BUG FIX: this format string previously contained '{]', which made
            # str.format raise ValueError instead of reporting the failure.
            print('PY RiseSetReverse: cannot find {} event before {}.'.format(dir, time))
            return 1
        diff = SECONDS_PER_DAY * vabs(utList[i] - result.ut)
        maxDiff = max(maxDiff, diff)
        time = result.AddDays(-nudge)
    if maxDiff > 0.1:
        print('PY RiseSetReverse: EXCESSIVE forward/backward discrepancy = {:0.6f} seconds.'.format(maxDiff))
        return 1
    Debug('PY RiseSetReverse: forward/backward discrepancy = {:0.6f} seconds.'.format(maxDiff))
    # All even indexes in utList hold sunrise times.
    # All odd indexes in utList hold sunset times.
    # Verify that forward/backward searches for consecutive sunrises/sunsets
    # resolve correctly for 100 time slots between them.
    k = (nsamples // 2) & ~1
    return (
        RiseSetSlot(utList[k], utList[k+2], astronomy.Direction.Rise, observer) or
        RiseSetSlot(utList[k+1], utList[k+3], astronomy.Direction.Set, observer) or
        Pass('RiseSetReverse')
    )
#-----------------------------------------------------------------------------------------------------------
def RiseSet(filename = 'riseset/riseset.txt'):
    """Verify rise/set searches against a file of observed rise/set times.

    Each file line holds: body name, longitude, latitude, UTC time, and
    'r' (rise) or 's' (set). Events for each body/location are consumed in
    chronological order; each prediction must match within 1.18 minutes.
    Returns 0 on success, 1 on failure.
    """
    sum_minutes = 0.0
    max_minutes = 0.0
    nudge_days = 0.01   # step past each found event before the next search
    observer = None
    current_body = None
    a_dir = 0
    b_dir = 0
    with open(filename, 'rt') as infile:
        lnum = 0
        for line in infile:
            lnum += 1
            line = line.strip()
            # Moon 103 -61 1944-01-02T17:08Z s
            # Moon 103 -61 1944-01-03T05:47Z r
            m = re.match(r'^([A-Za-z]+)\s+(-?[0-9\.]+)\s+(-?[0-9\.]+)\s+(\d+)-(\d+)-(\d+)T(\d+):(\d+)Z\s+([sr])$', line)
            if not m:
                print('PY RiseSet({} line {}): invalid data format'.format(filename, lnum))
                return 1
            name = m.group(1)
            longitude = float(m.group(2))
            latitude = float(m.group(3))
            year = int(m.group(4))
            month = int(m.group(5))
            day = int(m.group(6))
            hour = int(m.group(7))
            minute = int(m.group(8))
            kind = m.group(9)
            correct_time = astronomy.Time.Make(year, month, day, hour, minute, 0)
            direction = astronomy.Direction.Rise if kind == 'r' else astronomy.Direction.Set
            body = astronomy.BodyCode(name)
            if body == astronomy.Body.Invalid:
                print('PY RiseSet({} line {}): invalid body name "{}"'.format(filename, lnum, name))
                return 1
            # Every time we see a new geographic location, start a new iteration
            # of finding all rise/set times for that UTC calendar year.
            if (observer is None) or (observer.latitude != latitude) or (observer.longitude != longitude) or (current_body != body):
                current_body = body
                observer = astronomy.Observer(latitude, longitude, 0)
                r_search_date = s_search_date = astronomy.Time.Make(year, 1, 1, 0, 0, 0)
                b_evt = None
                Debug('PY RiseSet: {:<7s} lat={:0.1f} lon={:0.1f}'.format(name, latitude, longitude))
            if b_evt is not None:
                # Recycle the second event from the previous iteration as the first event.
                a_evt = b_evt
                a_dir = b_dir
                b_evt = None
            else:
                # Search for the next rise and next set; the earlier one is
                # the event this file line should correspond to.
                r_evt = astronomy.SearchRiseSet(body, observer, astronomy.Direction.Rise, r_search_date, 366.0)
                if r_evt is None:
                    print('PY RiseSet({} line {}): rise search failed'.format(filename, lnum))
                    return 1
                s_evt = astronomy.SearchRiseSet(body, observer, astronomy.Direction.Set, s_search_date, 366.0)
                if s_evt is None:
                    print('PY RiseSet({} line {}): set search failed'.format(filename, lnum))
                    return 1
                # Expect the current event to match the earlier of the found times.
                if r_evt.tt < s_evt.tt:
                    a_evt = r_evt
                    b_evt = s_evt
                    a_dir = astronomy.Direction.Rise
                    b_dir = astronomy.Direction.Set
                else:
                    a_evt = s_evt
                    b_evt = r_evt
                    a_dir = astronomy.Direction.Set
                    b_dir = astronomy.Direction.Rise
                # Nudge the event times forward a tiny amount.
                r_search_date = r_evt.AddDays(nudge_days)
                s_search_date = s_evt.AddDays(nudge_days)
            if a_dir != direction:
                print('PY RiseSet({} line {}): expected dir={} but found {}'.format(filename, lnum, direction, a_dir))
                return 1
            error_minutes = (24.0 * 60.0) * vabs(a_evt.tt - correct_time.tt)
            sum_minutes += error_minutes ** 2
            max_minutes = vmax(max_minutes, error_minutes)
            if error_minutes > 1.18:
                print('PY RiseSet({} line {}): excessive prediction time error = {} minutes.'.format(filename, lnum, error_minutes))
                print('    correct = {}, calculated = {}'.format(correct_time, a_evt))
                return 1
    rms_minutes = sqrt(sum_minutes / lnum)
    print('PY RiseSet: passed {} lines: time errors in minutes: rms={:0.4f}, max={:0.4f}'.format(lnum, rms_minutes, max_minutes))
    return 0
#-----------------------------------------------------------------------------------------------------------
def LunarApsis(filename = 'apsides/moon.txt'):
    """Validate lunar apsis searches against reference data in *filename*.

    Chains through every perigee/apogee event listed in the file, comparing
    the predicted kind, time, and distance against the expected values.
    Returns 0 on success, 1 on any mismatch or formatting problem.
    """
    biggest_time_error = 0.0    # worst time discrepancy seen, in minutes
    biggest_dist_error = 0.0    # worst distance discrepancy seen, in km
    with open(filename, 'rt') as infile:
        start_time = astronomy.Time.Make(2001, 1, 1, 0, 0, 0)
        lnum = 0
        for line in infile:
            lnum += 1
            # The first event is found from scratch; later events chain forward.
            if lnum == 1:
                apsis = astronomy.SearchLunarApsis(start_time)
            else:
                apsis = astronomy.NextLunarApsis(apsis)
            tokenlist = line.split()
            if len(tokenlist) != 3:
                print('PY LunarApsis({} line {}): invalid data format'.format(filename, lnum))
                return 1
            correct_time = astronomy.Time.Parse(tokenlist[1])
            if not correct_time:
                print('PY LunarApsis({} line {}): invalid time'.format(filename, lnum))
                return 1
            kind = astronomy.ApsisKind(int(tokenlist[0]))
            if apsis.kind != kind:
                print('PY LunarApsis({} line {}): Expected kind {} but found {}'.format(filename, lnum, kind, apsis.kind))
                return 1
            dist_km = float(tokenlist[2])
            diff_minutes = (24.0 * 60.0) * vabs(apsis.time.ut - correct_time.ut)
            diff_km = vabs(apsis.dist_km - dist_km)
            if diff_minutes > 35.0:
                print('PY LunarApsis({} line {}): Excessive time error = {} minutes.'.format(filename, lnum, diff_minutes))
                return 1
            if diff_km > 25.0:
                print('PY LunarApsis({} line {}): Excessive distance error = {} km.'.format(filename, lnum, diff_km))
                return 1
            biggest_time_error = vmax(biggest_time_error, diff_minutes)
            biggest_dist_error = vmax(biggest_dist_error, diff_km)
    print('PY LunarApsis: found {} events, max time error = {:0.3f} minutes, max distance error = {:0.3f} km.'.format(lnum, biggest_time_error, biggest_dist_error))
    return 0
#-----------------------------------------------------------------------------------------------------------
def CompareMatrices(caller, a, b, tolerance):
    """Abort the test run if any element of rotation matrix *a* differs from *b* by more than *tolerance*."""
    for row in range(3):
        for col in range(3):
            delta = vabs(a.rot[row][col] - b.rot[row][col])
            if delta > tolerance:
                print('PY CompareMatrices ERROR({}): matrix[{}][{}] = {}, expected {}, diff {}'.format(caller, row, col, a.rot[row][col], b.rot[row][col], delta))
                sys.exit(1)
def CompareVectors(caller, a, b, tolerance):
    """Abort the test run if any Cartesian component of vector *a* differs from *b* by more than *tolerance*."""
    # Check components in the same x, y, z order as before, with identical messages.
    for axis in ('x', 'y', 'z'):
        got = getattr(a, axis)
        want = getattr(b, axis)
        delta = vabs(got - want)
        if delta > tolerance:
            print('PY CompareVectors ERROR({}): vector {} = {}, expected {}, diff {}'.format(caller, axis, got, want, delta))
            sys.exit(1)
def Rotation_MatrixInverse():
    """Verify that InverseRotation transposes a rotation matrix exactly."""
    original = astronomy.RotationMatrix([
        [1, 4, 7],
        [2, 5, 8],
        [3, 6, 9]
    ])
    # The expected inverse is the transpose of 'original'.
    expected = astronomy.RotationMatrix([
        [1, 2, 3],
        [4, 5, 6],
        [7, 8, 9]
    ])
    inverted = astronomy.InverseRotation(original)
    CompareMatrices('Rotation_MatrixInverse', inverted, expected, 0)
def Rotation_MatrixMultiply():
    """Verify that CombineRotation computes the expected matrix product."""
    first = astronomy.RotationMatrix([
        [1, 4, 7],
        [2, 5, 8],
        [3, 6, 9]
    ])
    second = astronomy.RotationMatrix([
        [10, 13, 16],
        [11, 14, 17],
        [12, 15, 18]
    ])
    # Expected value of the product of 'second' and 'first'.
    expected = astronomy.RotationMatrix([
        [84, 201, 318],
        [90, 216, 342],
        [96, 231, 366]
    ])
    product = astronomy.CombineRotation(second, first)
    CompareMatrices('Rotation_MatrixMultiply', product, expected, 0)
def VectorDiff(a, b):
    """Return the Euclidean distance between vectors *a* and *b* (objects with x, y, z attributes)."""
    return sqrt((a.x - b.x)**2 + (a.y - b.y)**2 + (a.z - b.z)**2)
def Test_GAL_EQJ_NOVAS(filename):
    """Verify the EQJ->GAL rotation against reference data in *filename*.

    Each input line holds four numbers: RA (sidereal hours), DEC (degrees),
    galactic longitude and latitude (degrees). (Per the function name, the
    data was presumably generated with NOVAS — confirm with the data file's
    provenance.) Exits the process on any excessive angular error.
    """
    THRESHOLD_SECONDS = 8.8     # max tolerated angular error, in arcseconds
    rot = astronomy.Rotation_EQJ_GAL()
    time = astronomy.Time(0.0) # placeholder time - value does not matter
    with open(filename, 'rt') as infile:
        lnum = 0
        max_diff = 0.0
        for line in infile:
            lnum += 1
            token = line.split()
            if len(token) != 4:
                print('PY Test_GAL_EQJ_NOVAS({} line {}): Wrong number of tokens.'.format(filename, lnum))
                sys.exit(1)
            ra = float(token[0])
            dec = float(token[1])
            glon = float(token[2])
            glat = float(token[3])
            # Convert RA from sidereal hours to degrees (15 degrees per hour).
            eqj_sphere = astronomy.Spherical(dec, 15.0*ra, 1.0)
            eqj_vec = astronomy.VectorFromSphere(eqj_sphere, time)
            gal_vec = astronomy.RotateVector(rot, eqj_vec)
            gal_sphere = astronomy.SphereFromVector(gal_vec)
            dlat = gal_sphere.lat - glat
            # Scale the longitude difference by cos(latitude) so it is a true angular offset.
            dlon = math.cos(math.radians(glat)) * (gal_sphere.lon - glon)
            # Total angular error in arcseconds.
            diff = 3600.0 * math.hypot(dlon, dlat)
            if diff > THRESHOLD_SECONDS:
                print('PY Test_GAL_EQJ_NOVAS({} line {}): EXCESSIVE ERROR = {:0.3f}'.format(filename, lnum, diff))
                sys.exit(1)
            if diff > max_diff:
                max_diff = diff
    Debug('PY Test_GAL_EQJ_NOVAS: PASS. max_diff = {:0.3f} arcseconds.'.format(max_diff))
    return 0
def Test_EQJ_EQD(body):
    """Verify the EQJ <-> EQD rotation matrices for the given *body*.

    Rotates an equatorial J2000 position into equator-of-date coordinates
    and compares against astronomy.Equator's own of-date result, then
    checks that the inverse rotation recovers the original vector.
    Exits the process on excessive error.
    """
    # Verify conversion of equatorial J2000 to equatorial of-date, and back.
    # Use established functions to calculate spherical coordinates for the body, in both EQJ and EQD.
    time = astronomy.Time.Make(2019, 12, 8, 20, 50, 0)
    observer = astronomy.Observer(+35, -85, 0)
    eq2000 = astronomy.Equator(body, time, observer, False, True)
    eqdate = astronomy.Equator(body, time, observer, True, True)
    # Convert EQJ spherical coordinates to vector.
    v2000 = eq2000.vec
    # Find rotation matrix.
    r = astronomy.Rotation_EQJ_EQD(time)
    # Rotate EQJ vector to EQD vector.
    vdate = astronomy.RotateVector(r, v2000)
    # Convert vector back to angular equatorial coordinates.
    equcheck = astronomy.EquatorFromVector(vdate)
    # Compare the result with the eqdate.
    ra_diff = vabs(equcheck.ra - eqdate.ra)
    dec_diff = vabs(equcheck.dec - eqdate.dec)
    dist_diff = vabs(equcheck.dist - eqdate.dist)
    Debug('PY Test_EQJ_EQD: {} ra={}, dec={}, dist={}, ra_diff={}, dec_diff={}, dist_diff={}'.format(
        body.name, eqdate.ra, eqdate.dec, eqdate.dist, ra_diff, dec_diff, dist_diff
    ))
    if ra_diff > 1.0e-14 or dec_diff > 1.0e-14 or dist_diff > 4.0e-15:
        print('PY Test_EQJ_EQD: EXCESSIVE ERROR')
        sys.exit(1)
    # Perform the inverse conversion back to equatorial J2000 coordinates.
    ir = astronomy.Rotation_EQD_EQJ(time)
    t2000 = astronomy.RotateVector(ir, vdate)
    diff = VectorDiff(t2000, v2000)
    Debug('PY Test_EQJ_EQD: {} inverse diff = {}'.format(body.name, diff))
    if diff > 5.0e-15:
        print('PY Test_EQJ_EQD: EXCESSIVE INVERSE ERROR')
        sys.exit(1)
def Test_EQD_HOR(body):
    """Verify the EQD/EQJ <-> HOR rotation matrices for the given *body*.

    Compares vector rotations against astronomy.Horizon's own horizontal
    coordinates, then verifies each inverse rotation recovers the original
    vector. Exits the process on excessive error.
    """
    # Use existing functions to calculate horizontal coordinates of the body for the time+observer.
    time = astronomy.Time.Make(1970, 12, 13, 5, 15, 0)
    observer = astronomy.Observer(-37, +45, 0)
    eqd = astronomy.Equator(body, time, observer, True, True)
    Debug('PY Test_EQD_HOR {}: OFDATE ra={}, dec={}'.format(body.name, eqd.ra, eqd.dec))
    hor = astronomy.Horizon(time, observer, eqd.ra, eqd.dec, astronomy.Refraction.Normal)
    # Calculate the position of the body as an equatorial vector of date.
    vec_eqd = eqd.vec
    # Calculate rotation matrix to convert equatorial of-date vector to horizontal vector.
    rot = astronomy.Rotation_EQD_HOR(time, observer)
    # Rotate the equator of date vector to a horizontal vector.
    vec_hor = astronomy.RotateVector(rot, vec_eqd)
    # Convert the horizontal vector to horizontal angular coordinates.
    xsphere = astronomy.HorizonFromVector(vec_hor, astronomy.Refraction.Normal)
    diff_alt = vabs(xsphere.lat - hor.altitude)
    diff_az = vabs(xsphere.lon - hor.azimuth)
    Debug('PY Test_EQD_HOR {}: trusted alt={}, az={}; test alt={}, az={}; diff_alt={}, diff_az={}'.format(
        body.name, hor.altitude, hor.azimuth, xsphere.lat, xsphere.lon, diff_alt, diff_az))
    if diff_alt > 4.0e-14 or diff_az > 1.2e-13:
        print('PY Test_EQD_HOR: EXCESSIVE HORIZONTAL ERROR.')
        sys.exit(1)
    # Confirm that we can convert back to horizontal vector.
    check_hor = astronomy.VectorFromHorizon(xsphere, time, astronomy.Refraction.Normal)
    diff = VectorDiff(check_hor, vec_hor)
    Debug('PY Test_EQD_HOR {}: horizontal recovery: diff = {}'.format(body.name, diff))
    if diff > 3.0e-15:
        print('PY Test_EQD_HOR: EXCESSIVE ERROR IN HORIZONTAL RECOVERY.')
        sys.exit(1)
    # Verify the inverse translation from horizontal vector to equatorial of-date vector.
    irot = astronomy.Rotation_HOR_EQD(time, observer)
    check_eqd = astronomy.RotateVector(irot, vec_hor)
    diff = VectorDiff(check_eqd, vec_eqd)
    Debug('PY Test_EQD_HOR {}: OFDATE inverse rotation diff = {}'.format(body.name, diff))
    if diff > 2.7e-15:
        print('PY Test_EQD_HOR: EXCESSIVE OFDATE INVERSE HORIZONTAL ERROR.')
        sys.exit(1)
    # Exercise HOR to EQJ translation.
    eqj = astronomy.Equator(body, time, observer, False, True)
    vec_eqj = eqj.vec
    yrot = astronomy.Rotation_HOR_EQJ(time, observer)
    check_eqj = astronomy.RotateVector(yrot, vec_hor)
    diff = VectorDiff(check_eqj, vec_eqj)
    Debug('PY Test_EQD_HOR {}: J2000 inverse rotation diff = {}'.format(body.name, diff))
    if diff > 5.0e-15:
        print('PY Test_EQD_HOR: EXCESSIVE J2000 INVERSE HORIZONTAL ERROR.')
        sys.exit(1)
    # Verify the inverse translation: EQJ to HOR.
    zrot = astronomy.Rotation_EQJ_HOR(time, observer)
    another_hor = astronomy.RotateVector(zrot, vec_eqj)
    diff = VectorDiff(another_hor, vec_hor)
    Debug('PY Test_EQD_HOR {}: EQJ inverse rotation diff = {}'.format(body.name, diff))
    if diff > 6.0e-15:
        print('PY Test_EQD_HOR: EXCESSIVE EQJ INVERSE HORIZONTAL ERROR.')
        sys.exit(1)
# 3x3 identity rotation matrix: the expected result of composing any
# rotation with its inverse.
IdentityMatrix = astronomy.RotationMatrix([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
def CheckInverse(aname, bname, arot, brot):
    """Verify that rotation matrices *arot* and *brot* are inverses of each other (their product is the identity)."""
    crot = astronomy.CombineRotation(arot, brot)
    caller = 'CheckInverse({},{})'.format(aname, bname)
    CompareMatrices(caller, crot, IdentityMatrix, 2.0e-15)
def CheckCycle(cyclename, arot, brot, crot):
    """Verify that *crot* equals the inverse of (*arot* followed by *brot*)."""
    combined = astronomy.CombineRotation(arot, brot)
    expected = astronomy.InverseRotation(combined)
    CompareMatrices(cyclename, crot, expected, 2.0e-15)
def Test_RotRoundTrip():
    """Verify that every pair of forward/backward rotation matrix factory
    functions produces mutual inverses, and that composed rotation chains
    agree with the direct rotations. Exits the process on failure."""
    # In each round trip, calculate a forward rotation and a backward rotation.
    # Verify the two are inverse matrices.
    time = astronomy.Time.Make(2067, 5, 30, 14, 45, 0)
    observer = astronomy.Observer(+28, -82, 0)
    # Round trip #1: EQJ <==> EQD.
    eqj_eqd = astronomy.Rotation_EQJ_EQD(time)
    eqd_eqj = astronomy.Rotation_EQD_EQJ(time)
    CheckInverse('eqj_eqd', 'eqd_eqj', eqj_eqd, eqd_eqj)
    # Round trip #2: EQJ <==> ECL.
    eqj_ecl = astronomy.Rotation_EQJ_ECL()
    ecl_eqj = astronomy.Rotation_ECL_EQJ()
    CheckInverse('eqj_ecl', 'ecl_eqj', eqj_ecl, ecl_eqj)
    # Round trip #3: EQJ <==> HOR.
    eqj_hor = astronomy.Rotation_EQJ_HOR(time, observer)
    hor_eqj = astronomy.Rotation_HOR_EQJ(time, observer)
    CheckInverse('eqj_hor', 'hor_eqj', eqj_hor, hor_eqj)
    # Round trip #4: EQD <==> HOR.
    eqd_hor = astronomy.Rotation_EQD_HOR(time, observer)
    hor_eqd = astronomy.Rotation_HOR_EQD(time, observer)
    CheckInverse('eqd_hor', 'hor_eqd', eqd_hor, hor_eqd)
    # Round trip #5: EQD <==> ECL.
    eqd_ecl = astronomy.Rotation_EQD_ECL(time)
    ecl_eqd = astronomy.Rotation_ECL_EQD(time)
    CheckInverse('eqd_ecl', 'ecl_eqd', eqd_ecl, ecl_eqd)
    # Round trip #6: HOR <==> ECL.
    hor_ecl = astronomy.Rotation_HOR_ECL(time, observer)
    ecl_hor = astronomy.Rotation_ECL_HOR(time, observer)
    CheckInverse('hor_ecl', 'ecl_hor', hor_ecl, ecl_hor)
    # Round trip #7: EQD <==> ECT
    eqd_ect = astronomy.Rotation_EQD_ECT(time)
    ect_eqd = astronomy.Rotation_ECT_EQD(time)
    CheckInverse('eqd_ect', 'ect_eqd', eqd_ect, ect_eqd)
    # Round trip #8: EQJ <==> ECT
    eqj_ect = astronomy.Rotation_EQJ_ECT(time)
    ect_eqj = astronomy.Rotation_ECT_EQJ(time)
    CheckInverse('eqj_ect', 'ect_eqj', eqj_ect, ect_eqj)
    # Verify that combining different sequences of rotations result
    # in the expected combination.
    # For example, (EQJ ==> HOR ==> ECL) must be the same matrix as (EQJ ==> ECL).
    CheckCycle('eqj_ecl, ecl_eqd, eqd_eqj', eqj_ecl, ecl_eqd, eqd_eqj)
    CheckCycle('eqj_hor, hor_ecl, ecl_eqj', eqj_hor, hor_ecl, ecl_eqj)
    CheckCycle('eqj_hor, hor_eqd, eqd_eqj', eqj_hor, hor_eqd, eqd_eqj)
    CheckCycle('ecl_eqd, eqd_hor, hor_ecl', ecl_eqd, eqd_hor, hor_ecl)
    CheckCycle('eqj_eqd, eqd_ect, ect_eqj', eqj_eqd, eqd_ect, ect_eqj)
    Debug('PY Test_RotRoundTrip: PASS')
def Rotation_Pivot():
    """Verify astronomy.Pivot by applying successive axis pivots to the
    identity matrix and checking both the resulting matrix and a rotated
    test vector against precomputed expected values."""
    tolerance = 1.0e-15
    # Start with an identity matrix.
    ident = astronomy.IdentityMatrix()
    # Pivot 90 degrees counterclockwise around the z-axis.
    r = astronomy.Pivot(ident, 2, +90.0)
    # Put the expected answer in 'a'.
    a = astronomy.RotationMatrix([
        [ 0, +1, 0],
        [-1,  0, 0],
        [ 0,  0, +1],
    ])
    # Compare actual 'r' with expected 'a'.
    CompareMatrices('Rotation_Pivot #1', r, a, tolerance)
    # Pivot again, -30 degrees around the x-axis.
    r = astronomy.Pivot(r, 0, -30.0)
    # Pivot a third time, 180 degrees around the y-axis.
    r = astronomy.Pivot(r, 1, +180.0)
    # Use the 'r' matrix to rotate a vector.
    v1 = astronomy.Vector(1, 2, 3, astronomy.Time(0))
    v2 = astronomy.RotateVector(r, v1)
    # Initialize the expected vector 've'.
    ve = astronomy.Vector(+2.0, +2.3660254037844390, -2.0980762113533156, v1.t)
    CompareVectors('Rotation_Pivot #2', v2, ve, tolerance)
    Debug('PY Rotation_Pivot: PASS')
def Test_EQD_ECT():
    """Verify Rotation_EQD_ECT by sampling the Moon's position every 10 days
    over 1900-2100 and comparing the rotated vector with EclipticGeoMoon.
    Exits the process if the worst vector difference exceeds tolerance."""
    time = astronomy.Time.Make(1900, 1, 1, 0, 0, 0.0)
    stopTime = astronomy.Time.Make(2100, 1, 1, 0, 0, 0.0)
    count = 0
    max_diff = 0.0
    while time.ut <= stopTime.ut:
        # Get Moon's geocentric position in EQJ.
        eqj = astronomy.GeoMoon(time)
        # Convert EQJ to EQD.
        eqj_eqd = astronomy.Rotation_EQJ_EQD(time)
        eqd = astronomy.RotateVector(eqj_eqd, eqj)
        # Convert EQD to ECT.
        eqd_ect = astronomy.Rotation_EQD_ECT(time)
        ect = astronomy.RotateVector(eqd_ect, eqd)
        # Independently get the Moon's spherical coordinates in ECT.
        sphere = astronomy.EclipticGeoMoon(time)
        # Convert spherical coordinates to ECT vector.
        check_ect = astronomy.VectorFromSphere(sphere, time)
        # Verify the two ECT vectors are identical, within tolerance.
        max_diff = max(max_diff, VectorDiff(ect, check_ect))
        time = time.AddDays(10.0)
        count += 1
    if max_diff > 3.743e-18:
        print('PY Test_EQD_ECT: excessive vector diff = {:0.6e} au.'.format(max_diff))
        sys.exit(1)
    Debug('PY Test_EQD_ECT: PASS: count = {}, max_diff = {:0.6e} au.'.format(count, max_diff))
def Ecliptic():
    """Verify astronomy.Ecliptic against EclipticGeoMoon over 1900-2100.

    Samples the Moon's geocentric position every 10 days, checking that the
    ecliptic angles and the ecliptic vector returned by astronomy.Ecliptic
    are mutually consistent and agree with EclipticGeoMoon.
    Returns 0 on success, 1 on failure.
    """
    time = astronomy.Time.Make(1900, 1, 1, 0, 0, 0.0)
    stopTime = astronomy.Time.Make(2100, 1, 1, 0, 0, 0.0)
    count = 0
    max_vec_diff = 0
    max_angle_diff = 0.0
    while time.ut <= stopTime.ut:
        # Get Moon's geocentric position in EQJ.
        eqj = astronomy.GeoMoon(time)
        # Convert the EQJ vector to ecliptic coordinates (angles + vector).
        eclip = astronomy.Ecliptic(eqj)
        # Confirm that the ecliptic angles and ecliptic vector are consistent.
        check_sphere = astronomy.Spherical(eclip.elat, eclip.elon, eclip.vec.Length())
        check_vec = astronomy.VectorFromSphere(check_sphere, time)
        max_angle_diff = max(max_angle_diff, VectorDiff(eclip.vec, check_vec))
        # Independently get the Moon's spherical coordinates in ECT.
        sphere = astronomy.EclipticGeoMoon(time)
        # Convert spherical coordinates to ECT vector.
        check_ect = astronomy.VectorFromSphere(sphere, time)
        # Verify the two ECT vectors are identical, within tolerance.
        max_vec_diff = max(max_vec_diff, VectorDiff(eclip.vec, check_ect))
        time = time.AddDays(10.0)
        count += 1
    if max_vec_diff > 3.388e-18:
        return Fail('Ecliptic', 'EXCESSIVE VECTOR DIFF = {:0.6e} au.'.format(max_vec_diff))
    if max_angle_diff > 3.007e-18:
        return Fail('Ecliptic', 'EXCESSIVE ANGLE DIFF = {:0.6e} au.'.format(max_angle_diff))
    print('PY Ecliptic: PASS: count = {:d}, max_vec_diff = {:0.6e} au, max_angle_diff = {:0.6e} au.'.format(count, max_vec_diff, max_angle_diff))
    return 0
def Rotation():
    """Run the full battery of rotation-matrix unit tests; return 0 on success.

    Any individual test failure exits the process, so reaching the end
    implies everything passed.
    """
    Rotation_MatrixInverse()
    Rotation_MatrixMultiply()
    Rotation_Pivot()
    Test_GAL_EQJ_NOVAS('temp/galeqj.txt')
    planets = (
        astronomy.Body.Mercury,
        astronomy.Body.Venus,
        astronomy.Body.Mars,
        astronomy.Body.Jupiter,
        astronomy.Body.Saturn,
    )
    # Exercise EQJ<->EQD conversions for each planet, then EQD<->HOR.
    for planet in planets:
        Test_EQJ_EQD(planet)
    for planet in planets:
        Test_EQD_HOR(planet)
    Test_EQD_ECT()
    Test_RotRoundTrip()
    print('PY Rotation: PASS')
    return 0
#-----------------------------------------------------------------------------------------------------------
def Refraction():
    """Check that InverseRefractionAngle is a true inverse of RefractionAngle.

    Sweeps altitudes from -90.1 to +90.1 degrees in 0.001-degree steps,
    applies the normal refraction model and then its inverse, and requires
    the round trip to recover the original altitude within 2e-14 degrees.
    Returns 0 on success, 1 on failure.
    """
    alt = -90.1
    while alt <= +90.1:
        bend = astronomy.RefractionAngle(astronomy.Refraction.Normal, alt)
        apparent = alt + bend
        unbend = astronomy.InverseRefractionAngle(astronomy.Refraction.Normal, apparent)
        recovered = apparent + unbend
        error = vabs(recovered - alt)
        if error > 2.0e-14:
            print('PY Refraction: ERROR - excessive error: alt={}, refr={}, diff={}'.format(alt, bend, error))
            return 1
        alt += 0.001
    print('PY Refraction: PASS')
    return 0
#-----------------------------------------------------------------------------------------------------------
def PlanetApsis():
    """Validate perihelion/aphelion predictions for every planet.

    For each body from Mercury through Pluto, reads apsides/apsis_<n>.txt
    and compares each predicted apsis kind, time, and distance with the
    expected values. Returns 0 on success, 1 on failure.
    """
    start_time = astronomy.Time.Make(1700, 1, 1, 0, 0, 0)
    body = astronomy.Body.Mercury
    while body.value <= astronomy.Body.Pluto.value:
        count = 1
        period = astronomy.PlanetOrbitalPeriod(body)
        filename = os.path.join('apsides', 'apsis_{}.txt'.format(body.value))
        min_interval = -1.0     # -1 marks "no interval measured yet"
        max_diff_days = 0.0
        max_dist_ratio = 0.0
        apsis = astronomy.SearchPlanetApsis(body, start_time)
        with open(filename, 'rt') as infile:
            for line in infile:
                token = line.split()
                if len(token) != 3:
                    print('PY PlanetApsis({} line {}): Invalid data format: {} tokens'.format(filename, count, len(token)))
                    return 1
                expected_kind = astronomy.ApsisKind(int(token[0]))
                expected_time = astronomy.Time.Parse(token[1])
                expected_distance = float(token[2])
                if apsis.kind != expected_kind:
                    print('PY PlanetApsis({} line {}): WRONG APSIS KIND: expected {}, found {}'.format(filename, count, expected_kind, apsis.kind))
                    return 1
                diff_days = vabs(expected_time.tt - apsis.time.tt)
                max_diff_days = vmax(max_diff_days, diff_days)
                # Express the time error as an angular fraction of the orbit,
                # so the tolerance scales with each planet's period.
                diff_degrees = (diff_days / period) * 360
                degree_threshold = 0.1
                if diff_degrees > degree_threshold:
                    print('PY PlanetApsis: FAIL - {} exceeded angular threshold ({} vs {} degrees)'.format(body.name, diff_degrees, degree_threshold))
                    return 1
                diff_dist_ratio = vabs(expected_distance - apsis.dist_au) / expected_distance
                max_dist_ratio = vmax(max_dist_ratio, diff_dist_ratio)
                if diff_dist_ratio > 1.05e-4:
                    print('PY PlanetApsis({} line {}): distance ratio {} is too large.'.format(filename, count, diff_dist_ratio))
                    return 1
                # Calculate the next apsis.
                prev_time = apsis.time
                apsis = astronomy.NextPlanetApsis(body, apsis)
                count += 1
                # Track the min/max interval between consecutive apsides.
                interval = apsis.time.tt - prev_time.tt
                if min_interval < 0.0:
                    min_interval = max_interval = interval
                else:
                    min_interval = vmin(min_interval, interval)
                    max_interval = vmax(max_interval, interval)
        if count < 2:
            print('PY PlanetApsis: FAILED to find apsides for {}'.format(body))
            return 1
        Debug('PY PlanetApsis: {:4d} apsides for {:<9s} -- intervals: min={:0.2f}, max={:0.2f}, ratio={:0.6f}; max day={:0.3f}, degrees={:0.3f}, dist ratio={:0.6f}'.format(
            count,
            body.name,
            min_interval, max_interval, max_interval / min_interval,
            max_diff_days,
            (max_diff_days / period) * 360.0,
            max_dist_ratio
        ))
        body = astronomy.Body(body.value + 1)
    print('PY PlanetApsis: PASS')
    return 0
#-----------------------------------------------------------------------------------------------------------
def Constellation():
    """Verify astronomy.Constellation against a table of star positions.

    Each data line holds: star id, RA, DEC, and the expected 3-letter
    constellation symbol. Returns 0 on success, 1 on failure.
    """
    inFileName = 'constellation/test_input.txt'
    # star id, RA, DEC, constellation symbol (capital letter + 2 letters)
    pattern = re.compile(r'^\s*(\d+)\s+(\S+)\s+(\S+)\s+([A-Z][a-zA-Z]{2})\s*$')
    lnum = 0
    failcount = 0
    with open(inFileName, 'rt') as infile:
        for line in infile:
            lnum += 1
            m = pattern.match(line)
            if not m:
                print('PY Constellation: invalid line {} in file {}'.format(lnum, inFileName))
                return 1
            star_id, ra_text, dec_text, symbol = m.groups()
            constel = astronomy.Constellation(float(ra_text), float(dec_text))
            if constel.symbol != symbol:
                print('Star {:6d}: expected {}, found {} at B1875 RA={:10.6f}, DEC={:10.6f}'.format(int(star_id), symbol, constel.symbol, constel.ra1875, constel.dec1875))
                failcount += 1
    if failcount > 0:
        print('PY Constellation: {} failures'.format(failcount))
        return 1
    print('PY Constellation: PASS (verified {})'.format(lnum))
    return 0
#-----------------------------------------------------------------------------------------------------------
def LunarEclipseIssue78():
    """Regression test for https://github.com/cosinekitty/astronomy/issues/78.

    Confirms the total lunar eclipse of 2021-05-26 is found with the
    correct kind and a peak time within 40 seconds of the published value.
    Returns 0 on success, 1 on failure.
    """
    search_start = astronomy.Time.Make(2020, 12, 19, 0, 0, 0)
    eclipse = astronomy.SearchLunarEclipse(search_start)
    # Reference peak time: https://www.timeanddate.com/eclipse/lunar/2021-may-26
    expected_peak = astronomy.Time.Make(2021, 5, 26, 11, 18, 42)
    dt = (expected_peak.tt - eclipse.peak.tt) * SECONDS_PER_DAY
    if vabs(dt) > 40.0:
        print('LunarEclipseIssue78: Excessive prediction error = {} seconds.'.format(dt))
        return 1
    if eclipse.kind != astronomy.EclipseKind.Total:
        print('Expected total eclipse; found: {}'.format(eclipse.kind))
        return 1
    print('PY LunarEclipseIssue78: PASS')
    return 0
#-----------------------------------------------------------------------------------------------------------
def LunarEclipse():
    """Validate lunar eclipse predictions against eclipse/lunar_eclipse.txt.

    Each input line holds the expected peak time (first 17 characters)
    followed by the partial and total semidurations in minutes. For every
    eclipse, the predicted kind, obscuration, peak time, and semidurations
    are checked against the reference data.
    Returns 0 on success, 1 on failure.
    """
    filename = 'eclipse/lunar_eclipse.txt'
    with open(filename, 'rt') as infile:
        eclipse = astronomy.SearchLunarEclipse(astronomy.Time.Make(1701, 1, 1, 0, 0, 0))
        lnum = 0
        skip_count = 0
        diff_count = 0
        sum_diff_minutes = 0.0
        max_diff_minutes = 0.0
        diff_limit = 2.0    # tolerated error per time comparison, in minutes
        for line in infile:
            lnum += 1
            # Make sure numeric data are finite numbers.
            v(eclipse.obscuration)
            v(eclipse.sd_partial)
            v(eclipse.sd_penum)
            v(eclipse.sd_total)
            if len(line) < 17:
                print('PY LunarEclipse({} line {}): line is too short.'.format(filename, lnum))
                return 1
            time_text = line[0:17]
            peak_time = astronomy.Time.Parse(time_text)
            token = line[17:].split()
            if len(token) != 2:
                print('PY LunarEclipse({} line {}): wrong number of tokens.'.format(filename, lnum))
                return 1
            partial_minutes = float(token[0])
            total_minutes = float(token[1])
            sd_valid = False
            frac_valid = False
            # Verify that the calculated eclipse semi-durations are consistent with the kind.
            # Verify that obscurations also make sense for the kind.
            if eclipse.kind == astronomy.EclipseKind.Penumbral:
                sd_valid = (eclipse.sd_penum > 0.0) and (eclipse.sd_partial == 0.0) and (eclipse.sd_total == 0.0)
                frac_valid = (eclipse.obscuration == 0.0)
            elif eclipse.kind == astronomy.EclipseKind.Partial:
                sd_valid = (eclipse.sd_penum > 0.0) and (eclipse.sd_partial > 0.0) and (eclipse.sd_total == 0.0)
                frac_valid = (0.0 < eclipse.obscuration < 1.0)
            elif eclipse.kind == astronomy.EclipseKind.Total:
                sd_valid = (eclipse.sd_penum > 0.0) and (eclipse.sd_partial > 0.0) and (eclipse.sd_total > 0.0)
                frac_valid = (eclipse.obscuration == 1.0)
            else:
                print('PY LunarEclipse({} line {}): invalid eclipse kind {}.'.format(filename, lnum, eclipse.kind))
                return 1
            if not sd_valid:
                print('PY LunarEclipse({} line {}): invalid semidurations.'.format(filename, lnum))
                return 1
            if not frac_valid:
                print('PY LunarEclipse({} line {}): invalid obscuration {:0.8f} for eclipsekind {}.'.format(filename, lnum, eclipse.obscuration, eclipse.kind))
                # BUG FIX: this check previously printed the failure message
                # but fell through without failing the test; now it returns
                # an error code like every other validation in this function.
                return 1
            # Check eclipse peak time.
            diff_days = eclipse.peak.ut - peak_time.ut
            # Tolerate missing penumbral eclipses - skip to next input line without calculating next eclipse.
            if partial_minutes == 0.0 and diff_days > 20.0:
                skip_count += 1
                continue
            diff_minutes = (24.0 * 60.0) * vabs(diff_days)
            sum_diff_minutes += diff_minutes
            diff_count += 1
            if diff_minutes > diff_limit:
                print("PY LunarEclipse expected center: {}".format(peak_time))
                print("PY LunarEclipse found center: {}".format(eclipse.peak))
                print("PY LunarEclipse({} line {}): EXCESSIVE center time error = {} minutes ({} days).".format(filename, lnum, diff_minutes, diff_days))
                return 1
            if diff_minutes > max_diff_minutes:
                max_diff_minutes = diff_minutes
            # check partial eclipse duration
            diff_minutes = vabs(partial_minutes - eclipse.sd_partial)
            sum_diff_minutes += diff_minutes
            diff_count += 1
            if diff_minutes > diff_limit:
                print("PY LunarEclipse({} line {}): EXCESSIVE partial eclipse semiduration error: {} minutes".format(filename, lnum, diff_minutes))
                return 1
            if diff_minutes > max_diff_minutes:
                max_diff_minutes = diff_minutes
            # check total eclipse duration
            diff_minutes = vabs(total_minutes - eclipse.sd_total)
            sum_diff_minutes += diff_minutes
            diff_count += 1
            if diff_minutes > diff_limit:
                print("PY LunarEclipse({} line {}): EXCESSIVE total eclipse semiduration error: {} minutes".format(filename, lnum, diff_minutes))
                return 1
            if diff_minutes > max_diff_minutes:
                max_diff_minutes = diff_minutes
            # calculate for next iteration
            eclipse = astronomy.NextLunarEclipse(eclipse.peak)
    print("PY LunarEclipse: PASS (verified {}, skipped {}, max_diff_minutes = {}, avg_diff_minutes = {})".format(lnum, skip_count, max_diff_minutes, (sum_diff_minutes / diff_count)))
    return 0
#-----------------------------------------------------------------------------------------------------------
def VectorFromAngles(lat, lon):
    """Convert spherical angles (degrees) into a Cartesian unit vector [x, y, z]."""
    lat_radians = math.radians(v(lat))
    lon_radians = math.radians(v(lon))
    cos_lat = math.cos(lat_radians)
    x = math.cos(lon_radians) * cos_lat
    y = math.sin(lon_radians) * cos_lat
    z = math.sin(lat_radians)
    return [x, y, z]
def AngleDiff(alat, alon, blat, blon):
    """Return the angle in degrees between two directions given as (lat, lon) pairs in degrees."""
    avec = VectorFromAngles(alat, alon)
    bvec = VectorFromAngles(blat, blon)
    dot = sum(p * q for p, q in zip(avec, bvec))
    # Clamp to the valid domain of acos to guard against rounding drift.
    if dot <= -1.0:
        return 180.0
    if dot >= +1.0:
        return 0.0
    return v(math.degrees(math.acos(dot)))
def KindFromChar(typeChar):
    """Translate a one-letter eclipse code from the test data into an EclipseKind.

    'H' (hybrid) counts as a total eclipse. Raises KeyError for any other code.
    """
    table = {
        'P': astronomy.EclipseKind.Partial,
        'A': astronomy.EclipseKind.Annular,
        'T': astronomy.EclipseKind.Total,
        'H': astronomy.EclipseKind.Total,
    }
    return table[typeChar]
def GlobalSolarEclipse():
    """Validate global solar eclipse predictions against eclipse/solar_eclipse.txt.

    Each input line holds the expected peak time, a partiality code
    (P/A/T/H), and the geographic latitude/longitude of the peak shadow.
    Checks the predicted kind, peak time, peak location, and obscuration.
    Returns 0 on success, 1 on failure.
    """
    expected_count = 1180
    max_minutes = 0.0
    max_angle = 0.0
    skip_count = 0
    eclipse = astronomy.SearchGlobalSolarEclipse(astronomy.Time.Make(1701, 1, 1, 0, 0, 0))
    filename = 'eclipse/solar_eclipse.txt'
    with open(filename, 'rt') as infile:
        lnum = 0
        for line in infile:
            lnum += 1
            # 1889-12-22T12:54:15Z -6 T -12.7 -12.8
            token = line.split()
            if len(token) != 5:
                print('PY GlobalSolarEclipse({} line {}): invalid token count = {}'.format(filename, lnum, len(token)))
                return 1
            peak = astronomy.Time.Parse(token[0])
            expected_kind = KindFromChar(token[2])
            lat = float(token[3])
            lon = float(token[4])
            # CONSISTENCY FIX: use Universal Time here; this expression previously
            # used .tt while the re-check inside the skip loop below used .ut.
            diff_days = eclipse.peak.ut - peak.ut
            # Sometimes we find marginal eclipses that aren't listed in the test data.
            # Ignore them if the distance between the Sun/Moon shadow axis and the Earth's center is large.
            while diff_days < -25.0 and eclipse.distance > 9000.0:
                skip_count += 1
                eclipse = astronomy.NextGlobalSolarEclipse(eclipse.peak)
                diff_days = eclipse.peak.ut - peak.ut
            # Validate the eclipse prediction.
            diff_minutes = (24 * 60) * vabs(diff_days)
            if diff_minutes > 7.56:
                print('PY GlobalSolarEclipse({} line {}): EXCESSIVE TIME ERROR = {} minutes'.format(filename, lnum, diff_minutes))
                return 1
            if diff_minutes > max_minutes:
                max_minutes = diff_minutes
            # Validate the eclipse kind, but only when it is not a "glancing" eclipse.
            if (eclipse.distance < 6360) and (eclipse.kind != expected_kind):
                print('PY GlobalSolarEclipse({} line {}): WRONG ECLIPSE KIND: expected {}, found {}'.format(filename, lnum, expected_kind, eclipse.kind))
                return 1
            if eclipse.kind == astronomy.EclipseKind.Total or eclipse.kind == astronomy.EclipseKind.Annular:
                # When the distance between the Moon's shadow ray and the Earth's center is beyond 6100 km,
                # it creates a glancing blow whose geographic coordinates are excessively sensitive to
                # slight changes in the ray. Therefore, it is unreasonable to count large errors there.
                if eclipse.distance < 6100.0:
                    diff_angle = AngleDiff(lat, lon, eclipse.latitude, eclipse.longitude)
                    if diff_angle > 0.247:
                        print('PY GlobalSolarEclipse({} line {}): EXCESSIVE GEOGRAPHIC LOCATION ERROR = {} degrees'.format(filename, lnum, diff_angle))
                        return 1
                    if diff_angle > max_angle:
                        max_angle = diff_angle
            # Verify the obscuration value is consistent with the eclipse kind.
            if eclipse.kind == astronomy.EclipseKind.Partial:
                if eclipse.obscuration is not None:
                    print('PY GlobalSolarEclipse({} line {}): Expected obscuration = None for partial eclipse, but found {}'.format(filename, lnum, eclipse.obscuration))
                    return 1
            elif eclipse.kind == astronomy.EclipseKind.Annular:
                if not (0.8 < v(eclipse.obscuration) < 1.0):
                    print('PY GlobalSolarEclipse({} line {}): Invalid obscuration = {:0.8f} for annular eclipse.'.format(filename, lnum, eclipse.obscuration))
                    return 1
            elif eclipse.kind == astronomy.EclipseKind.Total:
                if v(eclipse.obscuration) != 1.0:
                    print('PY GlobalSolarEclipse({} line {}): Invalid obscuration = {:0.8f} for total eclipse.'.format(filename, lnum, eclipse.obscuration))
                    return 1
            else:
                print('PY GlobalSolarEclipse({} line {}): Unhandled eclipse kind {}'.format(filename, lnum, eclipse.kind))
                return 1
            eclipse = astronomy.NextGlobalSolarEclipse(eclipse.peak)
    if lnum != expected_count:
        print('PY GlobalSolarEclipse: WRONG LINE COUNT = {}, expected {}'.format(lnum, expected_count))
        return 1
    if skip_count > 2:
        print('PY GlobalSolarEclipse: EXCESSIVE SKIP COUNT = {}'.format(skip_count))
        return 1
    print('PY GlobalSolarEclipse: PASS ({} verified, {} skipped, max minutes = {}, max angle = {})'.format(lnum, skip_count, max_minutes, max_angle))
    return 0
#-----------------------------------------------------------------------------------------------------------
def LocalSolarEclipse1():
    """Validate local solar eclipse peak times and obscurations at the peak location.

    Uses the same reference data as GlobalSolarEclipse, placing the observer
    at the geographic location of each eclipse's peak shadow.
    Returns 0 on success, 1 on failure.
    """
    expected_count = 1180
    max_minutes = 0.0
    skip_count = 0
    filename = 'eclipse/solar_eclipse.txt'
    with open(filename, 'rt') as infile:
        lnum = 0
        for line in infile:
            lnum += 1
            funcname = 'LocalSolarEclipse({} line {})'.format(filename, lnum)
            # 1889-12-22T12:54:15Z -6 T -12.7 -12.8
            token = line.split()
            if len(token) != 5:
                return Fail(funcname, 'invalid token count = {}'.format(len(token)))
            peak = astronomy.Time.Parse(token[0])
            #typeChar = token[2]
            lat = float(token[3])
            lon = float(token[4])
            observer = astronomy.Observer(lat, lon, 0.0)
            # Start the search 20 days before we know the eclipse should peak.
            search_start = peak.AddDays(-20)
            eclipse = astronomy.SearchLocalSolarEclipse(search_start, observer)
            # Validate the predicted eclipse peak time.
            diff_days = eclipse.peak.time.tt - peak.tt
            # Skip when the nearest eclipse found is more than 20 days after the expected peak.
            if diff_days > 20:
                skip_count += 1
                continue
            diff_minutes = (24 * 60) * vabs(diff_days)
            if diff_minutes > 7.737:
                return Fail(funcname, 'EXCESSIVE TIME ERROR = {} minutes'.format(diff_minutes))
            if diff_minutes > max_minutes:
                max_minutes = diff_minutes
            # Verify obscuration makes sense for this kind of eclipse.
            v(eclipse.obscuration)
            if eclipse.kind in [astronomy.EclipseKind.Annular, astronomy.EclipseKind.Partial]:
                frac_valid = (0.0 < eclipse.obscuration < 1.0)
            elif eclipse.kind == astronomy.EclipseKind.Total:
                frac_valid = (eclipse.obscuration == 1.0)
            else:
                return Fail(funcname, 'Invalid eclipse kind {}'.format(eclipse.kind))
            if not frac_valid:
                return Fail(funcname, 'Invalid eclipse obscuration {:0.8f} for {} eclipse.'.format(eclipse.obscuration, eclipse.kind))
    funcname = 'LocalSolarEclipse1({})'.format(filename)
    if lnum != expected_count:
        return Fail(funcname, 'WRONG LINE COUNT = {}, expected {}'.format(lnum, expected_count))
    if skip_count > 6:
        return Fail(funcname, 'EXCESSIVE SKIP COUNT = {}'.format(skip_count))
    print('PY LocalSolarEclipse1: PASS ({} verified, {} skipped, max minutes = {})'.format(lnum, skip_count, max_minutes))
    return 0
def TrimLine(line):
    """Strip a trailing '#' comment and surrounding whitespace from a line of test data."""
    # Everything from the first '#' onward is a comment; partition keeps
    # the whole line when no '#' is present.
    text, _, _ = line.partition('#')
    return text.strip()
def ParseEvent(time_str, alt_str, required):
    """Parse one eclipse event from text fields.

    When *required* is True, returns an EclipseEvent built from the parsed
    time and altitude. When False, *time_str* must be '-' (the event does
    not occur) and None is returned; any other value raises an Exception.
    """
    if not required:
        if time_str != '-':
            raise Exception('Expected event time to be "-" but found "{}"'.format(time_str))
        return None
    event_time = astronomy.Time.Parse(time_str)
    return astronomy.EclipseEvent(event_time, float(alt_str))
def LocalSolarEclipse2():
    """Validate local solar eclipse events for observers away from the peak location.

    Reads eclipse/local_solar_eclipse.txt, where each line holds an observer
    latitude/longitude, an eclipse kind code, and five event time/altitude
    pairs (p1, t1, peak, t2, p2); the total-phase events t1/t2 are present
    only for umbral (non-partial) eclipses. Returns 0 on success, 1 on failure.
    """
    # Test ability to calculate local solar eclipse conditions away from
    # the peak position on the Earth.
    filename = 'eclipse/local_solar_eclipse.txt'
    lnum = 0
    verify_count = 0
    max_minutes = 0.0
    max_degrees = 0.0
    def CheckEvent(calc, expect):
        # Compare one calculated event against its expected time/altitude;
        # raise (caught nowhere here, so the test run fails) on excessive error.
        nonlocal max_minutes, max_degrees
        diff_minutes = (24 * 60) * vabs(expect.time.ut - calc.time.ut)
        if diff_minutes > max_minutes:
            max_minutes = diff_minutes
        if diff_minutes > 1.0:
            raise Exception('CheckEvent({} line {}): EXCESSIVE TIME ERROR: {} minutes.'.format(filename, lnum, diff_minutes))
        # Ignore discrepancies for negative altitudes, because of quirky and irrelevant differences in refraction models.
        if expect.altitude >= 0.0:
            diff_alt = vabs(expect.altitude - calc.altitude)
            if diff_alt > max_degrees:
                max_degrees = diff_alt
            if diff_alt > 0.5:
                raise Exception('CheckEvent({} line {}): EXCESSIVE ALTITUDE ERROR: {} degrees.'.format(filename, lnum, diff_alt))
    with open(filename, 'rt') as infile:
        for line in infile:
            lnum += 1
            line = TrimLine(line)
            if line == '':
                continue
            token = line.split()
            if len(token) != 13:
                print('PY LocalSolarEclipse2({} line {}): Incorrect token count = {}'.format(filename, lnum, len(token)))
                return 1
            latitude = float(token[0])
            longitude = float(token[1])
            observer = astronomy.Observer(latitude, longitude, 0)
            expected_kind = KindFromChar(token[2])
            is_umbral = (expected_kind != astronomy.EclipseKind.Partial)
            p1 = ParseEvent(token[3], token[4], True)
            t1 = ParseEvent(token[5], token[6], is_umbral)
            peak = ParseEvent(token[7], token[8], True)
            t2 = ParseEvent(token[9], token[10], is_umbral)
            p2 = ParseEvent(token[11], token[12], True)
            search_time = p1.time.AddDays(-20)
            eclipse = astronomy.SearchLocalSolarEclipse(search_time, observer)
            if eclipse.kind != expected_kind:
                print('PY LocalSolarEclipse2({} line {}): expected eclipse kind "{}" but found "{}".'.format(
                    filename, lnum, expected_kind, eclipse.kind
                ))
                return 1
            CheckEvent(eclipse.peak, peak)
            CheckEvent(eclipse.partial_begin, p1)
            CheckEvent(eclipse.partial_end, p2)
            if is_umbral:
                CheckEvent(eclipse.total_begin, t1)
                CheckEvent(eclipse.total_end, t2)
            verify_count += 1
    print('PY LocalSolarEclipse2: PASS ({} verified, max_minutes = {}, max_degrees = {})'.format(verify_count, max_minutes, max_degrees))
    return 0
def LocalSolarEclipse():
    """Run both local solar eclipse test suites.

    Returns 0 when both pass, otherwise the first nonzero failure code.
    """
    for suite in (LocalSolarEclipse1, LocalSolarEclipse2):
        rc = suite()
        if rc:
            return rc
    return 0
#-----------------------------------------------------------------------------------------------------------
def GlobalAnnularCase(year, month, day, obscuration):
    """Verify the obscuration of the first annular solar eclipse after a date.

    year, month, day : search basis date (UTC midnight).
    obscuration      : expected peak obscuration fraction.
    Returns 0 on success, nonzero Fail() result on failure.
    """
    funcname = 'GlobalAnnularCase({:04d}-{:02d}-{:02d})'.format(year, month, day)
    # Search for the first solar eclipse that occurs after the given date.
    search_time = astronomy.Time.Make(year, month, day, 0, 0, 0.0)
    ecl = astronomy.SearchGlobalSolarEclipse(search_time)
    # The eclipse must peak within 1 day after the search basis time.
    days_after = v(ecl.peak.ut - search_time.ut)
    if not (0.0 <= days_after <= 1.0):
        return Fail(funcname, 'found eclipse {:0.4f} days after search time.'.format(days_after))
    # It must be an annular eclipse.
    if ecl.kind != astronomy.EclipseKind.Annular:
        return Fail(funcname, 'expected annular eclipse but found {}'.format(ecl.kind))
    # Check how accurately the obscuration was calculated.
    error = v(ecl.obscuration - obscuration)
    if abs(error) > 0.0000904:
        return Fail(funcname, 'excessive obscuration error = {:0.8f}, expected = {:0.8f}, actual = {:0.8f}'.format(error, obscuration, ecl.obscuration))
    Debug('{}: obscuration error = {:11.8f}'.format(funcname, error))
    return 0
def LocalSolarCase(year, month, day, latitude, longitude, kind, obscuration, tolerance):
    """Verify kind and obscuration of a local solar eclipse seen from one place.

    Returns 0 on success, nonzero Fail() result on failure.
    """
    funcname = 'LocalSolarCase({:04d}-{:02d}-{:02d})'.format(year, month, day)
    search_time = astronomy.Time.Make(year, month, day, 0, 0, 0.0)
    site = astronomy.Observer(latitude, longitude, 0.0)
    ecl = astronomy.SearchLocalSolarEclipse(search_time, site)
    # The eclipse must peak within 1 day after the search date.
    days_after = v(ecl.peak.time.ut - search_time.ut)
    if not (0.0 <= days_after <= 1.0):
        return Fail(funcname, 'eclipse found {:0.4f} days after search date'.format(days_after))
    if ecl.kind != kind:
        return Fail(funcname, 'expected {} eclipse, but found {}.'.format(kind, ecl.kind))
    error = v(ecl.obscuration - obscuration)
    if abs(error) > tolerance:
        return Fail(funcname, 'obscuration diff = {:0.8f}, expected = {:0.8f}, actual = {:0.8f}'.format(error, obscuration, ecl.obscuration))
    Debug('{}: obscuration diff = {:11.8f}'.format(funcname, error))
    return 0
def SolarFraction():
    """Verify solar eclipse obscuration values against published references.

    Short-circuits on the first failing case; returns 0 (via Pass) when all
    cases succeed, otherwise the failing case's nonzero return value.
    """
    return (
        # Verify global solar eclipse obscurations for annular eclipses only.
        # This is because they are the only nontrivial values for global solar eclipses.
        # The trivial values are all validated exactly by GlobalSolarEclipseTest().
        GlobalAnnularCase(2023, 10, 14, 0.90638) or    # https://www.eclipsewise.com/solar/SEprime/2001-2100/SE2023Oct14Aprime.html
        GlobalAnnularCase(2024, 10,  2, 0.86975) or    # https://www.eclipsewise.com/solar/SEprime/2001-2100/SE2024Oct02Aprime.html
        GlobalAnnularCase(2027,  2,  6, 0.86139) or    # https://www.eclipsewise.com/solar/SEprime/2001-2100/SE2027Feb06Aprime.html
        GlobalAnnularCase(2028,  1, 26, 0.84787) or    # https://www.eclipsewise.com/solar/SEprime/2001-2100/SE2028Jan26Aprime.html
        GlobalAnnularCase(2030,  6,  1, 0.89163) or    # https://www.eclipsewise.com/solar/SEprime/2001-2100/SE2030Jun01Aprime.html

        # Verify obscuration values for specific locations on the Earth.
        # Local solar eclipse calculations include obscuration for all types of eclipse, not just annular and total.
        LocalSolarCase(2023, 10, 14,  11.3683,  -83.1017, astronomy.EclipseKind.Annular, 0.90638, 0.000080) or  # https://www.eclipsewise.com/solar/SEprime/2001-2100/SE2023Oct14Aprime.html
        LocalSolarCase(2023, 10, 14,  25.78,    -80.22,   astronomy.EclipseKind.Partial, 0.578,   0.000023) or  # https://aa.usno.navy.mil/calculated/eclipse/solar?eclipse=22023&lat=25.78&lon=-80.22&label=Miami%2C+FL&height=0&submit=Get+Data
        LocalSolarCase(2023, 10, 14,  30.2666,  -97.7000, astronomy.EclipseKind.Partial, 0.8867,  0.001016) or  # http://astro.ukho.gov.uk/eclipse/0332023/Austin_TX_United_States_2023Oct14.png
        LocalSolarCase(2024,  4,  8,  25.2900, -104.1383, astronomy.EclipseKind.Total,   1.0,     0.0     ) or  # https://www.eclipsewise.com/solar/SEprime/2001-2100/SE2024Apr08Tprime.html
        LocalSolarCase(2024,  4,  8,  37.76,   -122.44,   astronomy.EclipseKind.Partial, 0.340,   0.000604) or  # https://aa.usno.navy.mil/calculated/eclipse/solar?eclipse=12024&lat=37.76&lon=-122.44&label=San+Francisco%2C+CA&height=0&submit=Get+Data
        LocalSolarCase(2024, 10,  2, -21.9533, -114.5083, astronomy.EclipseKind.Annular, 0.86975, 0.000061) or  # https://www.eclipsewise.com/solar/SEprime/2001-2100/SE2024Oct02Aprime.html
        LocalSolarCase(2024, 10,  2, -33.468,   -70.636,  astronomy.EclipseKind.Partial, 0.436,   0.000980) or  # https://aa.usno.navy.mil/calculated/eclipse/solar?eclipse=22024&lat=-33.468&lon=-70.636&label=Santiago%2C+Chile&height=0&submit=Get+Data
        LocalSolarCase(2030,  6,  1,  56.525,    80.0617, astronomy.EclipseKind.Annular, 0.89163, 0.000067) or  # https://www.eclipsewise.com/solar/SEprime/2001-2100/SE2030Jun01Aprime.html
        LocalSolarCase(2030,  6,  1,  40.388,    49.914,  astronomy.EclipseKind.Partial, 0.67240, 0.000599) or  # http://xjubier.free.fr/en/site_pages/SolarEclipseCalc_Diagram.html
        LocalSolarCase(2030,  6,  1,  40.3667,   49.8333, astronomy.EclipseKind.Partial, 0.6736,  0.001464) or  # http://astro.ukho.gov.uk/eclipse/0132030/Baku_Azerbaijan_2030Jun01.png

        Pass('SolarFraction')
    )
#-----------------------------------------------------------------------------------------------------------
def TransitFile(body, filename, limit_minutes, limit_sep):
    """Verify SearchTransit/NextTransit for `body` against a reference file.

    body          : astronomy.Body whose transits across the Sun are checked.
    filename      : reference file; each line holds start hh:mm, peak ISO time,
                    finish hh:mm, and separation (e.g. "22:17 1881-11-08T00:57Z 03:38 3.8633").
    limit_minutes : maximum allowed timing error in minutes.
    limit_sep     : maximum allowed separation error in arcminutes.
    Returns 0 on success, 1 on failure.
    """
    lnum = 0
    max_minutes = 0
    max_sep = 0
    with open(filename, 'rt') as infile:
        # Find the first transit after 1600-01-01, then advance through the file in order.
        transit = astronomy.SearchTransit(body, astronomy.Time.Make(1600, 1, 1, 0, 0, 0))
        for line in infile:
            lnum += 1
            token = line.strip().split()
            # 22:17 1881-11-08T00:57Z 03:38 3.8633
            if len(token) != 4:
                print('PY TransitFile({} line {}): bad data format.'.format(filename, lnum))
                return 1
            textp = token[1]
            # Combine the peak date prefix with the start/finish hh:mm fields to form full timestamps.
            text1 = textp[0:11] + token[0] + 'Z'
            text2 = textp[0:11] + token[2] + 'Z'
            timep = astronomy.Time.Parse(textp)
            time1 = astronomy.Time.Parse(text1)
            time2 = astronomy.Time.Parse(text2)
            separation = float(token[3])
            # If the start time is after the peak time, it really starts on the previous day.
            if time1.ut > timep.ut:
                time1 = time1.AddDays(-1.0)
            # If the finish time is before the peak time, it really starts on the following day.
            if time2.ut < timep.ut:
                time2 = time2.AddDays(+1.0)
            # Convert day-unit differences to minutes.
            diff_start  = (24.0 * 60.0) * vabs(time1.ut - transit.start.ut )
            diff_peak   = (24.0 * 60.0) * vabs(timep.ut - transit.peak.ut  )
            diff_finish = (24.0 * 60.0) * vabs(time2.ut - transit.finish.ut)
            diff_sep = vabs(separation - transit.separation)
            max_minutes = vmax(max_minutes, diff_start)
            max_minutes = vmax(max_minutes, diff_peak)
            max_minutes = vmax(max_minutes, diff_finish)
            if max_minutes > limit_minutes:
                print('PY TransitFile({} line {}): EXCESSIVE TIME ERROR = {} minutes.'.format(filename, lnum, max_minutes))
                return 1
            max_sep = vmax(max_sep, diff_sep)
            if max_sep > limit_sep:
                print('PY TransitFile({} line {}): EXCESSIVE SEPARATION ERROR = {} arcminutes.'.format(filename, lnum, max_sep))
                return 1
            transit = astronomy.NextTransit(body, transit.finish)
    print('PY TransitFile({}): PASS - verified {}, max minutes = {}, max sep arcmin = {}'.format(filename, lnum, max_minutes, max_sep))
    return 0
def Transit():
    """Validate Mercury and Venus transit predictions against data files.

    Returns 0 on success, 1 on the first failing file.
    """
    return (
        TransitFile(astronomy.Body.Mercury, 'eclipse/mercury.txt', 10.710, 0.2121) or
        TransitFile(astronomy.Body.Venus, 'eclipse/venus.txt', 9.109, 0.6772)
    )
#-----------------------------------------------------------------------------------------------------------
def PlutoCheckDate(ut, arcmin_tolerance, x, y, z):
    """Compare the calculated heliocentric Pluto position at time `ut` against
    the reference position (x, y, z) in AU.

    Returns 0 if the angular error is within `arcmin_tolerance` arcminutes,
    otherwise 1.
    """
    time = astronomy.Time(ut)
    try:
        timeText = str(time)
    except OverflowError:
        # Dates far outside the supported calendar range cannot be formatted.
        timeText = "???"
    Debug('PY PlutoCheck: {} = {} UT = {} TT'.format(timeText, time.ut, time.tt))
    vector = astronomy.HelioVector(astronomy.Body.Pluto, time)
    dx = v(vector.x - x)
    dy = v(vector.y - y)
    dz = v(vector.z - z)
    diff = sqrt(dx*dx + dy*dy + dz*dz)
    # Approximate minimum Earth-Pluto distance, used to convert the position error to an angle.
    dist = sqrt(x*x + y*y + z*z) - 1.0
    arcmin = (diff / dist) * (180.0 * 60.0 / math.pi)
    Debug('PY PlutoCheck: calc pos = [{}, {}, {}]'.format(vector.x, vector.y, vector.z))
    Debug('PY PlutoCheck: ref pos = [{}, {}, {}]'.format(x, y, z))
    Debug('PY PlutoCheck: del pos = [{}, {}, {}]'.format(vector.x - x, vector.y - y, vector.z - z))
    Debug('PY PlutoCheck: diff = {} AU, {} arcmin'.format(diff, arcmin))
    if v(arcmin) > arcmin_tolerance:
        print('PY PlutoCheck: EXCESSIVE ERROR')
        return 1
    Debug('')
    return 0
def PlutoCheck():
    """Spot-check heliocentric Pluto positions at widely separated times.

    Each case is (ut, arcmin_tolerance, x, y, z) with positions in AU.
    Returns 0 on success, 1 on the first failing case.
    """
    cases = [
        ( +18250.0, 0.089, +37.4377303523676090, -10.2466292454075898, -14.4773101310875809),
        (-856493.0, 4.067, +23.4292113199166252, +42.1452685817740829,  +6.0580908436642940),
        (+435633.0, 0.016, -27.3178902095231813, +18.5887022581070305, +14.0493896259306936),
        (      0.0, 8e-9,   -9.8753673425269000, -27.9789270580402771,  -5.7537127596369588),
        (+800916.0, 2.286, -29.5266052645301365, +12.0554287322176474, +12.6878484911631091),
    ]
    for ut, tolerance, x, y, z in cases:
        if PlutoCheckDate(ut, tolerance, x, y, z):
            return 1
    print("PY PlutoCheck: PASS")
    return 0
#-----------------------------------------------------------------------------------------------------------
def GeoidTestCase(time, observer, ofdate):
    """Verify round-trip consistency of observer <-> geocentric vector conversions.

    Checks that (geocentric Moon) - (observer surface vector) matches the
    topocentric Moon vector to within 1 mm, then converts the surface vector
    back to an observer and verifies latitude, longitude, and height.

    time     : astronomy.Time of the observation.
    observer : astronomy.Observer geographic location.
    ofdate   : True for equator-of-date coordinates, False for J2000.
    Returns 0 on success, 1 on failure.
    """
    topo_moon = astronomy.Equator(astronomy.Body.Moon, time, observer, ofdate, False)
    surface = astronomy.ObserverVector(time, observer, ofdate)
    geo_moon = astronomy.GeoVector(astronomy.Body.Moon, time, False)
    if ofdate:
        # GeoVector() returns J2000 coordinates. Convert to equator-of-date coordinates.
        rot = astronomy.Rotation_EQJ_EQD(time)
        geo_moon = astronomy.RotateVector(rot, geo_moon)
    dx = astronomy.KM_PER_AU * v((geo_moon.x - surface.x) - topo_moon.vec.x)
    dy = astronomy.KM_PER_AU * v((geo_moon.y - surface.y) - topo_moon.vec.y)
    dz = astronomy.KM_PER_AU * v((geo_moon.z - surface.z) - topo_moon.vec.z)
    diff = sqrt(dx*dx + dy*dy + dz*dz)
    Debug('PY GeoidTestCase: ofdate={}, time={}, obs={}, surface=({}, {}, {}), diff = {} km'.format(
        ofdate,
        time,
        observer,
        astronomy.KM_PER_AU * surface.x,
        astronomy.KM_PER_AU * surface.y,
        astronomy.KM_PER_AU * surface.z,
        diff
    ))
    # Require 1 millimeter accuracy! (one millionth of a kilometer).
    if diff > 1.0e-6:
        print('PY GeoidTestCase: EXCESSIVE POSITION ERROR.')
        return 1
    # Verify that we can convert the surface vector back to an observer.
    vobs = astronomy.VectorObserver(surface, ofdate)
    lat_diff = vabs(vobs.latitude - observer.latitude)
    # Longitude is meaningless at the poles, so don't bother checking it there.
    if -89.99 <= observer.latitude <= +89.99:
        lon_diff = vabs(vobs.longitude - observer.longitude)
        if lon_diff > 180.0:
            lon_diff = 360.0 - lon_diff
        # BUG FIX: latitude is expressed in degrees, so it must be converted with
        # math.radians() before taking the cosine. The original code called
        # math.degrees(), which treats the value as radians and converts it to
        # degrees, producing a meaningless scale factor for the longitude error.
        lon_diff = vabs(lon_diff * math.cos(math.radians(observer.latitude)))
        if lon_diff > 1.0e-6:
            print('PY GeoidTestCase: EXCESSIVE longitude check error = {}'.format(lon_diff))
            return 1
    else:
        lon_diff = 0.0
    h_diff = vabs(vobs.height - observer.height)
    Debug('PY GeoidTestCase: vobs={}, lat_diff={}, lon_diff={}, h_diff={}'.format(vobs, lat_diff, lon_diff, h_diff))
    if lat_diff > 1.0e-6:
        print('PY GeoidTestCase: EXCESSIVE latitude check error = {}'.format(lat_diff))
        return 1
    if h_diff > 0.001:
        print('PY GeoidTestCase: EXCESSIVE height check error = {}'.format(h_diff))
        return 1
    return 0
def Geoid():
    """Validate observer <-> vector conversions over many times and places.

    First exercises hand-crafted observers over several times (both J2000 and
    of-date), then sweeps one time over a dense latitude/longitude grid.
    Returns 0 on success, 1 on failure.
    """
    time_list = [
        astronomy.Time.Parse('1066-09-27T18:00:00Z'),
        astronomy.Time.Parse('1970-12-13T15:42:00Z'),
        astronomy.Time.Parse('1970-12-13T15:43:00Z'),
        astronomy.Time.Parse('2015-03-05T02:15:45Z')
    ]
    observer_list = [
        astronomy.Observer(0.0, 0.0, 0.0),
        astronomy.Observer(+1.5, +2.7, 7.4),
        astronomy.Observer(-1.5, -2.7, 7.4),
        astronomy.Observer(-53.7, +141.7, 100.0),
        astronomy.Observer(+30.0, -85.2, -50.0),
        astronomy.Observer(+90.0, +45.0, -50.0),
        astronomy.Observer(-90.0, -180.0, 0.0),
        astronomy.Observer(-89.0, -81.0, 1234.0),
        astronomy.Observer(+89.0, -103.4, 279.8),
        astronomy.Observer(+48.2, 24.5, 2019.0),
        astronomy.Observer(+28.5, -82.3, -3.4)
    ]
    # Test hand-crafted locations, each at every time, in both coordinate systems.
    for site in observer_list:
        for when in time_list:
            for ofdate in (False, True):
                if GeoidTestCase(when, site, ofdate):
                    return 1
    # More exhaustive tests for a single time value across many different geographic coordinates.
    # Solving for latitude is the most complicated part of VectorObserver, so
    # test every 1-degree increment of latitude, with 5-degree increments for longitude.
    when = astronomy.Time.Parse('2021-06-20T15:08:00Z')
    for lat in range(-90, 91):
        for lon in range(-175, 181, 5):
            if GeoidTestCase(when, astronomy.Observer(lat, lon, 0.0), True):
                return 1
    print('PY GeoidTest: PASS')
    return 0
#-----------------------------------------------------------------------------------------------------------
def JupiterMoons_CheckJpl(mindex, tt, pos, vel):
    """Compare astronomy.JupiterMoons() against one JPL Horizons state.

    mindex : moon index 0..3 selecting the Galilean moon via SelectJupiterMoon.
    tt     : terrestrial time (days since J2000).
    pos    : reference position as a 3-element sequence (presumably AU — matches
             the Horizons export; confirm against the data generation setup).
    vel    : reference velocity as a 3-element sequence.
    Returns 0 if relative position and velocity errors are within tolerance,
    otherwise 1.
    """
    pos_tolerance = 9.0e-4
    vel_tolerance = 9.0e-4
    time = astronomy.Time.FromTerrestrialTime(tt)
    jm = astronomy.JupiterMoons(time)
    moon = SelectJupiterMoon(jm, mindex)
    dx = v(pos[0] - moon.x)
    dy = v(pos[1] - moon.y)
    dz = v(pos[2] - moon.z)
    mag = sqrt(pos[0]*pos[0] + pos[1]*pos[1] + pos[2]*pos[2])
    # Relative position error: |calc - ref| / |ref|.
    pos_diff = sqrt(dx*dx + dy*dy + dz*dz) / mag
    if pos_diff > pos_tolerance:
        print('PY JupiterMoons_CheckJpl(mindex={}, tt={}): excessive position error {}'.format(mindex, tt, pos_diff))
        return 1
    dx = v(vel[0] - moon.vx)
    dy = v(vel[1] - moon.vy)
    dz = v(vel[2] - moon.vz)
    mag = sqrt(vel[0]*vel[0] + vel[1]*vel[1] + vel[2]*vel[2])
    # Relative velocity error: |calc - ref| / |ref|.
    vel_diff = sqrt(dx*dx + dy*dy + dz*dz) / mag
    if vel_diff > vel_tolerance:
        print('PY JupiterMoons_CheckJpl(mindex={}, tt={}): excessive velocity error {}'.format(mindex, tt, vel_diff))
        return 1
    Debug('PY JupiterMoons_CheckJpl: mindex={}, tt={}, pos_diff={}, vel_diff={}'.format(mindex, tt, pos_diff, vel_diff))
    return 0
def JupiterMoons():
    """Validate JupiterMoons() against four JPL Horizons data files.

    Reads jupiter_moons/horizons/jm{0..3}.txt. Each record between the
    $$SOE and $$EOE markers is a three-line group: time, position, velocity.
    Returns 0 on success, 1 on failure.
    """
    for mindex in range(4):
        filename = 'jupiter_moons/horizons/jm{}.txt'.format(mindex)
        with open(filename, 'rt') as infile:
            lnum = 0
            found = False
            # 'part' cycles 0 -> 1 -> 2 within a record: time line, position line, velocity line.
            part = -1
            expected_count = 5001
            count = 0
            for line in infile:
                line = line.rstrip()
                lnum += 1
                if not found:
                    if line == '$$SOE':
                        found = True
                        part = 0
                    elif line.startswith('Revised:'):
                        # Horizons moon IDs 501..504 map to mindex 0..3.
                        check_mindex = int(line[76:]) - 501
                        if mindex != check_mindex:
                            print('PY JupiterMoons({} line {}): moon index does not match: check={}, mindex={}'.format(filename, lnum, check_mindex, mindex))
                            return 1
                elif line == '$$EOE':
                    break
                else:
                    if part == 0:
                        # 2446545.000000000 = A.D. 1986-Apr-24 12:00:00.0000 TDB
                        tt = float(line.split()[0]) - 2451545.0    # convert JD to J2000 TT
                    elif part == 1:
                        # X = 1.134408131605554E-03 Y =-2.590904586750408E-03 Z =-7.490427225904720E-05
                        match = re.match(r'\s*X =\s*(\S+) Y =\s*(\S+) Z =\s*(\S+)', line)
                        if not match:
                            print('PY JupiterMoons({} line {}): cannot parse position vector.'.format(filename, lnum))
                            return 1
                        pos = [ float(match.group(1)), float(match.group(2)), float(match.group(3)) ]
                    else: # part == 2
                        # VX= 9.148038778472862E-03 VY= 3.973823407182510E-03 VZ= 2.765660368640458E-04
                        match = re.match(r'\s*VX=\s*(\S+) VY=\s*(\S+) VZ=\s*(\S+)', line)
                        if not match:
                            print('PY JupiterMoons({} line {}): cannot parse velocity vector.'.format(filename, lnum))
                            return 1
                        vel = [ float(match.group(1)), float(match.group(2)), float(match.group(3)) ]
                        # A complete record has been assembled; verify it.
                        if JupiterMoons_CheckJpl(mindex, tt, pos, vel):
                            print('PY JupiterMoons({} line {}): FAILED VERIFICATION.'.format(filename, lnum))
                            return 1
                        count += 1
                    part = (part + 1) % 3
            if count != expected_count:
                print('PY JupiterMoons: expected {} test cases, but found {}'.format(expected_count, count))
                return 1
    print('PY JupiterMoons: PASS')
    return 0
#-----------------------------------------------------------------------------------------------------------
def Issue103():
    """Regression case for https://github.com/cosinekitty/astronomy/issues/103.

    Prints the apparent horizontal coordinates of Venus for a fixed
    observer and time. Always returns 0.
    """
    observer = astronomy.Observer(29, -81, 10)
    time = astronomy.Time(-8.817548982869034808e+04)
    equ_ofdate = astronomy.Equator(astronomy.Body.Venus, time, observer, True, True)
    hor = astronomy.Horizon(time, observer, equ_ofdate.ra, equ_ofdate.dec, astronomy.Refraction.Airless)
    print('tt = {:23.16f}'.format(time.tt))
    print('az = {:23.16f}'.format(hor.azimuth))
    print('alt = {:23.16f}'.format(hor.altitude))
    return 0
#-----------------------------------------------------------------------------------------------------------
class _bary_stats_t:
    """Accumulates the worst-case position/velocity differences seen in a run."""
    def __init__(self):
        # Largest position and velocity discrepancies observed so far.
        self.max_rdiff = self.max_vdiff = 0.0
def StateVectorDiff(relative, vec, x, y, z):
    """Return the magnitude of the difference between `vec` and (x, y, z).

    When `relative` is true, the result is divided by |vec| to give a
    dimensionless relative error.
    """
    deltas = (v(vec[0] - x), v(vec[1] - y), v(vec[2] - z))
    mag_squared = sum(d*d for d in deltas)
    if relative:
        mag_squared /= (vec[0]*vec[0] + vec[1]*vec[1] + vec[2]*vec[2])
    return sqrt(mag_squared)
#-----------------------------------------------------------------------------------------------------------
def VerifyState(func, stats, filename, lnum, time, pos, vel, r_thresh, v_thresh):
    """Compare func.Eval(time) against reference pos/vel; track maxima in stats.

    A positive threshold requests a relative comparison; a negative one an
    absolute comparison against abs(threshold).
    Returns 0 on success, 1 on excessive error.
    """
    state = func.Eval(time)
    rdiff = StateVectorDiff((r_thresh > 0.0), pos, state.x, state.y, state.z)
    stats.max_rdiff = max(stats.max_rdiff, rdiff)
    vdiff = StateVectorDiff((v_thresh > 0.0), vel, state.vx, state.vy, state.vz)
    stats.max_vdiff = max(stats.max_vdiff, vdiff)
    if rdiff > abs(r_thresh):
        print('PY VerifyState({} line {}): EXCESSIVE position error = {:0.4e}'.format(filename, lnum, rdiff))
        return 1
    if vdiff > abs(v_thresh):
        print('PY VerifyState({} line {}): EXCESSIVE velocity error = {:0.4e}'.format(filename, lnum, vdiff))
        return 1
    return 0
class JplStateRecord:
    """Pairs a parsed state vector with the file line number it came from."""
    def __init__(self, lnum, state):
        self.lnum = lnum      # 1-based line number in the JPL Horizons file
        self.state = state    # the parsed astronomy.StateVector
def JplHorizonsStateVectors(filename):
    """Yield JplStateRecord objects parsed from a JPL Horizons output file.

    Records appear between the $$SOE and $$EOE markers as three-line groups:
    a time line, a position line, and a velocity line.

    NOTE(review): the `return 1` statements on parse errors only terminate
    this generator (the value is discarded by the iteration protocol), so a
    caller iterating the records sees a short iteration, not a failure code —
    confirm this is intended.
    """
    with open(filename, 'rt') as infile:
        lnum = 0
        # 'part' cycles 0 -> 1 -> 2 within a record: time line, position line, velocity line.
        part = 0
        found_begin = False
        for line in infile:
            line = line.rstrip()
            lnum += 1
            if not found_begin:
                if line == '$$SOE':
                    found_begin = True
            elif line == '$$EOE':
                break
            else:
                if part == 0:
                    # 2446545.000000000 = A.D. 1986-Apr-24 12:00:00.0000 TDB
                    tt = float(line.split()[0]) - 2451545.0    # convert JD to J2000 TT
                    time = astronomy.Time.FromTerrestrialTime(tt)
                elif part == 1:
                    # X = 1.134408131605554E-03 Y =-2.590904586750408E-03 Z =-7.490427225904720E-05
                    match = re.match(r'\s*X =\s*(\S+) Y =\s*(\S+) Z =\s*(\S+)', line)
                    if not match:
                        print('PY JplHorizonsStateVectors({} line {}): cannot parse position vector.'.format(filename, lnum))
                        return 1
                    rx, ry, rz = float(match.group(1)), float(match.group(2)), float(match.group(3))
                else: # part == 2
                    # VX= 9.148038778472862E-03 VY= 3.973823407182510E-03 VZ= 2.765660368640458E-04
                    match = re.match(r'\s*VX=\s*(\S+) VY=\s*(\S+) VZ=\s*(\S+)', line)
                    if not match:
                        print('PY JplHorizonsStateVectors({} line {}): cannot parse velocity vector.'.format(filename, lnum))
                        return 1
                    vx, vy, vz = float(match.group(1)), float(match.group(2)), float(match.group(3))
                    # A complete record has been assembled; hand it to the caller.
                    yield JplStateRecord(lnum, astronomy.StateVector(rx, ry, rz, vx, vy, vz, time))
                part = (part + 1) % 3
        return 0
def VerifyStateBody(func, filename, r_thresh, v_thresh):
    """Check every state vector in a JPL Horizons file against func.Eval.

    Returns 0 when all records verify, 1 on the first failing record.
    """
    stats = _bary_stats_t()
    count = 0
    for rec in JplHorizonsStateVectors(filename):
        s = rec.state
        failed = VerifyState(func, stats, filename, rec.lnum, s.t,
                             [s.x, s.y, s.z], [s.vx, s.vy, s.vz],
                             r_thresh, v_thresh)
        if failed:
            print('PY VerifyStateBody({} line {}): FAILED VERIFICATION.'.format(filename, rec.lnum))
            return 1
        count += 1
    Debug('PY VerifyStateBody({}): PASS - Tested {} cases. max rdiff={:0.3e}, vdiff={:0.3e}'.format(filename, count, stats.max_rdiff, stats.max_vdiff))
    return 0
#-----------------------------------------------------------------------------------------------------------
# Constants for use inside unit tests only; they don't make sense for public consumption.
_Body_GeoMoon = -100    # pseudo-body code: geocentric Moon state (dispatched to astronomy.GeoMoonState)
_Body_Geo_EMB = -101    # pseudo-body code: geocentric Earth/Moon barycenter (dispatched to astronomy.GeoEmbState)
class BaryStateFunc:
    """Adapter exposing barycentric state calculation through Eval(time).

    Accepts either an astronomy.Body value or one of the private pseudo-body
    codes _Body_GeoMoon / _Body_Geo_EMB.
    """
    def __init__(self, body):
        # The body (or pseudo-body code) whose state Eval() computes.
        self.body = body
    def Eval(self, time):
        """Return the state vector of the configured body at the given time."""
        if self.body == _Body_GeoMoon:
            return astronomy.GeoMoonState(time)
        elif self.body == _Body_Geo_EMB:
            return astronomy.GeoEmbState(time)
        else:
            return astronomy.BaryState(self.body, time)
def BaryState():
    """Validate barycentric state vectors for all supported bodies.

    The two thresholds passed to VerifyStateBody limit position and velocity
    errors; negative values request absolute (rather than relative) comparison.
    Returns 0 on success, 1 on the first failing body.
    """
    if VerifyStateBody(BaryStateFunc(astronomy.Body.Sun),     'barystate/Sun.txt',     -1.224e-05, -1.134e-07): return 1
    if VerifyStateBody(BaryStateFunc(astronomy.Body.Mercury), 'barystate/Mercury.txt',  1.672e-04,  2.698e-04): return 1
    if VerifyStateBody(BaryStateFunc(astronomy.Body.Venus),   'barystate/Venus.txt',    4.123e-05,  4.308e-05): return 1
    if VerifyStateBody(BaryStateFunc(astronomy.Body.Earth),   'barystate/Earth.txt',    2.296e-05,  6.359e-05): return 1
    if VerifyStateBody(BaryStateFunc(astronomy.Body.Mars),    'barystate/Mars.txt',     3.107e-05,  5.550e-05): return 1
    if VerifyStateBody(BaryStateFunc(astronomy.Body.Jupiter), 'barystate/Jupiter.txt',  7.389e-05,  2.471e-04): return 1
    if VerifyStateBody(BaryStateFunc(astronomy.Body.Saturn),  'barystate/Saturn.txt',   1.067e-04,  3.220e-04): return 1
    if VerifyStateBody(BaryStateFunc(astronomy.Body.Uranus),  'barystate/Uranus.txt',   9.035e-05,  2.519e-04): return 1
    if VerifyStateBody(BaryStateFunc(astronomy.Body.Neptune), 'barystate/Neptune.txt',  9.838e-05,  4.446e-04): return 1
    if VerifyStateBody(BaryStateFunc(astronomy.Body.Pluto),   'barystate/Pluto.txt',    4.259e-05,  7.827e-05): return 1
    if VerifyStateBody(BaryStateFunc(astronomy.Body.Moon),    "barystate/Moon.txt",     2.354e-05,  6.604e-05): return 1
    if VerifyStateBody(BaryStateFunc(astronomy.Body.EMB),     "barystate/EMB.txt",      2.353e-05,  6.511e-05): return 1
    if VerifyStateBody(BaryStateFunc(_Body_GeoMoon),          "barystate/GeoMoon.txt",  4.086e-05,  5.347e-05): return 1
    if VerifyStateBody(BaryStateFunc(_Body_Geo_EMB),          "barystate/GeoEMB.txt",   4.076e-05,  5.335e-05): return 1
    print('PY BaryState: PASS')
    return 0
#-----------------------------------------------------------------------------------------------------------
class HelioStateFunc:
    """Adapter exposing heliocentric state calculation through Eval(time)."""
    def __init__(self, body):
        # The astronomy.Body whose heliocentric state Eval() computes.
        self.body = body
    def Eval(self, time):
        """Return the heliocentric state vector of the body at the given time."""
        return astronomy.HelioState(self.body, time)
def HelioState():
    """Validate heliocentric state vectors for all supported bodies.

    The two thresholds passed to VerifyStateBody limit position and velocity
    errors; negative values request absolute (rather than relative) comparison.
    Returns 0 on success, 1 on the first failing body.
    """
    if VerifyStateBody(HelioStateFunc(astronomy.Body.SSB),     'heliostate/SSB.txt',     -1.209e-05, -1.125e-07): return 1
    if VerifyStateBody(HelioStateFunc(astronomy.Body.Mercury), 'heliostate/Mercury.txt',  1.481e-04,  2.756e-04): return 1
    if VerifyStateBody(HelioStateFunc(astronomy.Body.Venus),   'heliostate/Venus.txt',    3.528e-05,  4.485e-05): return 1
    if VerifyStateBody(HelioStateFunc(astronomy.Body.Earth),   'heliostate/Earth.txt',    1.476e-05,  6.105e-05): return 1
    if VerifyStateBody(HelioStateFunc(astronomy.Body.Mars),    'heliostate/Mars.txt',     3.154e-05,  5.603e-05): return 1
    if VerifyStateBody(HelioStateFunc(astronomy.Body.Jupiter), 'heliostate/Jupiter.txt',  7.455e-05,  2.562e-04): return 1
    if VerifyStateBody(HelioStateFunc(astronomy.Body.Saturn),  'heliostate/Saturn.txt',   1.066e-04,  3.150e-04): return 1
    if VerifyStateBody(HelioStateFunc(astronomy.Body.Uranus),  'heliostate/Uranus.txt',   9.034e-05,  2.712e-04): return 1
    if VerifyStateBody(HelioStateFunc(astronomy.Body.Neptune), 'heliostate/Neptune.txt',  9.834e-05,  4.534e-04): return 1
    if VerifyStateBody(HelioStateFunc(astronomy.Body.Pluto),   'heliostate/Pluto.txt',    4.271e-05,  1.198e-04): return 1
    if VerifyStateBody(HelioStateFunc(astronomy.Body.Moon),    'heliostate/Moon.txt',     1.477e-05,  6.195e-05): return 1
    if VerifyStateBody(HelioStateFunc(astronomy.Body.EMB),     'heliostate/EMB.txt',      1.476e-05,  6.106e-05): return 1
    print('PY HelioState: PASS')
    return 0
#-----------------------------------------------------------------------------------------------------------
class TopoStateFunc:
    """Adapter computing topocentric state vectors through Eval(time).

    Supports astronomy.Body.Earth and the private pseudo-body _Body_Geo_EMB.
    The observer is fixed at latitude +30, longitude -80, elevation 1000 m.
    """
    def __init__(self, body):
        # The body (or pseudo-body code) whose topocentric state Eval() computes.
        self.body = body
    def Eval(self, time):
        """Return the body's state relative to the fixed topocentric observer.

        Raises Exception for unsupported bodies.
        """
        observer = astronomy.Observer(30.0, -80.0, 1000.0)
        observer_state = astronomy.ObserverState(time, observer, False)
        if self.body == _Body_Geo_EMB:
            state = astronomy.GeoEmbState(time)
        elif self.body == astronomy.Body.Earth:
            # The Earth relative to the geocenter is the zero state vector.
            state = astronomy.StateVector(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, time)
        else:
            # BUG FIX: the original concatenated the non-str body onto a str
            # ('...' + self.body), which would raise TypeError instead of the
            # intended Exception. Format the body into the message instead.
            raise Exception('PY TopoStateFunction: unsupported body {}'.format(self.body))
        # Shift from geocentric to topocentric coordinates.
        state.x -= observer_state.x
        state.y -= observer_state.y
        state.z -= observer_state.z
        state.vx -= observer_state.vx
        state.vy -= observer_state.vy
        state.vz -= observer_state.vz
        return state
def TopoState():
    """Validate topocentric state vectors for Earth and the geocentric EMB.

    Returns 0 on success, 1 on the first failing case.
    """
    cases = [
        (astronomy.Body.Earth, 'topostate/Earth_N30_W80_1000m.txt', 2.108e-04, 2.430e-04),
        (_Body_Geo_EMB,        'topostate/EMB_N30_W80_1000m.txt',   7.197e-04, 2.497e-04),
    ]
    for body, filename, r_thresh, v_thresh in cases:
        if VerifyStateBody(TopoStateFunc(body), filename, r_thresh, v_thresh):
            return 1
    print('PY TopoState: PASS')
    return 0
#-----------------------------------------------------------------------------------------------------------
def Aberration():
    """Verify aberration correction using JPL Horizons Mars coordinates.

    Each data record supplies both J2000 (jra, jdec) and of-date (dra, ddec)
    angular coordinates; the J2000 pair is aberration-corrected and rotated
    to of-date, then compared with the reference pair.
    Returns 0 on success, 1 on failure.
    """
    THRESHOLD_SECONDS = 0.453
    filename = 'equatorial/Mars_j2000_ofdate_aberration.txt'
    count = 0
    with open(filename, 'rt') as infile:
        lnum = 0
        found_begin = False
        max_diff_seconds = 0.0
        for line in infile:
            lnum += 1
            line = line.rstrip()
            if not found_begin:
                if line == '$$SOE':
                    found_begin = True
            elif line == '$$EOE':
                break
            else:
                # 2459371.500000000 * 118.566080210 22.210647456 118.874086738 22.155784122
                token = line.split()
                if len(token) < 5:
                    print('PY Aberration({} line {}): not enough tokens'.format(filename, lnum))
                    return 1
                jd = float(token[0])
                jra = float(token[-4])
                jdec = float(token[-3])
                dra = float(token[-2])
                ddec = float(token[-1])
                # Convert julian day value to AstroTime.
                time = astronomy.Time(jd - 2451545.0)
                # Convert EQJ angular coordinates (jra, jdec) to an EQJ vector.
                # Make the magnitude of the vector the speed of light,
                # to prepare for aberration correction.
                eqj_sphere = astronomy.Spherical(jdec, jra, astronomy.C_AUDAY)
                eqj_vec = astronomy.VectorFromSphere(eqj_sphere, time)
                # Aberration correction: calculate the Earth's barycentric
                # velocity vector in EQJ coordinates.
                eqj_earth = astronomy.BaryState(astronomy.Body.Earth, time)
                # Use non-relativistic approximation: add light vector to Earth velocity vector.
                # This gives aberration-corrected apparent position of the star in EQJ.
                eqj_vec.x += eqj_earth.vx
                eqj_vec.y += eqj_earth.vy
                eqj_vec.z += eqj_earth.vz
                # Calculate the rotation matrix that converts J2000 coordinates to of-date coordinates.
                rot = astronomy.Rotation_EQJ_EQD(time)
                # Use the rotation matrix to re-orient the EQJ vector to an EQD vector.
                eqd_vec = astronomy.RotateVector(rot, eqj_vec)
                # Convert the EQD vector back to spherical angular coordinates.
                eqd_sphere = astronomy.SphereFromVector(eqd_vec)
                # Calculate the differences in RA and DEC between expected and calculated values.
                factor = math.cos(math.radians(v(eqd_sphere.lat)))    # RA errors are less important toward the poles.
                xra = factor * vabs(eqd_sphere.lon - dra)
                xdec = vabs(eqd_sphere.lat - ddec)
                diff_seconds = 3600.0 * sqrt(xra*xra + xdec*xdec)
                Debug('PY Aberration({} line {}): xra={:0.6f} deg, xdec={:0.6f} deg, diff_seconds={:0.3f}.'.format(filename, lnum, xra, xdec, diff_seconds))
                if diff_seconds > THRESHOLD_SECONDS:
                    print('PY Aberration({} line {}): EXCESSIVE ANGULAR ERROR = {:0.3f} seconds.'.format(filename, lnum, diff_seconds));
                    return 1
                if diff_seconds > max_diff_seconds:
                    max_diff_seconds = diff_seconds
                # We have completed one more test case.
                count += 1
    print('PY AberrationTest({}): PASS - Tested {} cases. max_diff_seconds = {:0.3f}'.format(filename, count, max_diff_seconds))
    return 0
#-----------------------------------------------------------------------------------------------------------
def Twilight():
    """Verify SearchAltitude() for the six standard twilight events.

    Each line of riseset/twilight.txt contains latitude, longitude, a search
    date, then six reference times: astronomical/nautical/civil dawn followed
    by civil/nautical/astronomical dusk.
    Returns 0 on success, 1 on failure.
    """
    tolerance_seconds = 60.0
    max_diff = 0.0
    filename = 'riseset/twilight.txt'
    with open(filename, 'rt') as infile:
        lnum = 0
        for line in infile:
            lnum += 1
            tokens = line.split()
            if len(tokens) != 9:
                print('PY Twilight({} line {}): incorrect number of tokens = {}'.format(filename, lnum, len(tokens)))
                return 1
            observer = astronomy.Observer(float(tokens[0]), float(tokens[1]), 0.0)
            searchDate = astronomy.Time.Parse(tokens[2])
            correctTimes = [astronomy.Time.Parse(t) for t in tokens[3:]]
            # Calculated times in the same order as the reference columns.
            calcTimes = [
                astronomy.SearchAltitude(astronomy.Body.Sun, observer, astronomy.Direction.Rise, searchDate, 1.0, -18.0),  # astronomical dawn
                astronomy.SearchAltitude(astronomy.Body.Sun, observer, astronomy.Direction.Rise, searchDate, 1.0, -12.0),  # nautical dawn
                astronomy.SearchAltitude(astronomy.Body.Sun, observer, astronomy.Direction.Rise, searchDate, 1.0, -6.0),   # civil dawn
                astronomy.SearchAltitude(astronomy.Body.Sun, observer, astronomy.Direction.Set, searchDate, 1.0, -6.0),    # civil dusk
                astronomy.SearchAltitude(astronomy.Body.Sun, observer, astronomy.Direction.Set, searchDate, 1.0, -12.0),   # nautical dusk
                astronomy.SearchAltitude(astronomy.Body.Sun, observer, astronomy.Direction.Set, searchDate, 1.0, -18.0)    # astronomical dusk
            ]
            for i in range(6):
                correct = correctTimes[i]
                calc = calcTimes[i]
                diff = SECONDS_PER_DAY * vabs(calc.ut - correct.ut)
                if diff > tolerance_seconds:
                    print('PY Twilight({} line {}): EXCESSIVE ERROR = {} seconds for case {}'.format(filename, lnum, diff, i))
                    return 1
                if diff > max_diff:
                    max_diff = diff
    print('PY Twilight: PASS ({} test cases, max error = {} seconds)'.format(lnum, max_diff))
    return 0
#-----------------------------------------------------------------------------------------------------------
def LibrationFile(filename):
    """Verify astronomy.Libration() against one year of JPL mooninfo data.

    filename : a mooninfo_YYYY.txt file whose first line is a fixed header,
               followed by one record per time step.
    Returns 0 on success, 1 on failure.
    """
    max_diff_elon = 0.0
    max_diff_elat = 0.0
    max_diff_distance = 0.0
    max_diff_diam = 0.0
    # Sentinel lower than any valid ecliptic longitude.
    max_eclip_lon = -900.0
    count = 0
    with open(filename, 'rt') as infile:
        lnum = 0
        for line in infile:
            lnum += 1
            token = line.split()
            if lnum == 1:
                # The first line must be the expected column header, verbatim.
                if line != "   Date       Time    Phase    Age    Diam    Dist     RA    Dec   Slon   Slat   Elon   Elat   AxisA\n":
                    print('PY LibrationFile({} line {}): unexpected header line'.format(filename, lnum))
                    return 1
            else:
                if len(token) != 16:
                    print('PY LibrationFile({} line {}): expected 16 tokens, found {}'.format(filename, lnum, len(token)))
                    return 1
                day = int(token[0])
                month = MonthNumber(token[1])
                year = int(token[2])
                hmtoken = token[3].split(':')
                if len(hmtoken) != 2:
                    print('PY LibrationFile({} line {}): expected hh:mm but found "{}"'.format(filename, lnum, hmtoken))
                    return 1
                hour = int(hmtoken[0])
                minute = int(hmtoken[1])
                time = astronomy.Time.Make(year, month, day, hour, minute, 0.0)
                diam = float(token[7]) / 3600.0    # arcseconds -> degrees
                dist = float(token[8])
                elon = float(token[13])
                elat = float(token[14])
                lib = astronomy.Libration(time)
                # Track worst-case differences; longitudes/latitudes compared in arcminutes.
                diff_elon = 60.0 * vabs(lib.elon - elon)
                if diff_elon > max_diff_elon:
                    max_diff_elon = diff_elon
                diff_elat = 60.0 * vabs(lib.elat - elat)
                if diff_elat > max_diff_elat:
                    max_diff_elat = diff_elat
                diff_distance = vabs(lib.dist_km - dist)
                if diff_distance > max_diff_distance:
                    max_diff_distance = diff_distance
                diff_diam = vabs(lib.diam_deg - diam)
                if diff_diam > max_diff_diam:
                    max_diff_diam = diff_diam
                if lib.mlon > max_eclip_lon:
                    max_eclip_lon = lib.mlon
                if diff_elon > 0.1304:
                    print('PY LibrationFile({} line {}): EXCESSIVE diff_elon = {}'.format(filename, lnum, diff_elon))
                    return 1
                if diff_elat > 1.6476:
                    print('PY LibrationFile({} line {}): EXCESSIVE diff_elat = {}'.format(filename, lnum, diff_elat))
                    return 1
                if diff_distance > 54.377:
                    print('PY LibrationFile({} line {}): EXCESSIVE diff_distance = {}'.format(filename, lnum, diff_distance))
                    return 1
                if diff_diam > 0.00009:
                    print('PY LibrationFile({} line {}): EXCESSIVE diff_diam = {}'.format(filename, lnum, diff_diam))
                    return 1
                count += 1
    # Over a full year the Moon's ecliptic longitude must approach (but not reach) 360 degrees.
    if not (359.0 < max_eclip_lon < 360.0):
        print('PY LibrationFile({}): INVALID max ecliptic longitude = {:0.3f}'.format(filename, max_eclip_lon))
        return 1
    print('PY LibrationFile({}): PASS ({} test cases, max_diff_elon = {} arcmin, max_diff_elat = {} arcmin, max_diff_distance = {} km, max_diff_diam = {} deg)'.format(
        filename, count, max_diff_elon, max_diff_elat, max_diff_distance, max_diff_diam
    ))
    return 0
def Libration():
    """Run lunar libration verification against three years of data files.

    Returns 0 when all files pass, otherwise the first nonzero failure code.
    """
    for year in (2020, 2021, 2022):
        rc = LibrationFile('libration/mooninfo_{}.txt'.format(year))
        if rc:
            return rc
    return 0
#-----------------------------------------------------------------------------------------------------------
def Axis():
    """Validate rotation axis calculations for all supported bodies.

    Each call checks one body's north pole direction against a JPL Horizons
    data file, with a per-body arcminute tolerance.
    Returns 0 on success, 1 on the first failing body.
    """
    if AxisTestBody(astronomy.Body.Sun,      'axis/Sun.txt',       0.0)      : return 1
    if AxisTestBody(astronomy.Body.Mercury,  'axis/Mercury.txt',   0.074340) : return 1
    if AxisTestBody(astronomy.Body.Venus,    'axis/Venus.txt',     0.0)      : return 1
    if AxisTestBody(astronomy.Body.Earth,    'axis/Earth.txt',     0.002032) : return 1
    if AxisTestBody(astronomy.Body.Moon,     'axis/Moon.txt',      0.264845) : return 1
    if AxisTestBody(astronomy.Body.Mars,     'axis/Mars.txt',      0.075323) : return 1
    if AxisTestBody(astronomy.Body.Jupiter,  'axis/Jupiter.txt',   0.000324) : return 1
    if AxisTestBody(astronomy.Body.Saturn,   'axis/Saturn.txt',    0.000304) : return 1
    if AxisTestBody(astronomy.Body.Uranus,   'axis/Uranus.txt',    0.0)      : return 1
    if AxisTestBody(astronomy.Body.Neptune,  'axis/Neptune.txt',   0.000464) : return 1
    if AxisTestBody(astronomy.Body.Pluto,    'axis/Pluto.txt',     0.0)      : return 1
    print('PY AxisTest: PASS')
    return 0
def AxisTestBody(body, filename, arcmin_tolerance):
max_arcmin = 0
lnum = 0
count = 0
found_data = False
with open(filename, 'rt') as infile:
for line in infile:
line = line.strip()
lnum += 1
if not found_data:
if line == '$$SOE':
found_data = True
else:
if line == '$$EOE':
break
token = line.split()
# [ '1970-Jan-01', '00:00', '2440587.500000000', '281.01954', '61.41577' ]
jd = float(token[2])
ra = float(token[3])
dec = float(token[4])
time = astronomy.Time(jd - 2451545.0)
axis = astronomy.RotationAxis(body, time)
sphere = astronomy.Spherical(dec, ra, 1.0)
north = astronomy.VectorFromSphere(sphere, time)
arcmin = 60.0 * astronomy.AngleBetween(north, axis.north)
if arcmin > max_arcmin:
max_arcmin = arcmin
count += 1
Debug('PY AxisTestBody({}): {} test cases, max arcmin error = {}.'.format(body, count, max_arcmin))
if max_arcmin > arcmin_tolerance:
print('PY AxisTestBody({}): EXCESSIVE ERROR = {}'.format(body, max_arcmin))
return 1
return 0
#-----------------------------------------------------------------------------------------------------------
def MoonNodes():
filename = 'moon_nodes/moon_nodes.txt'
with open(filename, 'rt') as infile:
max_angle = 0.0
max_minutes = 0.0
prev_kind = '?'
lnum = 0
for line in infile:
line = line.strip()
lnum += 1
token = line.split()
if len(token) != 4:
print('PY MoonNodes({} line {}): syntax error'.format(filename, lnum))
return 1
kind = token[0]
if kind not in 'AD':
print('PY MoonNodes({} line {}): invalid node kind'.format(filename, lnum))
return 1
if kind == prev_kind:
print('PY MoonNodes({} line {}): duplicate ascending/descending node'.format(filename, lnum))
return 1
time = astronomy.Time.Parse(token[1])
ra = float(token[2])
dec = float(token[3])
sphere = astronomy.Spherical(dec, 15.0 * ra, 1.0)
vec_test = astronomy.VectorFromSphere(sphere, time)
# Calculate EQD coordinates of the Moon. Verify against input file.
vec_eqj = astronomy.GeoMoon(time)
rot = astronomy.Rotation_EQJ_EQD(time)
vec_eqd = astronomy.RotateVector(rot, vec_eqj)
angle = astronomy.AngleBetween(vec_test, vec_eqd)
diff_angle = 60.0 * abs(angle)
if diff_angle > max_angle:
max_angle = diff_angle
if diff_angle > 1.54:
print('PY MoonNodes({} line {}): EXCESSIVE equatorial error = {:0.3f} arcmin'.format(filename, lnum, diff_angle))
if lnum == 1:
# The very first time, so search for the first node in the series.
# Back up a few days to make sure we really are finding it ourselves.
earlier = time.AddDays(-6.5472) # back up by a weird amount of time
node = astronomy.SearchMoonNode(earlier)
else:
# Use the previous node to find the next node.
node = astronomy.NextMoonNode(node)
# Verify the ecliptic latitude is very close to zero at the alleged node.
ecl = astronomy.EclipticGeoMoon(node.time)
diff_lat = 60.0 * abs(ecl.lat)
if diff_lat > 8.1e-4:
print('PY MoonNodes({} line {}): found node has excessive latitude = {:0.4f} arcmin.'.format(filename, lnum, diff_lat))
return 1
# Verify the time agrees with Espenak's time to within a few minutes.
diff_minutes = (24.0 * 60.0) * abs(node.time.tt - time.tt)
if diff_minutes > max_minutes:
max_minutes = diff_minutes
# Verify the kind of node matches what Espenak says (ascending or descending).
if kind == 'A' and node.kind != astronomy.NodeEventKind.Ascending:
print('PY MoonNodes({} line {}): did not find ascending node as expected.'.format(filename, lnum))
return 1
if kind == 'D' and node.kind != astronomy.NodeEventKind.Descending:
print('PY MoonNodes({} line {}): did not find descending node as expected.'.format(filename, lnum))
return 1
prev_kind = kind
if max_minutes > 3.681:
print('PY MoonNodes: EXCESSIVE time prediction error = {:0.3f} minutes.'.format(max_minutes))
return 1
print('PY MoonNodes: PASS ({} nodes, max equ error = {:0.3f} arcmin, max time error = {:0.3f} minutes.)'.format(lnum, max_angle, max_minutes))
return 0
#-----------------------------------------------------------------------------------------------------------
def MoonReversePhase(longitude):
# Verify that SearchMoonPhase works both forward and backward in time.
nphases = 5000
utList = []
dtMin = +1000.0
dtMax = -1000.0
# Search forward in time from 1800 to find consecutive phase events events.
time = astronomy.Time.Make(1800, 1, 1, 0, 0, 0.0)
for i in range(nphases):
result = astronomy.SearchMoonPhase(longitude, time, +40.0)
if result is None:
print('PY MoonReversePhase(lon={}, i={}): failed to find event after {}'.format(longitude, i, time))
return 1
utList.append(result.ut)
if i > 0:
# Verify that consecutive events are reasonably close to the synodic period (29.5 days) apart.
dt = v(utList[i] - utList[i-1])
if dt < dtMin:
dtMin = dt
if dt > dtMax:
dtMax = dt
time = result.AddDays(+0.1)
Debug('PY MoonReversePhase({}): dtMin={:0.6f} days, dtMax={:0.6f} days.'.format(longitude, dtMin, dtMax))
if (dtMin < 29.175) or (dtMax > 29.926):
print('PY MoonReversePhase({}): Time between consecutive events is suspicious.'.format(longitude))
return 1
# Do a reverse chronological search and make sure the results are consistent with the forward search.
time = time.AddDays(20.0)
maxDiff = 0.0
for i in range(nphases-1, -1, -1):
result = astronomy.SearchMoonPhase(longitude, time, -40.0)
if result is None:
print('PY MoonReversePhase(lon={}, i={}): failed to find event before {}'.format(longitude, i, time))
return 1
diff = SECONDS_PER_DAY * vabs(result.ut - utList[i])
if diff > maxDiff:
maxDiff = diff
time = result.AddDays(-0.1)
Debug('PY MoonReversePhase({}): Maximum discrepancy in reverse search = {:0.6f} seconds.'.format(longitude, maxDiff))
if maxDiff > 0.164:
print('PY MoonReversePhase({}): EXCESSIVE DISCREPANCY in reverse search.'.format(longitude))
return 1
# Pick a pair of consecutive events from the middle of the list.
# Verify forward and backward searches work correctly from many intermediate times.
nslots = 100
k = nphases // 2
ut1 = utList[k]
ut2 = utList[k+1]
for i in range(1, nslots):
ut = ut1 + (i/nslots)*(ut2 - ut1)
time = astronomy.Time(ut)
before = astronomy.SearchMoonPhase(longitude, time, -40.0)
if before is None:
print('PY MoonReversePhase(lon={}, time={}): backward search failed'.format(longitude, time))
return 1
diff = SECONDS_PER_DAY * vabs(before.ut - ut1)
if diff > 0.07:
print('PY MoonReversePhase(lon={}, time={}): backward search error = {:0.4e} seconds'.format(longitude, time, diff))
return 1
after = astronomy.SearchMoonPhase(longitude, time, +40.0)
if after is None:
print('PY MoonReversePhase(lon={}, time={}): forward search failed'.format(longitude, time))
return 1
diff = SECONDS_PER_DAY * vabs(after.ut - ut2)
if diff > 0.07:
print('PY MoonReversePhase(lon={}, time={}): forward search error = {:0.4e} seconds'.format(longitude, time, diff))
return 1
print('PY MoonReversePhase({}): PASS'.format(longitude))
return 0
def MoonReverse():
return (
MoonReversePhase(0.0) or
MoonReversePhase(90.0) or
MoonReversePhase(180.0) or
MoonReversePhase(270.0)
)
#-----------------------------------------------------------------------------------------------------------
class LagrangeFunc:
def __init__(self, point, major_body, minor_body):
self.point = point
self.major_body = major_body
self.minor_body = minor_body
def Eval(self, time):
return astronomy.LagrangePoint(self.point, time, self.major_body, self.minor_body)
def VerifyStateLagrange(major_body, minor_body, point, filename, r_thresh, v_thresh):
func = LagrangeFunc(point, major_body, minor_body)
return VerifyStateBody(func, filename, r_thresh, v_thresh)
def Lagrange():
# Test Sun/EMB Lagrange points.
if VerifyStateLagrange(astronomy.Body.Sun, astronomy.Body.EMB, 1, 'lagrange/semb_L1.txt', 1.33e-5, 6.13e-5): return 1
if VerifyStateLagrange(astronomy.Body.Sun, astronomy.Body.EMB, 2, 'lagrange/semb_L2.txt', 1.33e-5, 6.13e-5): return 1
if VerifyStateLagrange(astronomy.Body.Sun, astronomy.Body.EMB, 4, 'lagrange/semb_L4.txt', 3.75e-5, 5.28e-5): return 1
if VerifyStateLagrange(astronomy.Body.Sun, astronomy.Body.EMB, 5, 'lagrange/semb_L5.txt', 3.75e-5, 5.28e-5): return 1
# Test Earth/Moon Lagrange points.
if VerifyStateLagrange(astronomy.Body.Earth, astronomy.Body.Moon, 1, 'lagrange/em_L1.txt', 3.79e-5, 5.06e-5): return 1
if VerifyStateLagrange(astronomy.Body.Earth, astronomy.Body.Moon, 2, 'lagrange/em_L2.txt', 3.79e-5, 5.06e-5): return 1
if VerifyStateLagrange(astronomy.Body.Earth, astronomy.Body.Moon, 4, 'lagrange/em_L4.txt', 3.79e-5, 1.59e-3): return 1
if VerifyStateLagrange(astronomy.Body.Earth, astronomy.Body.Moon, 5, 'lagrange/em_L5.txt', 3.79e-5, 1.59e-3): return 1
print('PY Lagrange: PASS')
return 0
#-----------------------------------------------------------------------------------------------------------
def SiderealTime():
correct = 9.3983699280076483
time = astronomy.Time.Make(2022, 3, 15, 21, 50, 0)
gast = astronomy.SiderealTime(time)
diff = abs(gast - correct)
print('PY SiderealTime: gast={:0.15f}, correct={:0.15f}, diff={:0.3e}'.format(gast, correct, diff))
if diff > 1.0e-15:
print('PY SiderealTime: EXCESSIVE ERROR')
return 1
print('PY SiderealTime: PASS')
return 0
#-----------------------------------------------------------------------------------------------------------
def Repr():
time = astronomy.Time.Make(2022, 3, 31, 21, 4, 45.123)
if str(time) != '2022-03-31T21:04:45.123Z':
print('PY Repr: FAIL str(time)')
return 1
if repr(time) != "Time('2022-03-31T21:04:45.123Z')":
print('PY Repr: FAIL repr(time)')
return 1
vec = astronomy.Vector(-1.8439088914585775, 1.51657508881031, 0.8366600265340756, time)
if repr(vec) != "Vector(-1.8439088914585775, 1.51657508881031, 0.8366600265340756, Time('2022-03-31T21:04:45.123Z'))":
print('PY Repr: FAIL repr(vec)')
return 1
state = astronomy.StateVector(vec.x, vec.y, vec.z, -vec.x/3, -vec.y/3, -vec.z/3, vec.t)
if repr(state) != "StateVector(x=-1.8439088914585775, y=1.51657508881031, z=0.8366600265340756, vx=0.6146362971528592, vy=-0.5055250296034367, vz=-0.27888667551135854, t=Time('2022-03-31T21:04:45.123Z'))":
print('PY Repr: FAIL repr(state)')
return 1
observer = astronomy.Observer(32.1, 45.6, 98.765)
if repr(observer) != 'Observer(latitude=32.1, longitude=45.6, height=98.765)':
print('PY Repr: FAIL repr(observer)')
return 1
rot = astronomy.Rotation_EQJ_ECL()
if repr(rot) != 'RotationMatrix([[1, 0, 0], [0, 0.9174821430670688, -0.3977769691083922], [0, 0.3977769691083922, 0.9174821430670688]])':
print('PY Repr: FAIL repr(rot)')
return 1
sph = astronomy.Spherical(lat=-27.3, lon=85.2, dist=2.54)
if repr(sph) != 'Spherical(lat=-27.3, lon=85.2, dist=2.54)':
print('PY Repr: FAIL repr(sph)')
return 1
equ = astronomy.Equatorial(8.54, -23.753, 2.986, vec)
if repr(equ) != "Equatorial(ra=8.54, dec=-23.753, dist=2.986, vec=Vector(-1.8439088914585775, 1.51657508881031, 0.8366600265340756, Time('2022-03-31T21:04:45.123Z')))":
print('PY Repr: FAIL repr(equ)')
return 1
print('PY Repr: PASS')
return 0
#-----------------------------------------------------------------------------------------------------------
def GravSimTest():
Debug("")
if 0 != GravSimEmpty("barystate/Sun.txt", astronomy.Body.SSB, astronomy.Body.Sun, 0.0269, 1.9635): return 1
if 0 != GravSimEmpty("barystate/Mercury.txt", astronomy.Body.SSB, astronomy.Body.Mercury, 0.5725, 0.9332): return 1
if 0 != GravSimEmpty("barystate/Venus.txt", astronomy.Body.SSB, astronomy.Body.Venus, 0.1433, 0.1458): return 1
if 0 != GravSimEmpty("barystate/Earth.txt", astronomy.Body.SSB, astronomy.Body.Earth, 0.0651, 0.2098): return 1
if 0 != GravSimEmpty("barystate/Mars.txt", astronomy.Body.SSB, astronomy.Body.Mars, 0.1150, 0.1896): return 1
if 0 != GravSimEmpty("barystate/Jupiter.txt", astronomy.Body.SSB, astronomy.Body.Jupiter, 0.2546, 0.8831): return 1
if 0 != GravSimEmpty("barystate/Saturn.txt", astronomy.Body.SSB, astronomy.Body.Saturn, 0.3660, 1.0818): return 1
if 0 != GravSimEmpty("barystate/Uranus.txt", astronomy.Body.SSB, astronomy.Body.Uranus, 0.3107, 0.9321): return 1
if 0 != GravSimEmpty("barystate/Neptune.txt", astronomy.Body.SSB, astronomy.Body.Neptune, 0.3382, 1.5586): return 1
if 0 != GravSimEmpty("heliostate/Mercury.txt", astronomy.Body.Sun, astronomy.Body.Mercury, 0.5087, 0.9473): return 1
if 0 != GravSimEmpty("heliostate/Venus.txt", astronomy.Body.Sun, astronomy.Body.Venus, 0.1214, 0.1543): return 1
if 0 != GravSimEmpty("heliostate/Earth.txt", astronomy.Body.Sun, astronomy.Body.Earth, 0.0508, 0.2099): return 1
if 0 != GravSimEmpty("heliostate/Mars.txt", astronomy.Body.Sun, astronomy.Body.Mars, 0.1085, 0.1927): return 1
if 0 != GravSimEmpty("heliostate/Jupiter.txt", astronomy.Body.Sun, astronomy.Body.Jupiter, 0.2564, 0.8805): return 1
if 0 != GravSimEmpty("heliostate/Saturn.txt", astronomy.Body.Sun, astronomy.Body.Saturn, 0.3664, 1.0826): return 1
if 0 != GravSimEmpty("heliostate/Uranus.txt", astronomy.Body.Sun, astronomy.Body.Uranus, 0.3106, 0.9322): return 1
if 0 != GravSimEmpty("heliostate/Neptune.txt", astronomy.Body.Sun, astronomy.Body.Neptune, 0.3381, 1.5584): return 1
Debug("")
nsteps = 20
if 0 != GravSimFile("barystate/Ceres.txt", astronomy.Body.SSB, nsteps, 0.6640, 0.6226): return 1
if 0 != GravSimFile("barystate/Pallas.txt", astronomy.Body.SSB, nsteps, 0.4687, 0.3474): return 1
if 0 != GravSimFile("barystate/Vesta.txt", astronomy.Body.SSB, nsteps, 0.5806, 0.5462): return 1
if 0 != GravSimFile("barystate/Juno.txt", astronomy.Body.SSB, nsteps, 0.6760, 0.5750): return 1
if 0 != GravSimFile("barystate/Bennu.txt", astronomy.Body.SSB, nsteps, 3.7444, 2.6581): return 1
if 0 != GravSimFile("barystate/Halley.txt", astronomy.Body.SSB, nsteps, 0.0539, 0.0825): return 1
if 0 != GravSimFile("heliostate/Ceres.txt", astronomy.Body.Sun, nsteps, 0.0445, 0.0355): return 1
if 0 != GravSimFile("heliostate/Pallas.txt", astronomy.Body.Sun, nsteps, 0.1062, 0.0854): return 1
if 0 != GravSimFile("heliostate/Vesta.txt", astronomy.Body.Sun, nsteps, 0.1432, 0.1308): return 1
if 0 != GravSimFile("heliostate/Juno.txt", astronomy.Body.Sun, nsteps, 0.1554, 0.1328): return 1
if 0 != GravSimFile("geostate/Ceres.txt", astronomy.Body.Earth, nsteps, 6.5689, 6.4797): return 1
if 0 != GravSimFile("geostate/Pallas.txt", astronomy.Body.Earth, nsteps, 9.3288, 7.3533): return 1
if 0 != GravSimFile("geostate/Vesta.txt", astronomy.Body.Earth, nsteps, 3.2980, 3.8863): return 1
if 0 != GravSimFile("geostate/Juno.txt", astronomy.Body.Earth, nsteps, 6.0962, 7.7147): return 1
Debug("")
print("PY GravSimTest: PASS")
return 0
def GravSimEmpty(filename, origin, body, rthresh, vthresh):
max_rdiff = 0.0
max_vdiff = 0.0
sim = None
for rec in JplHorizonsStateVectors(filename):
if sim is None:
sim = astronomy.GravitySimulator(origin, rec.state.t, [])
sim.Update(rec.state.t)
calc = sim.SolarSystemBodyState(body)
if origin == astronomy.Body.SSB and body == astronomy.Body.Sun:
rdiff = SsbArcminPosError(rec.state, calc)
else:
rdiff = ArcminPosError(rec.state, calc)
if rdiff > rthresh:
print('PY GravSimEmpty({} line {}): excessive position error = {} arcmin.'.format(filename, rec.lnum, rdiff))
return 1
if rdiff > max_rdiff:
max_rdiff = rdiff
vdiff = ArcminVelError(rec.state, calc)
if vdiff > vthresh:
print('PY GravSimEmpty({} line {}): excessive velocity error = {} arcmin.'.format(filename, rec.lnum, vdiff))
return 1
if vdiff > max_vdiff:
max_vdiff = vdiff
Debug('PY GravSimEmpty({:22s}): PASS - max pos error = {:0.4f} arcmin, max vel error = {:0.4f} arcmin.'.format(filename, max_rdiff, max_vdiff))
return 0
def GravSimFile(filename, originBody, nsteps, rthresh, vthresh):
sim = None
max_rdiff = 0.0
max_vdiff = 0.0
for rec in JplHorizonsStateVectors(filename):
if sim is None:
sim = astronomy.GravitySimulator(originBody, rec.state.t, [rec.state])
time = rec.state.t
smallBodyArray = sim.Update(time)
else:
tt1 = prev.state.t.tt
tt2 = rec.state.t.tt
dt = (tt2 - tt1) / nsteps
for k in range(1, nsteps+1):
time = astronomy.Time.FromTerrestrialTime(tt1 + k*dt)
smallBodyArray = sim.Update(time)
if len(smallBodyArray) != 1:
print('PY GravSimFile({} line {}): unexpected smallBodyArray.length = {}'.format(filename, rec.lnum, len(smallBodyArray)))
return 1
if time.tt != sim.GetTime().tt:
print('PY GravSimFile({} line {}): expected {} but simulator reports {}'.format(filename, rec.lnum, time, sim.GetTime()))
return 1
rdiff = ArcminPosError(rec.state, smallBodyArray[0])
if rdiff > rthresh:
print('PY GravSimFile({} line {}): excessive position error = {}'.format(filename, rec.lnum, rdiff))
return 1
if rdiff > max_rdiff:
max_rdiff = rdiff
vdiff = ArcminVelError(rec.state, smallBodyArray[0])
if vdiff > vthresh:
print('PY GravSimFile({} line {}): excessive position error = {}'.format(filename, rec.lnum, vdiff))
return 1
if vdiff > max_vdiff:
max_vdiff = vdiff
prev = rec
Debug('PY GravSimFile({:22s}): PASS - max pos error = {:0.4f} arcmin, max vel error = {:0.4f} arcmin.'.format(filename, max_rdiff, max_vdiff))
return 0
def SsbArcminPosError(correct, calc):
# Scale the SSB based on 1 AU, not on its absolute magnitude, which can become very close to zero.
dx = calc.x - correct.x
dy = calc.y - correct.y
dz = calc.z - correct.z
diffSquared = dx*dx + dy*dy + dz*dz
radians = sqrt(diffSquared)
return 60.0 * math.degrees(radians)
def ArcminPosError(correct, calc):
dx = calc.x - correct.x
dy = calc.y - correct.y
dz = calc.z - correct.z
diffSquared = dx*dx + dy*dy + dz*dz
magSquared = correct.x*correct.x + correct.y*correct.y + correct.z*correct.z
radians = sqrt(diffSquared / magSquared)
return 60.0 * math.degrees(radians)
def ArcminVelError(correct, calc):
dx = calc.vx - correct.vx
dy = calc.vy - correct.vy
dz = calc.vz - correct.vz
diffSquared = dx*dx + dy*dy + dz*dz
magSquared = correct.vx*correct.vx + correct.vy*correct.vy + correct.vz*correct.vz
radians = sqrt(diffSquared / magSquared)
return 60.0 * math.degrees(radians)
#-----------------------------------------------------------------------------------------------------------
def CheckDecemberSolstice(year, expected):
si = astronomy.Seasons(year)
actual = str(si.dec_solstice)
if actual != expected:
print('PY DatesIssue250: FAIL: year {}, expected [{}], actual [{}]'.format(year, expected, actual))
return 1
return 0
def DatesIssue250():
# Make sure we can handle dates outside the range supported by System.DateTime.
# https://github.com/cosinekitty/astronomy/issues/250
return (
CheckDecemberSolstice( 2022, "2022-12-21T21:47:54.455Z") or
CheckDecemberSolstice(-2300, "-002300-12-19T16:22:27.929Z") or
CheckDecemberSolstice(12345, "+012345-12-11T13:30:10.276Z") or
Pass('DatesIssue250')
)
#-----------------------------------------------------------------------------------------------------------
def LunarFractionCase(year, month, day, obscuration):
time = astronomy.Time.Make(year, month, day, 0, 0, 0.0)
eclipse = astronomy.SearchLunarEclipse(time)
# This should be a partial lunar eclipse.
if eclipse.kind != astronomy.EclipseKind.Partial:
print('PY LunarFractionCase({:04d}-{:02d}-{:02d}) FAIL: expected partial eclipse, but found {}.'.format(year, month, day, eclipse.kind))
return 1
# The partial eclipse should always happen within 24 hours of the given date.
dt = v(eclipse.peak.ut - time.ut)
if dt < 0.0 or dt > 1.0:
print('PY LunarFractionCase({:04d}-{:02d}-{:02d}) FAIL: eclipse occurs {:0.4f} days after predicted date.'.format(year, month, day, dt))
return 1
diff = v(eclipse.obscuration - obscuration)
if abs(diff) > 0.00763:
print('PY LunarFractionCase({:04d}-{:02d}-{:02d}) FAIL: excessive obscuration diff = {:0.8f}, expected = {:0.8f}, actual = {:0.8f}'.format(year, month, day, diff, obscuration, eclipse.obscuration))
return 1
Debug('PY LunarFractionCase({:04d}-{:02d}-{:02d}): obscuration diff = {:11.8f}'.format(year, month, day, diff))
return 0
def LunarFraction():
# Verify calculation of the fraction of the Moon's disc covered by the Earth's umbra during a partial eclipse.
# Data for this is more tedious to gather, because Espenak data does not contain it.
# We already verify fraction=0.0 for penumbral eclipses and fraction=1.0 for total eclipses in LunarEclipseTest.
return (
LunarFractionCase(2010, 6, 26, 0.506) or # https://www.timeanddate.com/eclipse/lunar/2010-june-26
LunarFractionCase(2012, 6, 4, 0.304) or # https://www.timeanddate.com/eclipse/lunar/2012-june-4
LunarFractionCase(2013, 4, 25, 0.003) or # https://www.timeanddate.com/eclipse/lunar/2013-april-25
LunarFractionCase(2017, 8, 7, 0.169) or # https://www.timeanddate.com/eclipse/lunar/2017-august-7
LunarFractionCase(2019, 7, 16, 0.654) or # https://www.timeanddate.com/eclipse/lunar/2019-july-16
LunarFractionCase(2021, 11, 19, 0.991) or # https://www.timeanddate.com/eclipse/lunar/2021-november-19
LunarFractionCase(2023, 10, 28, 0.060) or # https://www.timeanddate.com/eclipse/lunar/2023-october-28
LunarFractionCase(2024, 9, 18, 0.035) or # https://www.timeanddate.com/eclipse/lunar/2024-september-18
LunarFractionCase(2026, 8, 28, 0.962) or # https://www.timeanddate.com/eclipse/lunar/2026-august-28
LunarFractionCase(2028, 1, 12, 0.024) or # https://www.timeanddate.com/eclipse/lunar/2028-january-12
LunarFractionCase(2028, 7, 6, 0.325) or # https://www.timeanddate.com/eclipse/lunar/2028-july-6
LunarFractionCase(2030, 6, 15, 0.464) or # https://www.timeanddate.com/eclipse/lunar/2030-june-15
Pass('LunarFraction')
)
#-----------------------------------------------------------------------------------------------------------
def StarRiseSetCulmCase(starName, ra, dec, distLy, observer, year, month, day, riseHour, riseMinute, culmHour, culmMinute, setHour, setMinute):
func = 'StarRiseSetCulmCase({})'.format(starName)
# Calculate expected event times.
expectedRiseTime = astronomy.Time.Make(year, month, day, riseHour, riseMinute, 0.0)
expectedCulmTime = astronomy.Time.Make(year, month, day, culmHour, culmMinute, 0.0)
expectedSetTime = astronomy.Time.Make(year, month, day, setHour, setMinute, 0.0)
# Define a custom star object.
astronomy.DefineStar(astronomy.Body.Star1, ra, dec, distLy)
# Use Astronomy Engine to search for event times.
searchTime = astronomy.Time.Make(year, month, day, 0, 0, 0.0)
rise = astronomy.SearchRiseSet(astronomy.Body.Star1, observer, astronomy.Direction.Rise, searchTime, 1.0)
if rise is None:
return Fail(func, 'Star rise search failed.')
culm = astronomy.SearchHourAngle(astronomy.Body.Star1, observer, 0.0, searchTime, +1)
if culm is None:
return Fail(func, 'Star culmination search failed.')
set = astronomy.SearchRiseSet(astronomy.Body.Star1, observer, astronomy.Direction.Set, searchTime, 1.0)
if set is None:
return Fail(func, 'Star set search failed.')
# Compare expected times with calculated times.
rdiff = MINUTES_PER_DAY * vabs(expectedRiseTime.ut - rise.ut)
cdiff = MINUTES_PER_DAY * vabs(expectedCulmTime.ut - culm.time.ut)
sdiff = MINUTES_PER_DAY * vabs(expectedSetTime.ut - set.ut)
Debug("{}: minutes rdiff = {:0.4f}, cdiff = {:0.4f}, sdiff = {:0.4f}".format(func, rdiff, cdiff, sdiff))
if rdiff > 0.5: return Fail(func, "excessive rise time error = {:0.4f} minutes.".format(rdiff))
if cdiff > 0.5: return Fail(func, "excessive culm time error = {:0.4f} minutes.".format(cdiff))
if sdiff > 0.5: return Fail(func, "excessive set time error = {:0.4f} minutes.".format(sdiff))
return 0
def StarRiseSetCulm():
observer = astronomy.Observer(+25.77, -80.19, 0.0)
return (
StarRiseSetCulmCase("Sirius", 6.7525, -16.7183, 8.6, observer, 2022, 11, 21, 2, 37, 8, 6, 13, 34) or
StarRiseSetCulmCase("Sirius", 6.7525, -16.7183, 8.6, observer, 2022, 11, 25, 2, 22, 7, 50, 13, 18) or
StarRiseSetCulmCase("Canopus", 6.3992, -52.6956, 310.0, observer, 2022, 11, 21, 4, 17, 7, 44, 11, 11) or
StarRiseSetCulmCase("Canopus", 6.3992, -52.6956, 310.0, observer, 2022, 11, 25, 4, 1, 7, 28, 10, 56) or
Pass("StarRiseSetCulm")
)
#-----------------------------------------------------------------------------------------------------------
class HourAngleTester:
def __init__(self):
self.cases = 0
self.maxdiff = 0.0
def Case(self, latitude, longitude, hourAngle):
threshold = 0.1 / 3600 # SearchHourAngle() accuracy: 0.1 seconds converted to hours
observer = astronomy.Observer(latitude, longitude, 0)
startTime = astronomy.Time.Make(2023, 2, 11, 0, 0, 0)
search = astronomy.SearchHourAngle(astronomy.Body.Sun, observer, hourAngle, startTime, +1)
calc = astronomy.HourAngle(astronomy.Body.Sun, search.time, observer)
diff = vabs(calc - hourAngle)
if diff > 12.0:
diff = 24.0 - diff;
if diff > self.maxdiff:
self.maxdiff = diff
self.cases += 1
if diff > threshold:
print('PY HourAngleCase: EXCESSIVE ERROR = {:0.6e}, calc HA = {:0.16f}, for hourAngle={:0.1f}'.format(diff, calc, hourAngle))
return False
Debug('PY HourAngleCase: Hour angle = {:4.1f}, longitude = {:6.1f}, diff = {:9.4e}'.format(hourAngle, longitude, diff))
return True
def Pass(self):
print('PY HourAngle ({:d} cases, maxdiff = {:9.4e}): PASS'.format(self.cases, self.maxdiff))
return 0
def HourAngle():
tester = HourAngleTester()
latitude = 35
longitude = -170
while longitude <= 180:
hour = 0
while hour < 24:
if not tester.Case(latitude, longitude, hour):
return 1
hour += 1
longitude += 5
return tester.Pass()
#-----------------------------------------------------------------------------------------------------------
def Atmosphere():
filename = 'riseset/atmosphere.csv'
maxdiff = 0.0
ncases = 0
tolerance = 8.8e-11
with open(filename, 'rt') as infile:
lnum = 0
for line in infile:
line = line.strip()
lnum += 1
if lnum == 1:
if line != 'elevation,temperature,pressure,density,relative_density':
return Fail('Atmosphere', 'Expected header line but found [{}]'.format(line))
else:
tokens = line.split(',')
if len(tokens) != 5:
return Fail('Atmosphere({} line {})'.format(filename, lnum), 'expected 5 numeric tokens but found {}'.format(len(tokens)))
elevation = v(float(tokens[0]))
temperature = v(float(tokens[1]))
pressure = v(float(tokens[2]))
# ignore tokens[3] = absolute_density
relative_density = v(float(tokens[4]))
atmos = astronomy.Atmosphere(elevation)
diff = vabs(atmos.temperature - temperature)
maxdiff = max(maxdiff, diff)
if diff > tolerance:
return Fail('Atmosphere', 'EXCESSIVE temperature difference = {}'.format(diff))
diff = vabs(atmos.pressure - pressure)
maxdiff = max(maxdiff, diff)
if diff > tolerance:
return Fail('Atmosphere', 'EXCESSIVE pressure difference = {}'.format(diff))
diff = vabs(atmos.density - relative_density)
maxdiff = max(maxdiff, diff)
if diff > tolerance:
return Fail('Atmosphere', 'EXCESSIVE density difference = {}'.format(diff))
ncases += 1
if ncases != 34:
return Fail('Atmosphere', 'expected 34 cases but found {}'.format(ncases))
return Pass('Atmosphere')
#-----------------------------------------------------------------------------------------------------------
def RiseSetElevationBodyCase(body, observer, direction, metersAboveGround, startTime, eventOffsetDays):
time = astronomy.SearchRiseSet(body, observer, direction, startTime, 2.0, metersAboveGround)
if not time:
return Fail('RiseSetElevationBodyCase {} {}: search failed.'.format(body, direction))
diff = v(time.ut - (startTime.ut + eventOffsetDays))
if diff > 0.5:
diff -= 1.0 # assume event actually takes place on the next day
diff = vabs(MINUTES_PER_DAY * diff) # convert signed days to absolute minutes
if diff > 0.5:
return Fail('RiseSetElevationBodyCase {} {}: EXCESSIVE diff = {}.'.format(body, direction, diff))
return 0
def RiseSetElevation():
regex = re.compile(r'^(\d+)-(\d+)-(\d+)\s+(\S+)\s+(\S+)\s+(\S+)\s+(\S+)\s+(\d+):(\d+)\s+(\d+):(\d+)\s+(\d+):(\d+)\s+(\d+):(\d+)\s+(\S+)\s*$')
filename = 'riseset/elevation.txt'
with open(filename, 'rt') as infile:
lnum = 0
for line in infile:
lnum += 1
if line.startswith('#'):
continue
m = regex.match(line)
if not m:
return Fail('RiseSetElevation({} line {})'.format(filename, lnum), 'Invalid data format')
year = int(m.group(1))
month = int(m.group(2))
day = int(m.group(3))
latitude = v(float(m.group(4)))
longitude = v(float(m.group(5)))
height = v(float(m.group(6)))
metersAboveGround = v(float(m.group(7)))
srh = int(m.group( 8))
srm = int(m.group( 9))
ssh = int(m.group(10))
ssm = int(m.group(11))
mrh = int(m.group(12))
mrm = int(m.group(13))
msh = int(m.group(14))
msm = int(m.group(15))
# Get search origin time
time = astronomy.Time.Make(year, month, day, 0, 0, 0.0)
# Convert scanned values into sunrise, sunset, moonrise, moonset day offsets.
sr = (srh + srm/60.0) / 24.0
ss = (ssh + ssm/60.0) / 24.0
mr = (mrh + mrm/60.0) / 24.0
ms = (msh + msm/60.0) / 24.0
observer = astronomy.Observer(latitude, longitude, height)
if (0 != RiseSetElevationBodyCase(astronomy.Body.Sun, observer, astronomy.Direction.Rise, metersAboveGround, time, sr) or
0 != RiseSetElevationBodyCase(astronomy.Body.Sun, observer, astronomy.Direction.Set, metersAboveGround, time, ss) or
0 != RiseSetElevationBodyCase(astronomy.Body.Moon, observer, astronomy.Direction.Rise, metersAboveGround, time, mr) or
0 != RiseSetElevationBodyCase(astronomy.Body.Moon, observer, astronomy.Direction.Set, metersAboveGround, time, ms)):
return 1
return Pass('RiseSetElevation')
#-----------------------------------------------------------------------------------------------------------
UnitTests = {
'aberration': Aberration,
'atmosphere': Atmosphere,
'axis': Axis,
'barystate': BaryState,
'constellation': Constellation,
'dates250': DatesIssue250,
'ecliptic': Ecliptic,
'elongation': Elongation,
'geoid': Geoid,
'global_solar_eclipse': GlobalSolarEclipse,
'gravsim': GravSimTest,
'heliostate': HelioState,
'hour_angle': HourAngle,
'issue_103': Issue103,
'jupiter_moons': JupiterMoons,
'lagrange': Lagrange,
'libration': Libration,
'local_solar_eclipse': LocalSolarEclipse,
'lunar_apsis': LunarApsis,
'lunar_eclipse': LunarEclipse,
'lunar_eclipse_78': LunarEclipseIssue78,
'lunar_fraction': LunarFraction,
'magnitude': Magnitude,
'moon': GeoMoon,
'moon_nodes': MoonNodes,
'moon_reverse': MoonReverse,
'moonphase': MoonPhase,
'planet_apsis': PlanetApsis,
'pluto': PlutoCheck,
'refraction': Refraction,
'repr': Repr,
'riseset': RiseSet,
'riseset_elevation': RiseSetElevation,
'riseset_reverse': RiseSetReverse,
'rotation': Rotation,
'seasons': Seasons,
'seasons187': SeasonsIssue187,
'sidereal': SiderealTime,
'solar_fraction': SolarFraction,
'star_risesetculm': StarRiseSetCulm,
'time': AstroTime,
'topostate': TopoState,
'transit': Transit,
'twilight': Twilight,
}
#-----------------------------------------------------------------------------------------------------------
if __name__ == '__main__':
if len(sys.argv) > 1 and sys.argv[1] == '-v':
sys.argv = sys.argv[1:]
Verbose = True
if len(sys.argv) == 2:
name = sys.argv[1]
if name in UnitTests:
sys.exit(UnitTests[name]())
if name in ['astro_check', 'astro_profile']:
sys.exit(AstroCheck(sys.argv[1] == 'astro_check'))
if name == 'all':
for name in sorted(UnitTests.keys()):
func = UnitTests[name]
Debug('test.py: Starting test "{}"'.format(name))
rc = func()
Debug('test.py: Test "{}" returned {}'.format(name, rc))
if rc != 0:
sys.exit(1)
print('test.py: ALL PASS')
sys.exit(0)
print('test.py: Invalid command line arguments.')
sys.exit(1)
| [
"cosinekitty@gmail.com"
] | cosinekitty@gmail.com |
10f8f671502abf2da6674392812b7ae4c99f03ef | 40a2ad2e35e88bfe2885621d9938b78ad0114a24 | /kuhle/wsgi.py | 85b73bbd906b79f7e4368df6580f7fd0a0de614c | [] | no_license | doctorlloyd/kuhle | 79b614cf18240b1e5034eb35642e49e52f928f14 | 7490ae8724501fdd4ecfee675a5bd3b6cc23ee92 | refs/heads/master | 2020-07-30T05:05:45.062122 | 2019-10-01T08:13:04 | 2019-10-01T08:13:04 | 210,096,275 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 387 | py | """
WSGI config for kuhle project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'kuhle.settings')
application = get_wsgi_application()
| [
"doctorlloyd9@gmail.com"
] | doctorlloyd9@gmail.com |
1359553cc3287a820189fbf4d8e567261500f703 | d0430e75fd1c30ee1f780b2a4ca8773aa1430a68 | /apps/models/virtual/dungeon.py | 6fbf1b405546cd844fad25e9e12b5a8f64b45b47 | [
"Apache-2.0"
] | permissive | leigeng2014/sango2 | 7139a90e75e683f56127acdd6f79f1ffb664dd55 | aa0a3ed1a316d8afc9482f072f2aa57cffe9a10f | refs/heads/add-license-1 | 2021-01-10T08:38:22.885241 | 2020-10-28T02:41:00 | 2020-10-28T02:41:00 | 44,725,363 | 0 | 0 | Apache-2.0 | 2020-10-28T03:02:25 | 2015-10-22T06:03:55 | JavaScript | UTF-8 | Python | false | false | 1,875 | py | #-*- coding: utf-8 -*-
from apps.models.virtual.soldier import Soldier
class Dungeon(object):
    """A single battle between two armies of Soldier objects."""

    def __init__(self, uid):
        """Prepare an empty battlefield for player `uid`."""
        self.uid = uid
        self.first_army = []
        self.second_army = []
        self.result = []
        self.is_success = 0

    def load_first_army(self, first_army):
        """Wrap each card of the attacking side in a Soldier, slot by slot."""
        for slot, card in enumerate(first_army):
            soldier = Soldier(self, card)
            soldier.position = '1_' + str(slot)
            self.first_army.append(soldier)

    def load_second_army(self, second_army):
        """Wrap each card of the defending side in a Soldier, slot by slot."""
        for slot, card in enumerate(second_army):
            soldier = Soldier(self, card)
            soldier.position = '2_' + str(slot)
            self.second_army.append(soldier)

    def is_dungeon_finish(self):
        """Return 1 when the second army is wiped out (victory, also sets
        `is_success`), 2 when the first army is wiped out, 0 otherwise."""
        if not any(s.hp > 0 for s in self.second_army):
            self.is_success = 1
            return 1
        if not any(s.hp > 0 for s in self.first_army):
            return 2
        return 0

    def run(self):
        """Fight for at most 50 rounds; each round, soldiers of both armies
        act in slot order until one side is fully dead."""
        for _round in range(50):
            if self.is_dungeon_finish() > 0:
                break
            attackers = len(self.first_army)
            defenders = len(self.second_army)
            for slot in range(max(attackers, defenders)):
                if slot < attackers:
                    self.first_army[slot].action()
                if slot < defenders:
                    self.second_army[slot].action()
"519087819@qq.com"
] | 519087819@qq.com |
44c7ea4c09cf76c997f45dc2c463741f3ba3af03 | 5f6425e9d83b57b864e48f227e1dc58356a555c0 | /utils/palettes/personalized/piotr_kozlowski.py | 8f12ab59b92e28ce4bf7bd066c55b145ec16a2f9 | [
"MIT"
] | permissive | jan-warchol/selenized | b374fa7822f281b16aa8b52e34bd1e585db75904 | df1c7f1f94f22e2c717f8224158f6f4097c5ecbe | refs/heads/master | 2023-06-22T09:37:02.962677 | 2022-09-12T20:24:40 | 2022-09-12T20:24:40 | 45,570,283 | 663 | 58 | MIT | 2023-04-18T09:33:22 | 2015-11-04T22:00:52 | Emacs Lisp | UTF-8 | Python | false | false | 217 | py | import selenized_base
name = 'Piotr Kozlowski'
# Personalized Selenized variant: light background with a dark, slightly
# desaturated foreground. The 3-tuples are presumably (L, a, b)-style color
# coordinates consumed by selenized_base.generate_palette — TODO confirm
# against selenized_base's API.
palette = selenized_base.generate_palette(
    background=(97, 0, 8),
    foreground=(25, -6, -6),
    saturation=1.4,
    accent_offset=5,
    accent_l_spread=30,
)
| [
"jan.warchol@gmail.com"
] | jan.warchol@gmail.com |
dac13f5e5e05cde9a0e794a85a9b310f0cb35528 | bc5f2a7cce6093ba1f5785b02215df34d7e48a68 | /src/YYOwnBlog/settings.py | e80d47fc13792931076af62f165026ea519a18bb | [] | no_license | congyingTech/YYOwnBlog | 5b84bfd1cb7884f323ec76a67ec26bb1afc8a406 | 57718ba3363385ac4da0840f56d7cd15f903d512 | refs/heads/master | 2021-01-20T20:24:43.736682 | 2016-06-17T03:20:36 | 2016-06-17T03:20:36 | 61,342,280 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,290 | py | """
Django settings for YYOwnBlog project.
Generated by 'django-admin startproject' using Django 1.9.6.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to version control; rotate it and load
# it from the environment before any production deployment.
SECRET_KEY = '(d0dmfin1@=g8-16bs&r&-4u-cbd@aj_kf6ak0n=*uc%qf$!_q'
# SECURITY WARNING: don't run with debug turned on in production!
# NOTE(review): DEBUG=True and an empty ALLOWED_HOSTS are dev-only defaults.
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'blog', # don't forget register blog in INSTALLED_APPS
]
# Django 1.9-era middleware setting (renamed to MIDDLEWARE in Django 1.10+).
MIDDLEWARE_CLASSES = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'YYOwnBlog.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'templates').replace('\\', '/')],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'YYOwnBlog.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
| [
"congyingTech@163.com"
] | congyingTech@163.com |
1c9955ceb771f4498cae1b4f3bac87fe805bd2f5 | 13fb8e63a4e84b3028c351cea54729ee0234c81b | /core/demo4_GEN_MM.py | 6a6f3bf09a61047c404921ba7fc687f4eab1899b | [
"MIT"
] | permissive | samiratzn/patent-similarity-1 | beddd5aa9bee3d181bd36a329506d81339433417 | a64eda5665f76f771c629caf4abe58f1aa0a11b8 | refs/heads/master | 2020-04-01T17:57:29.387211 | 2017-10-13T08:45:22 | 2017-10-13T08:45:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 816 | py | #coding:utf-8
import os
from gensim import corpora
import logging
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
# NOTE: this is Python 2 code (see the `print` statement in the loop below).
DICT_PATH = "dict/pat.dict"
CORPUS_PATH = "corpus/pat_only_abstract.txt";
# The dictionary must already exist (built by the DICT-generation step).
if (os.path.exists(DICT_PATH)):
    dictionary = corpora.Dictionary.load(DICT_PATH)
    print("Used files generated")
else:
    print("Please GEN DICT FIRST")
corpus = []
# NOTE(review): `f1` is not closed if an exception is raised mid-loop;
# a `with open(...)` block would be safer.
f1 = open(CORPUS_PATH)
f1.readline()
i = 0
# Convert each abstract line to a gensim bag-of-words vector.
for text in f1:
    simtext = text.lower().split();
    a = dictionary.doc2bow(simtext)
    corpus.append(a)
    i = i + 1
    # Progress log every 100000 documents.
    if(i%100000==0):
        print str(i) + ":完成100000批次:" + str(a)
# Persist the whole corpus in Matrix Market format for later reuse.
corpora.MmCorpus.serialize("dict/pat.mm", corpus)
f1.close()
# print(corpus[0])
# print(corpus[1])
# print(corpus[2])
# store to disk, for later use | [
"genix@greysh.com"
] | genix@greysh.com |
fbd901146e030f0b615ac6a0f9c6e7e500be2f2f | 5c3299d542d0d0bbe6bddadd8c0afd091b74d880 | /scripts/generators/exercises/non_primes_generator.py | 10da61ab1d6355e47a0eea87b4ca4f6c7ffe9fbb | [] | no_license | estebansolo/PythonScripts | cfae6c790717e49461a0fcb5de23ede214dee6b5 | 1c42395bb9bca13b42b35893bc9d374c77cd399f | refs/heads/master | 2020-09-27T06:42:54.500494 | 2020-07-21T23:57:34 | 2020-07-21T23:57:34 | 226,455,348 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 953 | py | def manipulate_generator(generator, n):
"""
This function must manipulate the generator functions
so that the first k non-prime positive integers are
printed, each on a separate line.
"""
pass
""" The following code cannot be modified except for debugging """
def positive_integers_generator():
    # Yields 1, 2, 3, ... by default. A value sent in via send(x) resets the
    # counter so that x is yielded immediately (to the send() call) and the
    # following next() yields x + 1. Supplied by the exercise; must not be
    # modified except for debugging.
    n = 1
    while True:
        x = yield n
        if x is not None:
            n = x
        else:
            n += 1
if __name__ == '__main__':
    """
    Given an integer k, print the first k non-prime positive integers,
    each on a new line. For example, if k = 10, the output would be:
    1 4 6 8 9 10 12 14 15 16 18 20
    """
    # Exercise-supplied driver (not to be modified): prints whatever the
    # generator yields, then lets manipulate_generator() steer the generator
    # between prints so only non-primes come out.
    k = int(input())
    g = positive_integers_generator()
    for _ in range(k):
        n = next(g)
        print(n)
        manipulate_generator(g, n)
"""
Test 1:
- Inputs
12
- Outpus
1 4 6 8 9 10 12 14 15 16 18 20
""" | [
"estebansolorzano27@gmail.com"
] | estebansolorzano27@gmail.com |
0e40113bb93d14a37065a6e9255cb2374597cebd | 502a59a25838ef5c01c574b9091da918166e1c35 | /models/test_model.py | 653339285d94cea9aecb6c2727f29d0c2808b70d | [
"CC-BY-4.0"
] | permissive | Jack12xl/Rotate-and-Render | 124382ec8fc4eff1e5f59b738ebe594036d6614e | 6f04aeaf4bb631cdc8e694277bf8fd22e6a7df07 | refs/heads/master | 2022-04-19T20:07:22.340121 | 2020-04-15T07:05:36 | 2020-04-15T07:05:36 | 255,600,244 | 0 | 0 | CC-BY-4.0 | 2020-04-14T12:15:42 | 2020-04-14T12:15:41 | null | UTF-8 | Python | false | false | 2,756 | py | import torch
import models.networks as networks
import util.util as util
from data import curve
import numpy as np
import os
from models.rotatespade_model import RotateSPADEModel
class TestModel(RotateSPADEModel):
    """Inference-only wrapper around RotateSPADEModel: builds segmentation
    maps from landmarks and generates rotated face images under torch.no_grad().

    All tensor shapes below are inherited from the training pipeline
    (presumably NCHW batches; `[:, 4]` / `[:, 0]` index segmentation
    channels) — TODO confirm against RotateSPADEModel.get_seg_map.
    """
    @staticmethod
    def modify_commandline_options(parser, is_train):
        # Delegate option registration to the networks module; returns the
        # same parser for chaining.
        networks.modify_commandline_options(parser, is_train)
        return parser
    def __init__(self, opt):
        # No extra state beyond the base model; `opt` carries all settings.
        super(TestModel, self).__init__(opt)
    def forward(self, data, mode):
        # mode == 'single': generate only the rotated view of data['image'].
        # Any other mode: generate both the frontal fake and the rotated fake.
        if mode == 'single':
            real_image = data['image']
            rotated_landmarks = data['rotated_landmarks']
            original_angles = data['original_angles']
            # Segmentation maps derived from the rotated landmarks; the full
            # per-channel map is used below for optional mesh masking.
            self.rotated_seg, rotated_seg_all = \
                self.get_seg_map(rotated_landmarks, self.opt.no_gaussian_landmark, self.opt.crop_size, original_angles)
            rotated_mesh = data['rotated_mesh']
            if self.opt.label_mask:
                # Zero out mesh pixels covered by seg channels 4 and 0.
                rotated_mesh = (rotated_mesh + rotated_seg_all[:, 4].unsqueeze(1) + rotated_seg_all[:, 0].unsqueeze(1))
                rotated_mesh[rotated_mesh >= 1] = 0
            with torch.no_grad():
                fake_rotate, _ = self.generate_fake(rotated_mesh, real_image, self.rotated_seg)
            return fake_rotate
        else:
            real_image = data['image']
            orig_landmarks = data['orig_landmarks']
            rotated_landmarks = data['rotated_landmarks']
            # NOTE(review): these are bound to *locals* orig_seg/rotated_seg,
            # yet generate_fake below reads self.orig_seg/self.rotated_seg —
            # those attributes are never assigned in this branch; confirm the
            # base class sets them, otherwise this path raises AttributeError.
            orig_seg, orig_seg_all = self.get_seg_map(orig_landmarks, self.opt.no_gaussian_landmark, self.opt.crop_size)
            rotated_seg, rotated_seg_all = self.get_seg_map(rotated_landmarks, self.opt.no_gaussian_landmark,
                                                            self.opt.crop_size)
            input_semantics = data['mesh']
            rotated_mesh = data['rotated_mesh']
            BG = data['BG']  # NOTE(review): loaded but never used in this method.
            if self.opt.label_mask:
                input_semantics = (input_semantics + orig_seg_all[:, 4].unsqueeze(1) + orig_seg_all[:, 0].unsqueeze(1))
                rotated_mesh = (rotated_mesh + rotated_seg_all[:, 4].unsqueeze(1) + rotated_seg_all[:, 0].unsqueeze(1))
                input_semantics[input_semantics >= 1] = 0
                rotated_mesh[rotated_mesh >= 1] = 0
            with torch.no_grad():
                # NOTE(review): when label_mask is set, the rotated mesh is
                # masked a second time here — confirm this double application
                # is intentional.
                if self.opt.label_mask:
                    rotated_mesh = (
                            rotated_mesh + rotated_seg_all[:, 4].unsqueeze(1) + rotated_seg_all[:, 0].unsqueeze(1))
                    rotated_mesh[rotated_mesh >= 1] = 0
                fake_image, _ = self.generate_fake(input_semantics, real_image, self.orig_seg)
                fake_rotate, _ = self.generate_fake(rotated_mesh, real_image, self.rotated_seg)
            return fake_image, fake_rotate
| [
"zhouhang@link.cuhk.edu.hk"
] | zhouhang@link.cuhk.edu.hk |
4f546657e2e3eaafbcbe8e8df6ee238fc1ff818c | 81098f0ba8b3ba0c2dfcb69b9efbe2a2428ac7d3 | /btree/asgi.py | 049802a4790582be9ce66251d9dcb0531bf4de5e | [] | no_license | ashwani90/btree | 94e90b321970887576b54c70ea1c6c6e80cdd875 | 2986518f5e91c72cf8dd0153dc5a134884f8dfe4 | refs/heads/main | 2023-07-12T09:44:26.605997 | 2021-08-18T07:43:07 | 2021-08-18T07:43:07 | 397,336,722 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 387 | py | """
ASGI config for btree project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
# Point Django at the project settings unless the environment already set it.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'btree.settings')
# Module-level ASGI callable that ASGI servers (daphne, uvicorn, ...) look up.
application = get_asgi_application()
| [
"ashwani@90125@gmail.com"
] | ashwani@90125@gmail.com |
fd546010cc0c4a4bfca1b77695e8d2be557fc756 | f7403d2fed011e825a183529c99d159cfc9c2176 | /src/browser.py | 36f6c9fd74b0cbb6c298bfb717f8f59aed275d06 | [
"MIT"
] | permissive | mgear2/pg-forum | 9cf8ba5ee00970783c141b2a311cfba537fec69b | 88545026a6e2593666ef204beab165d9500a9d45 | refs/heads/master | 2023-01-24T13:21:27.414584 | 2020-12-05T07:22:54 | 2020-12-05T07:22:54 | 184,829,294 | 0 | 0 | MIT | 2020-12-05T07:22:55 | 2019-05-03T22:52:25 | Python | UTF-8 | Python | false | false | 18,454 | py | # pylint: disable=import-error
from textwrap import TextWrapper
import sys
import datetime
import psycopg2
class Browser:
    """Interactive CLI for browsing and editing a forum-style PostgreSQL DB.

    All SQL goes through ``self.connector.operate(sql, params)`` with
    psycopg2-style ``%s`` placeholders (parameterized — user input is never
    interpolated into the SQL text).
    """
    def __init__(self, connector):
        """Store the DB connector and pre-build every SQL statement used."""
        self.connector = connector
        self._exploreuserssql = (
            "SELECT user_id, user_name, location "
            "FROM Users "
            "OFFSET (%s) LIMIT (%s)"
        )
        self._explorepostssql = (
            "SELECT post_id, title "
            "FROM Posts "
            "WHERE title != 'None' "
            "OFFSET (%s) LIMIT (%s)"
        )
        self._exploretagssql = (
            "SELECT tag_id, tag_name " "FROM Tags " "OFFSET (%s) LIMIT (%s)"
        )
        self._viewusersql = (
            "SELECT users.user_id, user_name, location,"
            "reputation, creation_date, last_active_date "
            "FROM Users "
            "WHERE users.user_id = (%s)"
        )
        self._userbadgessql = (
            "SELECT badge_name "
            "FROM Users, Decorated, Badges "
            "WHERE users.user_id = (%s) "
            "AND decorated.user_id=(%s) "
            "AND badges.badge_id=decorated.badge_id"
        )
        self._viewpostsql = (
            "SELECT creation_date, last_edit_date,"
            "favorite_count, view_count, score, title, post_id, body "
            "FROM posts "
            "WHERE post_id = (%s)"
        )
        self._viewpostersql = (
            "SELECT users.user_name "
            "FROM users, posted "
            "WHERE posted.post_id = (%s) "
            "AND users.user_id=posted.user_id"
        )
        self._viewsubpostssql = (
            "SELECT creation_date, last_edit_date, "
            "favorite_count, view_count, score, title, posts.post_id, body "
            "FROM posts, subposts "
            "WHERE subposts.parent_id = (%s) "
            "AND Posts.post_id = Subposts.child_id"
        )
        self._findparentsql = (
            "SELECT subposts.parent_id "
            "FROM Subposts "
            "WHERE subposts.child_id = (%s)"
        )
        self._viewcommentssql = (
            "SELECT thread.post_id, comments.comment_id,"
            "comments.score, comments.creation_date, comments.text "
            "FROM Comments, Thread, Posts "
            "WHERE posts.post_id = (%s) "
            "AND posts.post_id = thread.post_id "
            "AND thread.comment_id = comments.comment_id"
        )
        self._viewcommentersql = (
            "SELECT users.user_name "
            "FROM Commented, Users "
            "WHERE commented.comment_id = (%s) "
            "AND commented.user_id = users.user_id"
        )
        self._confirmtagsql = (
            "SELECT tags.tag_name, posts.post_id, posts.title "
            "FROM Tags, Posts, Tagged "
            "WHERE tags.tag_id = (%s) "
            "AND tags.tag_id = tagged.tag_id "
            "AND tagged.post_id = posts.post_id LIMIT 5"
        )
        self._viewtagpostssql = (
            "SELECT tags.tag_name, posts.post_id, posts.title "
            "FROM Tags, Posts, Tagged "
            "WHERE tags.tag_id = (%s) "
            "AND tags.tag_id = tagged.tag_id "
            "AND tagged.post_id = posts.post_id OFFSET (%s) LIMIT (%s)"
        )
        self._newpostidsql = "SELECT max(post_id) " "FROM Posts"
        self._newpostedsql = (
            "INSERT INTO Posted " "(user_id, post_id) " "VALUES (%s, %s)"
        )
        self._newpostsql = "CALL newpost(%s, %s, %s, %s, %s, %s, %s, %s)"
        self._posttagsql = "INSERT INTO Tagged " "(tag_id, post_id) " "VALUES (%s, %s)"
        self._findtagidsql = (
            "SELECT tag_id, tag_name " "FROM Tags " "WHERE tag_name = (%s)"
        )
        self._newsubpostsql = (
            "INSERT INTO Subposts " "(parent_id, child_id) " "VALUES (%s, %s)"
        )
        self._newcommentsql = (
            "INSERT INTO Comments "
            "(comment_id, score, creation_date, text) "
            "VALUES (%s, %s, %s, %s)"
        )
        self._newcommentidsql = "SELECT max(comment_id) " "FROM Comments"
        self._newcommentedsql = (
            "INSERT INTO Commented " "(user_id, comment_id) " "VALUES (%s, %s)"
        )
        self._newthreadsql = (
            "INSERT INTO Thread " "(post_id, comment_id) " "VALUES (%s, %s)"
        )
        self._deletefromtaggedsql = "DELETE FROM Tagged " "WHERE Tagged.post_id = (%s)"
        self._deletefrompostssql = "DELETE FROM Posts " "WHERE Posts.post_id=(%s)"
        self._deletefromsubpostsql = (
            "DELETE FROM Subposts " "WHERE parent_id = (%s) " "OR child_id = (%s)"
        )
        # NOTE(review): the OR here means a delete keyed on (user_id, post_id)
        # removes every Posted row for that user OR that post — see deletepost.
        self._deletefrompostedsql = (
            "DELETE FROM Posted " "WHERE user_id = (%s) " "OR post_id = (%s)"
        )
        self._deletefromcommentedsql = (
            "DELETE FROM Commented " "WHERE comment_id = (%s)"
        )
        self._deletefromcommentssql = "DELETE FROM Comments " "WHERE comment_id = (%s)"
        self._deletefromthreadsql = (
            "DELETE FROM Thread " "WHERE comment_id = (%s) " "OR post_id = (%s)"
        )
        # Session state: sentinel user id (-999 = nobody logged in) and the
        # paging window used by explore/viewtag.
        self.user_id = -999
        self.offset = 0
        self.limit = 10
        self.divider = (
            "---------------------------------------"
            "---------------------------------------"
        )
    def exit(self):
        """Close the DB connection and terminate the process."""
        self.connector.disconnect()
        sys.exit(0)
    def commandrunner(self):
        """Top-level REPL: dispatch explore/view/new/delete/query-tool/exit."""
        while True:
            userstring = input("Enter Command: ")
            verify = userstring.split(" ")
            if userstring == "":
                continue
            elif userstring == "exit":
                self.exit()
            elif userstring == "query tool" or userstring == "sqlrunner":
                self.sqlrunner()
                continue
            elif verify[0] == "explore":
                if len(verify) < 2:
                    print("Please define a context to explore")
                    continue
                self.explore(verify[1])
                continue
            elif verify[0] == "view":
                if len(verify) < 3:
                    print("Please define both a context and an id to view")
                    continue
                self.view(verify[1], verify[2])
                continue
            elif verify[0] == "new":
                if len(verify) < 2:
                    print("Please define a context for new")
                    continue
                self.new(verify)
                continue
            elif verify[0] == "delete":
                if len(verify) < 2:
                    print("Please define a context for delete")
                    continue
                self.delete(verify)
                continue
            else:
                print("Command not recognized")
    def sqlrunner(self):
        """Raw SQL prompt; 'back' returns to the command loop, 'exit' quits."""
        print("Entering Query Tool")
        while True:
            userstring = input("Enter SQL Query: ")
            if userstring == "exit":
                self.exit()
            if userstring == "back":
                print("Exiting Query Tool")
                return
            returnval = self.connector.operate(userstring, None)
            if isinstance(returnval, list):
                for val in returnval:
                    print(val)
    def explore(self, context):
        """Page through users/posts/tags ten rows at a time.

        NOTE(review): if the user enters anything other than "", "exit" or
        "back" at the prompt, the loop spins forever without re-querying or
        re-prompting — confirm and guard the input.
        """
        print("Exploring {0}".format(context))
        print("<ENTER> for more results, 'back' to return to command line")
        self.offset = 0
        if context == "users":
            inputstring = self._exploreuserssql
        elif context == "posts":
            inputstring = self._explorepostssql
        elif context == "tags":
            inputstring = self._exploretagssql
        else:
            print("Can't explore {0}".format(context))
            return
        userstring = ""
        while True:
            if userstring == "":
                returnval = self.connector.operate(
                    inputstring, (self.offset, self.limit)
                )
                if isinstance(returnval, list):
                    for val in returnval:
                        print(val)
                if returnval == [] or len(returnval) < 10:
                    print("End of results")
                    break
                userstring = input("<ENTER>/'back': ")
                self.offset += 10
                continue
            if userstring == "exit":
                self.exit()
            if userstring == "back":
                print("Exiting explorer")
                return
    def printuser(self, row, badges):
        """Pretty-print one user row (as selected by _viewusersql)."""
        print("Id:\t\t{0}".format(row[0]))
        print("Name:\t\t{0}".format(row[1]))
        print("Location:\t{0}".format(row[2]))
        print("Badges:\t\t{0}".format(badges))
        print("Reputation:\t{0}".format(row[3]))
        print("Joined:\t\t{0}".format(row[4]))
        print("Last Active:\t{0}".format(row[5]))
    def printpost(self, row, postuser, indent):
        """Pretty-print one post row, indented `indent` tab stops (0 = top post)."""
        indentstring = ""
        title = row[5]
        i = 0
        while i < indent:
            indentstring += "\t"
            i += 1
        wrapper = TextWrapper(
            width=79, initial_indent=indentstring, subsequent_indent=indentstring
        )
        # Top-level posts without a title are subposts; show their parent id.
        if row[5] == None and indent == 0:
            parent = self.connector.operate(self._findparentsql, (row[6],))
            if parent != []:
                title = "Subpost of Post {0}".format(parent[0][0])
        if indent == 0:
            print(self.divider + "\n" + self.divider)
        print("{0}Title:\t{1}".format(indentstring, title))
        body = wrapper.wrap(row[7])
        for line in body:
            print(line)
        print(
            (
                "{0}By:\t{1}\tID: {5}\t\tScore: {2}\t"
                "Views: {3}\tFavorites: {4}".format(
                    indentstring, postuser, row[4], row[3], row[2], row[6]
                )
            )
        )
        print("{0}Posted: {1}\tLast Edited: {2}".format(indentstring, row[0], row[1]))
        print(self.divider)
    def printcomments(self, comments, indent):
        """Pretty-print a list of comment rows, indented `indent` tab stops."""
        for row in comments:
            commentuser = self.connector.operate(self._viewcommentersql, (row[1],))
            if commentuser == False:
                commentuser = "Not found"
            else:
                commentuser = commentuser[0][0]
            indentstring = ""
            i = 0
            while i < indent:
                indentstring += "\t"
                i += 1
            wrapper = TextWrapper(
                width=79, initial_indent=indentstring, subsequent_indent=indentstring
            )
            body = wrapper.wrap(row[4])
            for line in body:
                print(line)
            print(
                "{0}By:\t{1}\tID: {2}\t\tScore: {3}".format(
                    indentstring, commentuser, row[1], row[2]
                )
            )
            print("{0}Posted: {1}".format(indentstring, row[3]))
            print(self.divider)
    def verifyid(self, inputstring, given_id):
        """Run a lookup query; return its rows, or False when the id is
        malformed or matches nothing (printing a diagnostic either way)."""
        returnval = self.connector.operate(inputstring, given_id)
        if (isinstance(returnval, list)) == False:
            print("Ensure that ID is integer value")
            return False
        elif returnval == []:
            print("No results")
            return False
        else:
            return returnval
    def view(self, context, given_id):
        """Dispatch `view user/post/tag <id>` to the matching viewer."""
        print("Viewing {0} with ID {1}".format(context, given_id))
        self.offset = 0
        if context == "user":
            returnval = self.verifyid(self._viewusersql, (given_id,))
            if returnval != False:
                self.viewuser(given_id, returnval)
            return
        elif context == "post":
            returnval = self.verifyid(self._viewpostsql, (given_id,))
            if returnval != False:
                self.viewpost(given_id, returnval)
            return
        elif context == "tag":
            self.viewtag(given_id)
            return
        print("Can't view {0}".format(context))
        return
    def viewuser(self, given_id, returnval):
        """Print a user profile together with the user's badge names."""
        userbadges = self.connector.operate(self._userbadgessql, (given_id, given_id))
        badges = []
        for badge in userbadges:
            badges += badge
        self.printuser(returnval[0], badges)
        return
    def viewpost(self, given_id, returnval):
        """Print a post, its comments, and all subposts with their comments."""
        subposts = self.connector.operate(self._viewsubpostssql, (given_id,))
        postuser = self.connector.operate(self._viewpostersql, (returnval[0][6],))
        if postuser == []:
            postuser = "User not found with Id {0}".format(given_id)
        else:
            postuser = postuser[0][0]
        self.printpost(returnval[0], postuser, 0)
        comments = self.connector.operate(self._viewcommentssql, (given_id,))
        self.printcomments(comments, 2)
        for post in subposts:
            subpostuser = self.connector.operate(self._viewpostersql, (post[6],))
            if subpostuser == []:
                subpostuser = "User not found"
            else:
                subpostuser = subpostuser[0][0]
            self.printpost(post, subpostuser, 1)
            comments = self.connector.operate(self._viewcommentssql, (post[6],))
            self.printcomments(comments, 2)
        return
    def viewtag(self, given_id):
        """Print one page of (tag, post_id, title) rows for a tag id."""
        returnval = self.connector.operate(
            self._viewtagpostssql, (given_id, self.offset, self.limit)
        )
        if isinstance(returnval, list):
            for val in returnval:
                print(val)
            if returnval == [] or len(returnval) < 10:
                print("End of results")
        else:
            print("No results")
    def new(self, verifylist):
        """Dispatch `new post [parent_id]` / `new comment <post_id>`."""
        if verifylist[1] == "post":
            if len(verifylist) < 3:
                self.newpost(verifylist)
                return
            else:
                self.newsubpost(verifylist)
                return
        if verifylist[1] == "comment":
            if len(verifylist) < 3:
                print("Please define a post id to comment on")
                return
            else:
                self.newcomment(verifylist)
                return
        else:
            print("Unrecognized command")
            return
    def newpost(self, verifylist):
        """Interactively create a top-level post with tags.

        The new id is max(post_id) + 1 — NOTE(review): racy if two clients
        post concurrently; a sequence/SERIAL column would be safer.
        """
        newtitle = ""
        newtitle = input("Enter Post Title: ")
        now = datetime.datetime.now()
        newbody = ""
        newbody = input("Enter Post Body: ")
        newtags = input("Enter Post Tags as <Tag1,Tag2,Tag3...>: ")
        newtags = newtags.split(",")
        newid = self.connector.operate(self._newpostidsql, None)
        newid = newid[0][0]
        newid += 1
        newpost = (newid, now, now, 0, 0, 0, newtitle, newbody)
        self.connector.operate(self._newpostsql, newpost)
        self.connector.operate(self._newpostedsql, (self.user_id, newid))
        # Build a single multi-row INSERT for all tag associations.
        string = "INSERT INTO Tagged (tag_id, post_id) VALUES "
        tuples = ()
        i = 0
        for tag in newtags:
            i += 1
            dbtag = self.connector.operate(self._findtagidsql, (tag,))
            string += "(%s, %s)"
            tuples += dbtag[0][0], newid
            if i < len(newtags):
                string += ","
        self.connector.operate(string, tuples)
        print("Created new post with ID {0}".format(newid))
    def newsubpost(self, verifylist):
        """Interactively create a reply (subpost) under verifylist[2]."""
        newtitle = None
        parent = self.connector.operate(self._viewpostsql, (verifylist[2],))
        if parent == []:
            print("Given Post ID not found")
            return
        if isinstance(parent, psycopg2.errors.InvalidTextRepresentation):
            print("ID must be integer")
            return
        parent = parent[0][6]
        now = datetime.datetime.now()
        newbody = ""
        newbody = input("Enter Post Body: ")
        newid = self.connector.operate(self._newpostidsql, None)
        newid = newid[0][0]
        newid += 1
        newpost = (newid, now, now, 0, 0, 0, newtitle, newbody)
        self.connector.operate(self._newpostsql, newpost)
        self.connector.operate(self._newpostedsql, (self.user_id, newid))
        self.connector.operate(self._newsubpostsql, (parent, newid))
        print("Created new post with ID {0} on Parent {1}".format(newid, parent))
    def newcomment(self, verifylist):
        """Interactively create a comment on post verifylist[2].

        NOTE(review): unlike newsubpost, the parent id is not validated
        against the Posts table before inserting — confirm intended.
        """
        parent = verifylist[2]
        if parent == []:
            print("Given Post ID not found")
            return
        if isinstance(parent, psycopg2.errors.InvalidTextRepresentation):
            print("ID must be integer")
            return
        print(parent)
        now = datetime.datetime.now()
        newbody = input("Enter Post Body: ")
        newid = self.connector.operate(self._newcommentidsql, None)
        newid = newid[0][0]
        newid += 1
        newcomment = (newid, 0, now, newbody)
        self.connector.operate(self._newcommentsql, newcomment)
        self.connector.operate(self._newcommentedsql, (self.user_id, newid))
        self.connector.operate(self._newthreadsql, (parent, newid))
        print("Created new Comment with ID {0} on Parent {1}".format(newid, parent))
    def delete(self, verifylist):
        """Dispatch `delete post <id>` / `delete comment <id>`."""
        if len(verifylist) < 3:
            print("Please define an ID to delete")
            return
        if verifylist[1] == "post":
            self.deletepost(verifylist)
            return
        if verifylist[1] == "comment":
            self.deletecomment(verifylist)
            return
        else:
            print("Unrecognized command")
            return
    def deletepost(self, verifylist):
        """Delete a post and its subpost/posted/tagged associations.

        NOTE(review): _deletefrompostedsql matches `user_id = %s OR
        post_id = %s`, so this also removes every Posted row belonging to the
        current user, not just this post's row — confirm intended.
        """
        postid = verifylist[2]
        parent = self.connector.operate(self._viewpostsql, (verifylist[2],))
        if parent != []:
            parent = parent[0][6]
        self.connector.operate(self._deletefromsubpostsql, (postid, parent))
        self.connector.operate(self._deletefrompostedsql, (self.user_id, postid))
        self.connector.operate(self._deletefromtaggedsql, (postid,))
        self.connector.operate(self._deletefrompostssql, (postid,))
        print("Deleted post with ID {0}".format(postid))
    def deletecomment(self, verifylist):
        """Delete a comment and its commented/thread associations.

        NOTE(review): _deletefromthreadsql's second placeholder is the
        Thread *post_id*, but self.user_id is passed there — this deletes
        every Thread row whose post_id equals the current user's id; looks
        like a parameter mix-up, verify against the schema.
        """
        commentid = verifylist[2]
        self.connector.operate(self._deletefromcommentedsql, (commentid,))
        self.connector.operate(self._deletefromthreadsql, (commentid, self.user_id))
        self.connector.operate(self._deletefromcommentssql, (commentid,))
        print("Deleted comment with ID {0}".format(commentid))
| [
"mgear2@pdx.edu"
] | mgear2@pdx.edu |
fbf1f210ee56532147d51767e2d16d9694b7895c | 6e3869321977010a61c7c85f7d7ef98ee01f237a | /Python3/16123004.py | f266adc31f397d924c0ab42560f03e948b6b2998 | [] | no_license | SATAKSHI08/hello-cops | 8b7b82ae518b96ad20687c4548543c328ccab91a | 196c50a2cbfe48f16ba6ba1443c0345ae49cbe46 | refs/heads/master | 2020-08-30T04:04:53.094477 | 2019-10-29T10:10:24 | 2019-10-29T10:10:24 | 218,257,477 | 1 | 0 | null | 2019-10-29T10:08:06 | 2019-10-29T10:08:06 | null | UTF-8 | Python | false | false | 20 | py | print('Hello COPS')
| [
"contact@imakshay.com"
] | contact@imakshay.com |
db89af7b8743e566555d37f9cdde43a266c06ba3 | e509da6081ebace6d48fcaf6438b113745a941a7 | /homework10/venv/Scripts/easy_install-3.7-script.py | 3bd64afdcc951912c4094e6b62da5ee5aa7dd179 | [] | no_license | bopopescu/PythonHomeWork | 41c801e78f934c886c1c986ca50f360832e28c33 | 1b5623deff1ce66593c0bb9d25b719888a51eaec | refs/heads/master | 2022-11-20T16:17:26.441881 | 2020-07-12T09:20:54 | 2020-07-12T09:20:54 | 281,798,813 | 0 | 0 | null | 2020-07-22T22:51:14 | 2020-07-22T22:51:13 | null | UTF-8 | Python | false | false | 461 | py | #!C:\Users\NsZoth\PythonHomeWork\homework10\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install-3.7'
__requires__ = 'setuptools==40.8.0'
import re
import sys
from pkg_resources import load_entry_point
# Auto-generated setuptools console-script shim (see the
# EASY-INSTALL-ENTRY-SCRIPT marker above); normally regenerated, not edited.
if __name__ == '__main__':
    # Strip the "-script.py(w)"/".exe" suffix so setuptools resolves the command name.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install-3.7')()
    )
| [
"2367018272@qq.com"
] | 2367018272@qq.com |
ac446b4eaaccc5bde520f50bc48dfc62a4bf2970 | 30948f6fdb64d7a765ac4e359cac2a8be26390c1 | /find_gates.py | 650ac7ef9250dd1da212a12c40e597957f80ef41 | [] | no_license | luckynozomi/BlockysRevenge | 29fd868f9038e4e8d00218c74ce260cbac0ce4ff | d6f14299b996bbda3784bea1a5e8f0cfe1b63fa9 | refs/heads/master | 2020-04-12T11:33:31.465210 | 2018-12-19T16:45:28 | 2018-12-19T16:45:28 | 162,463,362 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,112 | py | import numpy as np
import itertools
def is_const0(vec):
    """True if every packed byte of the signal column is zero (constant 0)."""
    return np.count_nonzero(vec) == 0
def is_const1(vec):
    """True if every packed byte of the signal column is 0xFF (constant 1)."""
    expected_total = vec.shape[0] * 255
    return np.sum(vec) == expected_total
def is_equal(vec1, vec2):
    """True if the two packed signal columns are byte-for-byte identical."""
    return np.array_equal(vec2, vec1)
def is_not(vec1, vec2):
    """True if vec2 is the bitwise complement of vec1."""
    complement = np.invert(vec1)
    return np.array_equal(complement, vec2)
def is_and(vec_o, vec_i1, vec_i2):
    """True if vec_o is the bitwise AND of the two input columns."""
    return np.array_equal(vec_o, vec_i1 & vec_i2)
def is_or(vec_o, vec_i1, vec_i2):
    """True if vec_o is the bitwise OR of the two input columns."""
    return np.array_equal(vec_o, vec_i1 | vec_i2)
def is_xor(vec_o, vec_i1, vec_i2):
    """True if vec_o is the bitwise XOR of the two input columns."""
    return np.array_equal(vec_o, vec_i1 ^ vec_i2)
def is_and3(vec_o, vec_i1, vec_i2, vec_i3):
    """True if vec_o is the bitwise AND of the three input columns."""
    combined = vec_i1 & vec_i2 & vec_i3
    return np.array_equal(vec_o, combined)
def is_or3(vec_o, vec_i1, vec_i2, vec_i3):
    """True if vec_o is the bitwise OR of the three input columns."""
    combined = vec_i1 | vec_i2 | vec_i3
    return np.array_equal(vec_o, combined)
def is_xor3(vec_o, vec_i1, vec_i2, vec_i3):
    """True if vec_o is the bitwise XOR of the three input columns."""
    combined = vec_i1 ^ vec_i2 ^ vec_i3
    return np.array_equal(vec_o, combined)
# Read data
def read_data():
    """Load the training samples and bit-pack them column-wise.

    Each line of ``training_data`` is "<input bits>,<output bits>".
    np.packbits packs every 8 rows into one uint8 per column; it pads a final
    partial group with zeros, which would corrupt gate learning, so the last
    ``rows % 8`` samples are dropped before packing.

    Returns:
        (i_mat, o_mat): uint8 arrays of shape (usable_rows // 8, n_signals).
    """
    inputs = []
    outputs = []
    with open("training_data", "r") as data_file:
        for line in data_file:
            in_bits, out_bits = line.strip().split(',')
            inputs.append(list(map(int, in_bits)))
            outputs.append(list(map(int, out_bits)))
    # `np.int` was a deprecated alias removed in NumPy 1.24; the builtin
    # `int` is the documented equivalent dtype.
    i_mat = np.asarray(inputs, dtype=int)
    rows = i_mat.shape[0]
    rows_exclude = rows % 8
    i_mat = np.packbits(i_mat[0:rows - rows_exclude, :], axis=0)
    o_mat = np.asarray(outputs, dtype=int)
    o_mat = np.packbits(o_mat[0:rows - rows_exclude, :], axis=0)
    return i_mat, o_mat
def find_const(o_mat):
    """Report output columns that are constant 0 or constant 1.

    Prints one line per match and returns the set of matching column indices.
    """
    constant_ids = set()
    for col in range(o_mat.shape[1]):
        column = o_mat[:, col]
        if is_const0(column):
            print(col, '=', "CONST0")
            constant_ids.add(col)
        elif is_const1(column):
            print(col, '=', "CONST1")
            constant_ids.add(col)
    return constant_ids
def find_not_unique(o_mat):
    """Report output columns that duplicate an earlier output column.

    Prints `j = O i` for each duplicate and returns the duplicate indices.
    NOTE(review): despite the name, only equality is checked here; inverted
    duplicates are picked up later by find_unary_gates — confirm intended.
    """
    duplicate_ids = set()
    for first, second in itertools.combinations(range(o_mat.shape[1]), 2):
        if is_equal(o_mat[:, second], o_mat[:, first]):
            duplicate_ids.add(second)
            print(second, '=', "O", first)
    return duplicate_ids
def find_input_gates(idx_not_const, o_mat, i_mat):
    """Report outputs equal to an input column or to its bitwise complement.

    Prints `idx = I i` / `idx = NOT_I i` and returns the matched output ids.
    """
    matched = set()
    for idx in idx_not_const:
        out_col = o_mat[:, idx]
        for i in range(i_mat.shape[1]):
            in_col = i_mat[:, i]
            if is_equal(out_col, in_col):
                print(idx, '=', 'I', i)
                matched.add(idx)
            elif is_not(out_col, in_col):
                print(idx, '=', "NOT_I", i)
                matched.add(idx)
    return matched
def find_unary_gates(idx_remaining, idx_last_identified, idx_old_identified, o_mat):
    """Report remaining outputs that are the NOT of a just-identified output.

    `idx_old_identified` is unused here but kept so every find_* pass shares
    the same signature.
    """
    matched = set()
    for cand, source in itertools.product(idx_remaining, idx_last_identified):
        if is_not(o_mat[:, cand], o_mat[:, source]):
            matched.add(cand)
            print(cand, '=', "NOT_O", source)
    return matched
def find_binary_gates(idx_remaining, idx_last_identified, idx_old_identified, o_mat):
    """Report remaining outputs expressible as AND/OR/XOR of two identified outputs.

    Only pairs that involve at least one freshly identified column are tried:
    pairs drawn purely from `idx_old_identified` were covered in earlier passes.
    NOTE(review): the pair tuples come from iterating sets, so the set
    difference below only removes tuples whose element order matches — verify.
    """
    matched = set()
    identified = idx_last_identified.union(idx_old_identified)
    candidate_pairs = set(itertools.combinations(identified, 2))
    candidate_pairs = candidate_pairs.difference(itertools.combinations(idx_old_identified, 2))
    for out_idx in idx_remaining:
        out_col = o_mat[:, out_idx]
        for in1, in2 in candidate_pairs:
            col1 = o_mat[:, in1]
            col2 = o_mat[:, in2]
            if is_and(out_col, col1, col2):
                matched.add(out_idx)
                print(out_idx, '=', 'AND', in1, in2)
            elif is_or(out_col, col1, col2):
                matched.add(out_idx)
                print(out_idx, '=', "OR", in1, in2)
            elif is_xor(out_col, col1, col2):
                matched.add(out_idx)
                print(out_idx, '=', "XOR", in1, in2)
    return matched
def find_ternary_gates(idx_remaining, idx_last_identified, idx_old_identified, o_mat):
    """Return remaining indices explained as 3-input AND/OR/XOR of identified outputs.

    Mirrors ``find_binary_gates`` but tests triples; triples made entirely of
    previously identified indices are skipped as they were covered before.
    """
    found = set()
    all_known = idx_last_identified.union(idx_old_identified)
    triples = set(itertools.combinations(all_known, 3))
    triples -= set(itertools.combinations(idx_old_identified, 3))
    for out_idx in idx_remaining:
        out_col = o_mat[:, out_idx]
        for in_a, in_b, in_c in triples:
            col_a, col_b, col_c = o_mat[:, in_a], o_mat[:, in_b], o_mat[:, in_c]
            if is_and3(out_col, col_a, col_b, col_c):
                found.add(out_idx)
                print(out_idx, '=', 'AND3', in_a, in_b, in_c)
            elif is_or3(out_col, col_a, col_b, col_c):
                found.add(out_idx)
                print(out_idx, '=', "OR3", in_a, in_b, in_c)
            elif is_xor3(out_col, col_a, col_b, col_c):
                found.add(out_idx)
                print(out_idx, '=', "XOR3", in_a, in_b, in_c)
    return found
# ---------------------------------------------------------------------------
# Main analysis: classify every output column of the captured traces as a
# constant, a duplicate, an input pass-through/inverter, or a gate built from
# previously identified outputs.
# ---------------------------------------------------------------------------
i_mat, o_mat = read_data()
idx_all = set(range(o_mat.shape[1]))
# Constant and duplicate columns carry no new information and are excluded
# from the gate search.
idx_const = find_const(o_mat)
idx_not_unique = find_not_unique(o_mat)
idx_unique_not_const = idx_all.difference(idx_const).difference(idx_not_unique)
# Outputs that directly mirror (or invert) an input column.
idx_input = find_input_gates(idx_unique_not_const, o_mat, i_mat)
idx_last_identified = idx_input # Assuming const can not be input to gates
idx_remaining = idx_unique_not_const.difference(idx_last_identified)
idx_old_identified = set()
# Fixed-point iteration: keep explaining remaining outputs as NOT/AND/OR/XOR
# of already-identified outputs until a pass identifies nothing new.
while len(idx_last_identified) != 0:
    idx_unary = find_unary_gates(idx_remaining, idx_last_identified, idx_old_identified, o_mat)
    idx_binary = find_binary_gates(idx_remaining, idx_last_identified, idx_old_identified, o_mat)
    idx_old_identified = idx_old_identified.union(idx_last_identified)
    idx_last_identified = idx_unary.union(idx_binary)
    idx_remaining = idx_remaining.difference(idx_last_identified)
# At this point, only [0, 1, 14, 15] cannot be found
# print("Unable to identify:", ' '.join([str(idx) for idx in idx_remaining]))
# Manual staged search: try 3-input gates for the four leftovers, then peel
# them off one at a time so later stages can use the newly found columns.
idx_remain = {0, 1, 14, 15}
find_ternary_gates(idx_remain, idx_unique_not_const.difference(idx_remain), set(), o_mat)
idx_remaining = {0, 1, 15}
idx_last_identified = {14}
idx_old_identified = idx_unique_not_const.difference(idx_remaining).difference(idx_last_identified)
find_ternary_gates(idx_remaining, idx_last_identified, idx_old_identified, o_mat)
idx_remaining = {0, 1}
idx_last_identified = {15}
idx_old_identified = idx_unique_not_const.difference(idx_remaining).difference(idx_last_identified)
find_ternary_gates(idx_remaining, idx_last_identified, idx_old_identified, o_mat)
idx_remaining = {1}
idx_last_identified = {0}
idx_old_identified = idx_unique_not_const.difference(idx_remaining).difference(idx_last_identified)
find_ternary_gates(idx_remaining, idx_last_identified, idx_old_identified, o_mat)
| [
"suixin661014@gmail.com"
] | suixin661014@gmail.com |
15405a68670fb77344b85ae1753eef629c0eed36 | f689f2781ad40c709b7e733e61c5508395065d39 | /ingredientes/autocomplete.py | 88ae5b612ffccc9753f1d1e856f8cd1e0ab0eb95 | [] | no_license | mocomauricio/catering | a9a1078bda5f528678b96b7205eba473bbc22390 | 9901e91176f0ba626ce23f5d33b478b2b633d36a | refs/heads/master | 2021-01-12T10:04:48.188560 | 2016-12-13T11:14:16 | 2016-12-13T11:14:16 | 76,351,931 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,425 | py | from dal import autocomplete
from django.db.models import Q
from ingredientes.models import *
class UnidadDeMedidaAutocomplete(autocomplete.Select2QuerySetView):
    """Select2 autocomplete for measurement units.

    Matches the query against the unit name (substring) or its abbreviation
    (prefix). Anonymous visitors receive no results.
    """

    def get_queryset(self):
        # Hide all suggestions from unauthenticated visitors.
        if not self.request.user.is_authenticated():
            return UnidadDeMedida.objects.none()
        queryset = UnidadDeMedida.objects.all()
        if not self.q:
            return queryset
        return queryset.filter(
            Q(nombre__icontains=self.q) | Q(abreviatura__istartswith=self.q)
        )
class CategoriaDeIngredienteAutocomplete(autocomplete.Select2QuerySetView):
    """Select2 autocomplete for ingredient categories, matched by name substring.

    Anonymous visitors receive no results.
    """

    def get_queryset(self):
        # Hide all suggestions from unauthenticated visitors.
        if not self.request.user.is_authenticated():
            return CategoriaDeIngrediente.objects.none()
        queryset = CategoriaDeIngrediente.objects.all()
        if not self.q:
            return queryset
        return queryset.filter(nombre__icontains=self.q)
class IngredienteAutocomplete(autocomplete.Select2QuerySetView):
    """Select2 autocomplete for ingredients.

    Matches the query against the code (prefix) or the description
    (substring). Anonymous visitors receive no results.
    """

    def get_queryset(self):
        # Hide all suggestions from unauthenticated visitors.
        if not self.request.user.is_authenticated():
            return Ingrediente.objects.none()
        queryset = Ingrediente.objects.all()
        if not self.q:
            return queryset
        return queryset.filter(
            Q(codigo__istartswith=self.q) | Q(descripcion__icontains=self.q)
        )
| [
"mocomauricio@gmail.com"
] | mocomauricio@gmail.com |
b8968f47b87097e5dbcfa14c6e03549f0d2d40be | 6a312d580f5254c4e195c58cda42e508441b5e47 | /NN/network.py | 31dfbd74ba5f3931ff4eab41a18c11bfbfc6d4dd | [] | no_license | mhsharifi96/Hand-Digits-Recognition- | a3cfcab76a6530f7118317a2fefc4d205274b1e8 | 1da7e030a987d780bc519aa1b4c9e05c5a9ed79e | refs/heads/master | 2020-04-19T19:35:40.853028 | 2019-02-02T13:19:37 | 2019-02-02T13:19:37 | 168,392,841 | 7 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,189 | py | # %load network.py
"""
network.py
~~~~~~~~~~
IT WORKS
A module to implement the stochastic gradient descent learning
algorithm for a feedforward neural network. Gradients are calculated
using backpropagation. Note that I have focused on making the code
simple, easily readable, and easily modifiable. It is not optimized,
and omits many desirable features.
"""
#### Libraries
# Standard library
import random
# Third-party libraries
import numpy as np
class Network(object):
    """A feedforward neural network trained with mini-batch stochastic
    gradient descent; gradients are computed by backpropagation.

    Attributes:
        num_layers: total number of layers, including input and output.
        sizes: list with the neuron count of each layer.
        biases: per-layer column vectors of biases (none for the input layer).
        weights: per-layer weight matrices; weights[i] has shape
            (sizes[i+1], sizes[i]).
    """

    def __init__(self, sizes):
        """The list ``sizes`` contains the number of neurons in the
        respective layers of the network.  For example, if the list
        was [2, 3, 1] then it would be a three-layer network, with the
        first layer containing 2 neurons, the second layer 3 neurons,
        and the third layer 1 neuron.  The biases and weights for the
        network are initialized randomly, using a Gaussian
        distribution with mean 0, and variance 1.  Note that the first
        layer is assumed to be an input layer, and by convention we
        won't set any biases for those neurons, since biases are only
        ever used in computing the outputs from later layers."""
        self.num_layers = len(sizes)
        self.sizes = sizes
        # One (n, 1) bias vector per non-input layer.
        self.biases = [np.random.randn(y, 1) for y in sizes[1:]]
        # weights[i] maps layer i activations to layer i+1 pre-activations.
        self.weights = [np.random.randn(y, x)
                        for x, y in zip(sizes[:-1], sizes[1:])]

    def feedforward(self, a):
        """Return the output of the network if ``a`` is input."""
        for b, w in zip(self.biases, self.weights):
            a = sigmoid(np.dot(w, a)+b)
        return a

    def SGD(self, training_data, epochs, mini_batch_size, eta,
            test_data=None, valid_data=None):
        """Train the neural network using mini-batch stochastic
        gradient descent.

        ``training_data`` is an iterable of ``(x, y)`` tuples of training
        inputs and desired outputs; ``eta`` is the learning rate.  If
        ``test_data`` and/or ``valid_data`` are given, the network is
        evaluated against them after each epoch and the accuracy is
        printed (useful for tracking progress, but slow).
        """
        training_data = list(training_data)
        n = len(training_data)
        if test_data:
            test_data = list(test_data)
            n_test = len(test_data)
        if valid_data:
            valid_data = list(valid_data)
            n_valid = len(valid_data)
        for j in range(epochs):
            # Re-shuffle each epoch so mini-batches differ between epochs.
            random.shuffle(training_data)
            mini_batches = [
                training_data[k:k+mini_batch_size]
                for k in range(0, n, mini_batch_size)]
            for mini_batch in mini_batches:
                self.update_mini_batch(mini_batch, eta)
            if test_data:
                evaluate_data = self.evaluate(test_data)
                accuracy = (evaluate_data/n_test)*100
                print("Epoch {} : {} / {} accuracy:{} ".format(j, evaluate_data, n_test, accuracy));
            if valid_data:
                evaluate_data = self.evaluate(valid_data)
                accuracy = (evaluate_data/n_valid)*100
                print("Epoch {} : {} / {} accuracy:{} ".format(j, evaluate_data, n_valid, accuracy));
            else:
                # NOTE(review): this ``else`` pairs with the ``valid_data``
                # check, so "Epoch complete" also prints when only
                # ``test_data`` is supplied — confirm this is intended.
                print("Epoch {} complete".format(j))

    def update_mini_batch(self, mini_batch, eta):
        """Update the network's weights and biases by applying
        gradient descent using backpropagation to a single mini batch.
        The ``mini_batch`` is a list of tuples ``(x, y)``, and ``eta``
        is the learning rate."""
        nabla_b = [np.zeros(b.shape) for b in self.biases]
        nabla_w = [np.zeros(w.shape) for w in self.weights]
        # Accumulate the gradient over every example in the batch.
        for x, y in mini_batch:
            delta_nabla_b, delta_nabla_w = self.backprop(x, y)
            nabla_b = [nb+dnb for nb, dnb in zip(nabla_b, delta_nabla_b)]
            nabla_w = [nw+dnw for nw, dnw in zip(nabla_w, delta_nabla_w)]
        # Step in the direction of the averaged negative gradient.
        self.weights = [w-(eta/len(mini_batch))*nw
                        for w, nw in zip(self.weights, nabla_w)]
        self.biases = [b-(eta/len(mini_batch))*nb
                       for b, nb in zip(self.biases, nabla_b)]

    def backprop(self, x, y):
        """Return a tuple ``(nabla_b, nabla_w)`` representing the
        gradient for the cost function C_x.  ``nabla_b`` and
        ``nabla_w`` are layer-by-layer lists of numpy arrays, similar
        to ``self.biases`` and ``self.weights``."""
        nabla_b = [np.zeros(b.shape) for b in self.biases]
        nabla_w = [np.zeros(w.shape) for w in self.weights]
        # feedforward
        activation = x
        activations = [x]  # list to store all the activations, layer by layer
        zs = []  # list to store all the z vectors, layer by layer
        for b, w in zip(self.biases, self.weights):
            z = np.dot(w, activation)+b  # z = wx + b
            zs.append(z)
            activation = sigmoid(z)
            activations.append(activation)
        # backward pass: output-layer error first.
        delta = self.cost_derivative(activations[-1], y) * sigmoid_prime(zs[-1])
        nabla_b[-1] = delta
        nabla_w[-1] = np.dot(delta, activations[-2].transpose())
        # Note that the variable l in the loop below is used a little
        # differently to the usual notation.  Here, l = 1 means the last
        # layer of neurons, l = 2 is the second-last layer, and so on —
        # a renumbering that takes advantage of Python's negative indices.
        for l in range(2, self.num_layers):
            z = zs[-l]
            sp = sigmoid_prime(z)
            delta = np.dot(self.weights[-l+1].transpose(), delta) * sp
            nabla_b[-l] = delta
            nabla_w[-l] = np.dot(delta, activations[-l-1].transpose())
        return (nabla_b, nabla_w)

    def evaluate(self, _data):
        """Return the number of test inputs for which the neural
        network outputs the correct result.  Note that the neural
        network's output is assumed to be the index of whichever
        neuron in the final layer has the highest activation."""
        _results = [(np.argmax(self.feedforward(x)), y)
                    for (x, y) in _data]
        return sum(int(x == y) for (x, y) in _results)

    def cost_derivative(self, output_activations, y):
        """Return the vector of partial derivatives dC_x/da for the
        output activations (quadratic cost)."""
        return (output_activations-y)
#### Miscellaneous functions
def sigmoid(z):
    """The logistic sigmoid, 1 / (1 + e^(-z)), applied elementwise."""
    exp_neg = np.exp(-z)
    return 1.0 / (1.0 + exp_neg)
def sigmoid_prime(z):
    """Derivative of the sigmoid function, sigma(z) * (1 - sigma(z)).

    The sigmoid value is computed once and reused, instead of evaluating
    ``sigmoid(z)`` (and its ``np.exp``) twice as the naive form does.
    """
    s = sigmoid(z)
    return s * (1 - s)
| [
"mh.sh7676@gmail.com"
] | mh.sh7676@gmail.com |
b83d1fdd8409ee4425647c031ede1f769cd237fb | 9d80acd249a9a7dd7fa83c3c70859f8d1f696421 | /easyRando/asgi.py | c6fe6ce084038212e256f948ca386a8d8c6f7293 | [] | no_license | JMFrmg/cycloRando | 06a09f0d4138ec834dc57c9fdde35aafdd1f53b5 | e73536e59345b0268da41b7ea9bc083c2ce7b76e | refs/heads/master | 2022-12-08T17:15:24.594848 | 2020-08-29T13:38:53 | 2020-08-29T13:38:53 | 291,257,683 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 395 | py | """
ASGI config for easyRando project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
# Point Django at the project settings before constructing the ASGI callable.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'easyRando.settings')
application = get_asgi_application()
| [
"matthieu.colombert@gmail.com"
] | matthieu.colombert@gmail.com |
eb9b9f5d0a57eaad33ba6f7c023758574e60c01e | c986c43c7bdbe404a33e408f6a0eb4cb208d630a | /FitThirdComp.py | f600a18f0411c63f6448ee29e1ab382cfbf8aacd | [] | no_license | lhermosamunoz/FitCubeM | e0774dbfc0f3ef4d98af457fd6fcab56ddd30897 | 0ec4a65b531be77fc211585054a9a2c3341d970a | refs/heads/main | 2023-01-19T21:33:58.006080 | 2020-11-24T23:57:10 | 2020-11-24T23:57:10 | 305,194,555 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 26,895 | py | import numpy as np
import lmfit
from pyspeckit import speclines as pylines
from astropy.constants import c
import Ofuncts
import os
def FitThirdComp(parentFold,l,data_cor,lin_data_fin,threeresu,meth,mu0,sig0,amp0,mu1,sig1,amp1,mu2,sig2,amp2,mu3,sig3,amp3,mu4,sig4,amp4,mu5,sig5,amp5,mu6,sig6,amp6,sig20,amp20,sig21,amp21,sig22,amp22,sig23,amp23,sig24,amp24,sig25,amp25,sig26,amp26,sl,it,l1,l2,l3,l4,l5,l6,l7,l8,l9,l10,l11,l12,l13,l14,stadev):
# Constants to be used
v_luz = c.value/10**3 # km/s
pix_to_v = 50. # km/s per arcsec BASADO EN R = c/deltaV
l_Halpha = pylines.optical.lines['H_alpha'][0]
l_OI_1 = pylines.optical.lines['OI'][0] # 6300.304
l_SII_2 = pylines.optical.lines['SIIb'][0] # 6730.82
threecomp_mod = lmfit.Model(Ofuncts.func3com)
params3c = lmfit.Parameters()
if meth == 'S':
cd1 = lmfit.Parameter('mu_0', value=threeresu.values["mu_0"],vary=False)
de = lmfit.Parameter('sig_0', value=threeresu.values["sig_0"],vary=False)
ef = lmfit.Parameter('amp_0', value=threeresu.values["amp_0"],vary=False)
fg = lmfit.Parameter('mu_1', value=threeresu.values["mu_1"],vary=False)
gh = lmfit.Parameter('sig_1', value=threeresu.values["sig_1"],vary=False)
hi = lmfit.Parameter('amp_1', value=threeresu.values["amp_1"],vary=False)
ij = lmfit.Parameter('mu_2', value=mu2,expr='mu_0*(6584./6731.)')
jk = lmfit.Parameter('sig_2', value=sig2,expr='sig_0')
kl = lmfit.Parameter('amp_2', value=amp2,min=0.)
lm = lmfit.Parameter('mu_3', value=mu3,expr='mu_0*(6563./6731.)')
mn = lmfit.Parameter('sig_3', value=sig3,expr='sig_0')
no = lmfit.Parameter('amp_3', value=amp3,min=0.)
op = lmfit.Parameter('mu_4', value=mu4,expr='mu_0*(6548./6731.)')
pq = lmfit.Parameter('sig_4', value=sig4,expr='sig_0')
qr = lmfit.Parameter('amp_4', value=amp4,min=0.,expr='amp_2*(1./3.)')
rs = lmfit.Parameter('mu_5', value=mu5,expr='mu_0*(6300.304/6730.82)')
st = lmfit.Parameter('sig_5', value=sig5,expr='sig_0')
tu = lmfit.Parameter('amp_5', value=amp5,min=0.)
uv = lmfit.Parameter('mu_6', value=mu6,expr='mu_0*(6363.77/6730.82)')
vw = lmfit.Parameter('sig_6', value=sig6,expr='sig_0')
wy = lmfit.Parameter('amp_6', value=amp6,min=0.,expr='amp_5*(1./3.)')
aaa = lmfit.Parameter('mu_20', value=threeresu.values["mu_20"],vary=False)
aab = lmfit.Parameter('sig_20', value=threeresu.values["sig_20"],vary=False)
aac = lmfit.Parameter('amp_20', value=threeresu.values["amp_20"],vary=False)
aad = lmfit.Parameter('mu_21', value=threeresu.values["mu_21"],vary=False)
aae = lmfit.Parameter('sig_21', value=threeresu.values["sig_21"],vary=False)
aaf = lmfit.Parameter('amp_21', value=threeresu.values["amp_21"],vary=False)
aag = lmfit.Parameter('mu_22', value=mu2,expr='mu_20*(6584./6731.)')
aah = lmfit.Parameter('sig_22', value=sig22,expr='sig_20')
aai = lmfit.Parameter('amp_22', value=amp22,min=0.)
aaj = lmfit.Parameter('mu_23', value=mu3,expr='mu_20*(6563./6731.)')
aak = lmfit.Parameter('sig_23', value=sig23,expr='sig_20')
aal = lmfit.Parameter('amp_23', value=amp23,min=0.)
aam = lmfit.Parameter('mu_24', value=mu4,expr='mu_20*(6548./6731.)')
aan = lmfit.Parameter('sig_24', value=sig24,expr='sig_20')
aao = lmfit.Parameter('amp_24', value=amp24,min=0.,expr='amp_22*(1./3.)')
aap = lmfit.Parameter('mu_25', value=mu5,expr='mu_20*(6300.304/6730.82)')
aaq = lmfit.Parameter('sig_25', value=sig25,expr='sig_20')
aar = lmfit.Parameter('amp_25', value=amp25,min=0.)
aas = lmfit.Parameter('mu_26', value=mu6,expr='mu_20*(6363.77/6730.82)')
aat = lmfit.Parameter('sig_26', value=sig26,expr='sig_20')
aau = lmfit.Parameter('amp_26', value=amp26,min=0.,expr='amp_25*(1./3.)')
aba = lmfit.Parameter('mu_30', value=threeresu.values["mu_20"],vary=False)
abb = lmfit.Parameter('sig_30', value=threeresu.values["sig_20"],vary=False)
abc = lmfit.Parameter('amp_30', value=threeresu.values["amp_20"],vary=False)
abd = lmfit.Parameter('mu_31', value=threeresu.values["mu_21"],vary=False)
abe = lmfit.Parameter('sig_31', value=threeresu.values["sig_21"],vary=False)
abf = lmfit.Parameter('amp_31', value=threeresu.values["amp_21"],vary=False)
abg = lmfit.Parameter('mu_32', value=mu2,expr='mu_30*(6584./6731.)')
abh = lmfit.Parameter('sig_32', value=sig22,expr='sig_30')
abi = lmfit.Parameter('amp_32', value=amp22,min=0.)
abj = lmfit.Parameter('mu_33', value=mu3,expr='mu_30*(6563./6731.)')
abk = lmfit.Parameter('sig_33', value=sig23,expr='sig_30')
abl = lmfit.Parameter('amp_33', value=amp23,min=0.)
abm = lmfit.Parameter('mu_34', value=mu4,expr='mu_30*(6548./6731.)')
abn = lmfit.Parameter('sig_34', value=sig24,expr='sig_30')
abo = lmfit.Parameter('amp_34', value=amp24,min=0.,expr='amp_32*(1./3.)')
abp = lmfit.Parameter('mu_35', value=mu5,expr='mu_30*(6300.304/6730.82)')
abq = lmfit.Parameter('sig_35', value=sig25,expr='sig_30')
abr = lmfit.Parameter('amp_35', value=amp25,min=0.)
abt = lmfit.Parameter('mu_36', value=mu6,expr='mu_30*(6363.77/6730.82)')
abu = lmfit.Parameter('sig_36', value=sig26,expr='sig_30')
abv = lmfit.Parameter('amp_36', value=amp26,min=0.,expr='amp_35*(1./3.)')
params3c.add_many(sl,it,cd1,de,ef,fg,gh,hi,ij,jk,kl,lm,mn,no,op,pq,qr,rs,st,tu,uv,vw,wy,aaa,aab,aac,aad,aae,aaf,aag,aah,aai,aaj,aak,aal,aam,aan,aao,aap,aaq,aar,aas,aat,aau,aba,abb,abc,abd,abe,abf,abg,abh,abi,abj,abk,abl,abm,abn,abo,abp,abq,abr,abt,abu,abv)
elif meth == 'O':
cd1 = lmfit.Parameter('mu_0', value=mu0,expr='mu_5*(6730.82/6300.30)')
de = lmfit.Parameter('sig_0', value=sig0,expr='sig_5')
ef = lmfit.Parameter('amp_0', value=amp0,min=0.)
fg = lmfit.Parameter('mu_1', value=mu1,expr='mu_5*(6716.44/6300.30)')
gh = lmfit.Parameter('sig_1', value=sig1,expr='sig_5')
hi = lmfit.Parameter('amp_1', value=amp1,min=0.)
ij = lmfit.Parameter('mu_2', value=mu2,expr='mu_5*(6584./6300.30)')
jk = lmfit.Parameter('sig_2', value=sig2,expr='sig_5')
kl = lmfit.Parameter('amp_2', value=amp2,min=0.)
lm = lmfit.Parameter('mu_3', value=mu3,expr='mu_5*(6563./6300.30)')
mn = lmfit.Parameter('sig_3', value=sig3,expr='sig_5')
no = lmfit.Parameter('amp_3', value=amp3,min=0.)
op = lmfit.Parameter('mu_4', value=mu4,expr='mu_5*(6548./6300.30)')
pq = lmfit.Parameter('sig_4', value=sig4,expr='sig_5')
qr = lmfit.Parameter('amp_4', value=amp4,min=0.,expr='amp_2*(1./3.)')
rs = lmfit.Parameter('mu_5', value=threeresu.values["mu_0"],vary=False)#mu5,expr='mu_0*(6300.304/6730.82)')
st = lmfit.Parameter('sig_5', value=threeresu.values["sig_0"],vary=False)
tu = lmfit.Parameter('amp_5', value=threeresu.values["amp_0"],vary=False)
uv = lmfit.Parameter('mu_6', value=threeresu.values["mu_1"],vary=False)#mu6,expr='mu_0*(6363.77/6730.82)')
vw = lmfit.Parameter('sig_6', value=threeresu.values["sig_1"],vary=False)
wy = lmfit.Parameter('amp_6', value=threeresu.values["amp_1"],vary=False)
aaa = lmfit.Parameter('mu_20', value=mu0,expr='mu_25*(6730.82/6300.30)')
aab = lmfit.Parameter('sig_20', value=sig20,expr='sig_25')
aac = lmfit.Parameter('amp_20', value=amp20,min=0.)
aad = lmfit.Parameter('mu_21', value=mu1,expr='mu_25*(6716.44/6300.30)')
aae = lmfit.Parameter('sig_21', value=sig21,expr='sig_25')
aaf = lmfit.Parameter('amp_21', value=amp21,min=0.)
aag = lmfit.Parameter('mu_22', value=mu2,expr='mu_25*(6584./6731.)')
aah = lmfit.Parameter('sig_22', value=sig22,expr='sig_25')
aai = lmfit.Parameter('amp_22', value=amp22,min=0.)
aaj = lmfit.Parameter('mu_23', value=mu3,expr='mu_25*(6563./6731.)')
aak = lmfit.Parameter('sig_23', value=sig23,expr='sig_25')
aal = lmfit.Parameter('amp_23', value=amp23,min=0.)
aam = lmfit.Parameter('mu_24', value=mu4,expr='mu_20*(6548./6731.)')
aan = lmfit.Parameter('sig_24', value=sig24,expr='sig_25')
aao = lmfit.Parameter('amp_24', value=amp24,min=0.,expr='amp_22*(1./3.)')
aap = lmfit.Parameter('mu_25', value=threeresu.values["mu_20"],vary=False)#mu5,expr='mu_20*(6300.304/6730.82)')
aaq = lmfit.Parameter('sig_25', value=threeresu.values["sig_20"],vary=False)
aar = lmfit.Parameter('amp_25', value=threeresu.values["amp_20"],vary=False)
aas = lmfit.Parameter('mu_26', value=threeresu.values["mu_21"],vary=False)#mu6,expr='mu_20*(6363.77/6730.82)')
aat = lmfit.Parameter('sig_26', value=threeresu.values["sig_21"],vary=False)
aau = lmfit.Parameter('amp_26', value=threeresu.values["mu_21"],vary=False)
aba = lmfit.Parameter('mu_30', value=mu0,expr='mu_35*(6730.82/6300.30)')
abb = lmfit.Parameter('sig_30', value=sig30,expr='sig_35')
abc = lmfit.Parameter('amp_30', value=amp30,min=0.)
abd = lmfit.Parameter('mu_31', value=mu1,expr='mu_35*(6716.44/6300.30)')
abe = lmfit.Parameter('sig_31', value=sig31,expr='sig_35')
abf = lmfit.Parameter('amp_31', value=amp31,min=0.)
abg = lmfit.Parameter('mu_32', value=mu2,expr='mu_30*(6584./6731.)')
abh = lmfit.Parameter('sig_32', value=sig22,expr='sig_30')
abi = lmfit.Parameter('amp_32', value=amp22,min=0.)
abj = lmfit.Parameter('mu_33', value=mu3,expr='mu_30*(6563./6731.)')
abk = lmfit.Parameter('sig_33', value=sig23,expr='sig_30')
abl = lmfit.Parameter('amp_33', value=amp23,min=0.)
abm = lmfit.Parameter('mu_34', value=mu4,expr='mu_30*(6548./6731.)')
abn = lmfit.Parameter('sig_34', value=sig24,expr='sig_30')
abo = lmfit.Parameter('amp_34', value=amp24,min=0.,expr='amp_32*(1./3.)')
abp = lmfit.Parameter('mu_35', value=threeresu.values["mu_30"],vary=False)#mu5,expr='mu_30*(6300.304/6730.82)')
abq = lmfit.Parameter('sig_35', value=threeresu.values["sig_30"],vary=False)
abr = lmfit.Parameter('amp_35', value=threeresu.values["amp_30"],vary=False)
abt = lmfit.Parameter('mu_36', value=threeresu.values["mu_31"],vary=False)#mu6,expr='mu_30*(6363.77/6730.82)')
abu = lmfit.Parameter('sig_36', value=threeresu.values["sig_31"],vary=False)
abv = lmfit.Parameter('amp_36', value=threeresu.values["amp_31"],vary=False)
params3c.add_many(sl,it,rs,st,tu,uv,vw,wy,cd1,de,ef,fg,gh,hi,ij,jk,kl,lm,mn,no,op,pq,qr,aap,aaq,aar,aas,aat,aau,aaa,aab,aac,aad,aae,aaf,aag,aah,aai,aaj,aak,aal,aam,aan,aao,abp,abq,abr,abt,abu,abv,aba,abb,abc,abd,abe,abf,abg,abh,abi,abj,abk,abl,abm,abn,abo)
T3CResu = threecomp_mod.fit(data_cor,params=params3c,x=l)
with open(parentFold+'fit3CAll_result.txt', 'w') as fh: fh.write(T3CResu.fit_report())
########################## Calculate gaussians and final fit ##########################
# Now we create and plot the individual gaussians of the fit
gaus1 = Ofuncts.gaussian(l,T3CResu.values['mu_0'],T3CResu.values['sig_0'],T3CResu.values['amp_0'])
gaus2 = Ofuncts.gaussian(l,T3CResu.values['mu_1'],T3CResu.values['sig_1'],T3CResu.values['amp_1'])
gaus3 = Ofuncts.gaussian(l,T3CResu.values['mu_2'],T3CResu.values['sig_2'],T3CResu.values['amp_2'])
gaus4 = Ofuncts.gaussian(l,T3CResu.values['mu_3'],T3CResu.values['sig_3'],T3CResu.values['amp_3'])
gaus5 = Ofuncts.gaussian(l,T3CResu.values['mu_4'],T3CResu.values['sig_4'],T3CResu.values['amp_4'])
gaus6 = Ofuncts.gaussian(l,T3CResu.values['mu_5'],T3CResu.values['sig_5'],T3CResu.values['amp_5'])
gaus7 = Ofuncts.gaussian(l,T3CResu.values['mu_6'],T3CResu.values['sig_6'],T3CResu.values['amp_6'])
gaus21 = Ofuncts.gaussian(l,T3CResu.values['mu_20'],T3CResu.values['sig_20'],T3CResu.values['amp_20'])
gaus22 = Ofuncts.gaussian(l,T3CResu.values['mu_21'],T3CResu.values['sig_21'],T3CResu.values['amp_21'])
gaus23 = Ofuncts.gaussian(l,T3CResu.values['mu_22'],T3CResu.values['sig_22'],T3CResu.values['amp_22'])
gaus24 = Ofuncts.gaussian(l,T3CResu.values['mu_23'],T3CResu.values['sig_23'],T3CResu.values['amp_23'])
gaus25 = Ofuncts.gaussian(l,T3CResu.values['mu_24'],T3CResu.values['sig_24'],T3CResu.values['amp_24'])
gaus26 = Ofuncts.gaussian(l,T3CResu.values['mu_25'],T3CResu.values['sig_25'],T3CResu.values['amp_25'])
gaus27 = Ofuncts.gaussian(l,T3CResu.values['mu_26'],T3CResu.values['sig_26'],T3CResu.values['amp_26'])
gaus31 = Ofuncts.gaussian(l,T3CResu.values['mu_30'],T3CResu.values['sig_30'],T3CResu.values['amp_30'])
gaus32 = Ofuncts.gaussian(l,T3CResu.values['mu_31'],T3CResu.values['sig_31'],T3CResu.values['amp_31'])
gaus33 = Ofuncts.gaussian(l,T3CResu.values['mu_32'],T3CResu.values['sig_32'],T3CResu.values['amp_32'])
gaus34 = Ofuncts.gaussian(l,T3CResu.values['mu_33'],T3CResu.values['sig_33'],T3CResu.values['amp_33'])
gaus35 = Ofuncts.gaussian(l,T3CResu.values['mu_34'],T3CResu.values['sig_34'],T3CResu.values['amp_34'])
gaus36 = Ofuncts.gaussian(l,T3CResu.values['mu_35'],T3CResu.values['sig_35'],T3CResu.values['amp_35'])
gaus37 = Ofuncts.gaussian(l,T3CResu.values['mu_36'],T3CResu.values['sig_36'],T3CResu.values['amp_36'])
T3Cfin_fit = T3CResu.best_fit
# one component
stdf_s2 = np.std(data_cor[np.where(l<l1)[0][-1]:np.where(l>l2)[0][0]+10]-T3Cfin_fit[np.where(l<l1)[0][-1]:np.where(l>l2)[0][0]+10])
stdf_s1 = np.std(data_cor[np.where(l<l3)[0][-1]-10:np.where(l>l4)[0][0]]-T3Cfin_fit[np.where(l<l3)[0][-1]-10:np.where(l>l4)[0][0]])
stdf_n2 = np.std(data_cor[np.where(l<l5)[0][-1]:np.where(l>l6)[0][0]+10]-T3Cfin_fit[np.where(l<l5)[0][-1]:np.where(l>l6)[0][0]+10])
stdf_ha = np.std(data_cor[np.where(l<l7)[0][-1]:np.where(l>l8)[0][0]]-T3Cfin_fit[np.where(l<l7)[0][-1]:np.where(l>l8)[0][0]])
stdf_n1 = np.std(data_cor[np.where(l<l9)[0][-1]-10:np.where(l>l10)[0][0]]-T3Cfin_fit[np.where(l<l9)[0][-1]-10:np.where(l>l10)[0][0]])
stdf_o1 = np.std(data_cor[np.where(l<l11)[0][-1]-10:np.where(l>l12)[0][0]]-T3Cfin_fit[np.where(l<l11)[0][-1]-10:np.where(l>l12)[0][0]])
stdf_o2 = np.std(data_cor[np.where(l<l13)[0][-1]-10:np.where(l>l14)[0][0]]-T3Cfin_fit[np.where(l<l13)[0][-1]-10:np.where(l>l14)[0][0]])
print('The condition for each line (in the same order as before) needs to be std_line < 3*std_cont --> for 1 components is... ')
print(' For SII2: '+str(stdf_s2/stadev)+' < 3')
print(' For SII1: '+str(stdf_s1/stadev)+' < 3')
print(' For NII2: '+str(stdf_n2/stadev)+' < 3')
print(' For Halpha: '+str(stdf_ha/stadev)+' < 3')
print(' For NII1: '+str(stdf_n1/stadev)+' < 3')
print(' For OI1: '+str(stdf_o1/stadev)+' < 3')
print(' For OI2: '+str(stdf_o2/stadev)+' < 3')
if os.path.exists(parentFold+'eps_adj'+str(meth)+'_3C.txt'): os.remove(parentFold+'eps_adj'+str(meth)+'_3C.txt')
np.savetxt(parentFold+'eps_adj'+str(meth)+'_3C.txt',np.c_[stdf_s2/stadev,stdf_s1/stadev,stdf_n2/stadev,stdf_ha/stadev,stdf_n1/stadev,stdf_o2/stadev,stdf_o1/stadev,T3CResu.chisqr], ('%8.5f','%8.5f','%8.5f','%8.5f','%8.5f','%8.5f','%8.5f','%8.5f'),header=('SII2\tSII1\tNII2\tHa\tNII1\tOI1\tOI2\tChi2'))
try:
# We determine the maximum flux of the fit for all the lines, and the velocity and sigma components
max2S1 = T3Cfin_fit[np.where(abs(T3CResu.values['mu_0']-l)<0.5)[0][0]]
max2S2 = T3Cfin_fit[np.where(abs(T3CResu.values['mu_1']-l)<0.5)[0][0]]
max2N1 = T3Cfin_fit[np.where(abs(T3CResu.values['mu_2']-l)<0.5)[0][0]]
max2Ha = T3Cfin_fit[np.where(abs(T3CResu.values['mu_3']-l)<0.5)[0][0]]
max2N2 = T3Cfin_fit[np.where(abs(T3CResu.values['mu_4']-l)<0.5)[0][0]]
max2O1 = T3Cfin_fit[np.where(abs(T3CResu.values['mu_5']-l)<0.5)[0][0]]
max2O2 = T3Cfin_fit[np.where(abs(T3CResu.values['mu_6']-l)<0.5)[0][0]]
except IndexError:
print('ERROR: index out of range. Setting the flux values of the OI 1 line to 0.')
# Calculus of the velocity and sigma for the three components
sig30S2 = pix_to_v*np.sqrt(T3CResu.values['sig_3']**2-sig_inst**2)
sig31S2 = pix_to_v*np.sqrt(T3CResu.values['sig_23']**2-sig_inst**2)
sig32S2 = pix_to_v*np.sqrt(T3CResu.values['sig_33']**2-sig_inst**2)
v30S2 = v_luz*((T3CResu.values['mu_3']-l_Halpha)/l_Halpha)
v31S2 = v_luz*((T3CResu.values['mu_23']-l_Halpha)/l_Halpha)
v32S2 = v_luz*((T3CResu.values['mu_33']-l_Halpha)/l_Halpha)
# Using SII lines as reference
if meth == 'S':
if threeresu.params['sig_0'].stderr == None:
print('Problem determining the errors! First component sigma ')
esig30S2 = 0.
elif threeresu.params['sig_0'].stderr != None:
esig30S2 = pix_to_v*(2*T3CResu.values['sig_0']*threeresu.params['sig_0'].stderr)/(np.sqrt(T3CResu.values['sig_0']**2-sig_inst**2))
if threeresu.params['sig_20'].stderr == None:
print('Problem determining the errors! Second component sigma ')
esig31S2 = 0.
elif threeresu.params['sig_20'].stderr != None:
esig31S2 = pix_to_v*(2*T3CResu.values['sig_20']*threeresu.params['sig_20'].stderr)/(np.sqrt(T3CResu.values['sig_20']**2-sig_inst**2))
if threeresu.params['sig_30'].stderr == None:
print('Problem determining the errors! Second component sigma ')
esig32S2 = 0.
elif threeresu.params['sig_30'].stderr != None:
esig32S2 = pix_to_v*(2*T3CResu.values['sig_30']*threeresu.params['sig_30'].stderr)/(np.sqrt(T3CResu.values['sig_30']**2-sig_inst**2))
if threeresu.params['mu_0'].stderr == None:
print('Problem determining the errors! First component ')
ev30S2= 0.
elif tworesu.params['mu_0'].stderr != None:
print('Problem determining the errors! Second component ')
ev30S2 = ((v_luz/l_SII_2)*T3CResu.params['mu_0'].stderr)
if threeresu.params['mu_20'].stderr == None:
ev31S2 = 0.
elif threeresu.params['mu_20'].stderr != None:
ev31S2 = ((v_luz/l_SII_2)*threeresu.params['mu_20'].stderr)
if threeresu.params['mu_30'].stderr == None:
ev32S2 = 0.
elif threeresu.params['mu_30'].stderr != None:
ev32S2 = ((v_luz/l_SII_2)*threeresu.params['mu_30'].stderr)
# Using OI lines as reference
elif meth == 'O':
if threeresu.params['sig_5'].stderr == None:
print('Problem determining the errors! First component sigma ')
esig30S2 = 0.
elif threeresu.params['sig_5'].stderr != None:
esig30S2 = pix_to_v*(2*T3CResu.values['sig_5']*threeresu.params['sig_5'].stderr)/(np.sqrt(T3CResu.values['sig_5']**2-sig_inst**2))
if threeresu.params['sig_25'].stderr == None:
print('Problem determining the errors! Second component sigma ')
esig31S2 = 0.
elif threeresu.params['sig_25'].stderr != None:
esig31S2 = pix_to_v*(2*T3CResu.values['sig_25']*threeresu.params['sig_25'].stderr)/(np.sqrt(T3CResu.values['sig_25']**2-sig_inst**2))
if threeresu.params['sig_35'].stderr == None:
print('Problem determining the errors! Third component sigma ')
esig32S2 = 0.
elif threeresu.params['sig_35'].stderr != None:
esig32S2 = pix_to_v*(2*T3CResu.values['sig_35']*threeresu.params['sig_35'].stderr)/(np.sqrt(T3CResu.values['sig_35']**2-sig_inst**2))
if threeresu.params['mu_5'].stderr == None:
print('Problem determining the errors! First component ')
ev30S2 = 0.
elif tworesu.params['mu_5'].stderr != None:
print('Problem determining the errors! Second component ')
ev30S2 = ((v_luz/l_OI_1)*T3CResu.params['mu_5'].stderr)
if threeresu.params['mu_25'].stderr == None:
ev31S2 = 0.
elif threeresu.params['mu_25'].stderr != None:
ev31S2 = ((v_luz/l_OI_1)*threeresu.params['mu_25'].stderr)
if threeresu.params['mu_35'].stderr == None:
ev32S2 = 0.
elif threeresu.params['mu_35'].stderr != None:
ev32S2 = ((v_luz/l_OI_1)*threeresu.params['mu_35'].stderr)
# Save the velocity and sigma for all components
if os.path.exists(parentFold+'v_sig_adj_3C.txt'): os.remove(parentFold+'v_sig_adj_3C.txt')
np.savetxt(parentFold+'v_sig_adj_3C.txt',np.c_[v30S2,ev30S2,v31S2,ev31S2,v32S2,ev32S2,sig30S2,esig30S2,sig31S2,esig31S2,sig32S2,esig32S2],('%8.5f','%8.5f','%8.5f','%8.5f','%8.5f','%8.5f','%8.5f','%8.5f','%8.5f','%8.5f','%8.5f','%8.5f'),header=('v_ref2\tev_ref2\tv_2ref2\tev_2ref2\tv_3ref2\tev_3ref2\tsig_ref2\tesig_ref2\tsig_2ref2\tesig_2ref2\tsig_3ref2\tesig_3ref2'))
# Save the fluxes for all components
if os.path.exists(parentFold+'fluxes_'+str(meth)+'_3C_Ncomp.txt'): os.remove(parentFold+'fluxes_'+str(meth)+'_3C_Ncomp.txt')
np.savetxt(parentFold+'fluxes_3C_Ncomp.txt',np.c_[sum(gaus1),sum(gaus2),sum(gaus3),sum(gaus4),sum(gaus5),sum(gaus6),sum(gaus7)],fmt=('%8.16f','%8.16f','%8.16f','%8.16f','%8.16f','%8.16f','%8.16f'),header=('SII_6731\tSII_6716\tNII_6584\tHalpha\tNII_6548\tOI_6300\tOI_6363'))
if os.path.exists(parentFold+'fluxes_'+str(meth)+'_3C_Scomp.txt'): os.remove(parentFold+'fluxes_'+str(meth)+'_3C_Scomp.txt')
np.savetxt(parentFold+'fluxes_3C_Scomp.txt',np.c_[sum(gaus11),sum(gaus12),sum(gaus13),sum(gaus14),sum(gaus15),sum(gaus16),sum(gaus17)],('%8.16f','%8.16f','%8.16f','%8.16f','%8.16f','%8.16f','%8.16f'),header=('SII_6731\tSII_6716\tNII_6584\tHalpha\tNII_6548\tOI_6300\tOI_6363'))
if os.path.exists(parentFold+'fluxes_'+str(meth)+'_3C_N2comp.txt'): os.remove(parentFold+'fluxes_'+str(meth)+'_3C_N2comp.txt')
np.savetxt(parentFold+'fluxes_3C_N2comp.txt',np.c_[sum(gaus21),sum(gaus22),sum(gaus23),sum(gaus24),sum(gaus25),sum(gaus26),sum(gaus27)],('%8.16f','%8.16f','%8.16f','%8.16f','%8.16f','%8.16f','%8.16f'),header=('SII_6731\tSII_6716\tNII_6584\tHalpha\tNII_6548\tOI_6300\tOI_6363'))
########################### PLOT #############################
plt.close('all')
# MAIN plot
fig1 = plt.figure(1,figsize=(10, 9))
frame1 = fig1.add_axes((.1,.25,.85,.65)) # xstart, ystart, xend, yend [units are fraction of the image frame, from bottom left corner]
plt.plot(l,data_cor,'k',linewidth=2) # Initial data
plt.plot(l[std0:std1],data_cor[std0:std1],c='y',linewidth=4) # Zone where the stddev is calculated
plt.plot(l[std0:std1],data_cor[std0:std1],'k',linewidth=1) # Initial data
#plt.plot(l,(linresu.values['slope']*l+linresu.values['intc']),c='y',linestyle=(0, (5, 8)),label='Linear fit')
plt.plot(l,gaus1+lin_data_fin,c='g',linestyle='-')
plt.plot(l,gaus2+lin_data_fin,c='g',linestyle='-')
plt.plot(l,gaus3+lin_data_fin,c='g',linestyle='-')
plt.plot(l,gaus4+lin_data_fin,c='g',linestyle='-')
plt.plot(l,gaus5+lin_data_fin,c='g',linestyle='-')
plt.plot(l,gaus6+lin_data_fin,c='g',linestyle='-')
plt.plot(l,gaus7+lin_data_fin,c='g',linestyle='-')
plt.plot(l,gaus11+lin_data_fin,c='dodgerblue',linestyle='-')
plt.plot(l,gaus12+lin_data_fin,c='dodgerblue',linestyle='-')
plt.plot(l,gaus13+lin_data_fin,c='dodgerblue',linestyle='-')
plt.plot(l,gaus14+lin_data_fin,c='dodgerblue',linestyle='-')
plt.plot(l,gaus15+lin_data_fin,c='dodgerblue',linestyle='-')
plt.plot(l,gaus16+lin_data_fin,c='dodgerblue',linestyle='-')
plt.plot(l,gaus17+lin_data_fin,c='dodgerblue',linestyle='-')
plt.plot(l,gaus21+lin_data_fin,c='magenta',linestyle='-')
plt.plot(l,gaus22+lin_data_fin,c='magenta',linestyle='-')
plt.plot(l,gaus23+lin_data_fin,c='magenta',linestyle='-')
plt.plot(l,gaus24+lin_data_fin,c='magenta',linestyle='-')
plt.plot(l,gaus25+lin_data_fin,c='magenta',linestyle='-')
plt.plot(l,gaus26+lin_data_fin,c='magenta',linestyle='-')
plt.plot(l,gaus27+lin_data_fin,c='magenta',linestyle='-')
plt.plot(l,T3Cfin_fit,'r-')
textstr = '\n'.join((r'$V_{SII_{3-1comp}}$ = '+ '{:.2f} +- {:.2f}'.format(v30S2,ev30S2),
r'$V_{SII_{3-2comp}}$ = '+ '{:.2f} +- {:.2f}'.format(v31S2,ev31S2),
r'$V_{SII_{3-3comp}}$ = '+ '{:.2f} +- {:.2f}'.format(v32S2,ev32S2),
r'$\sigma_{SII_{3-1comp}}$ = '+ '{:.2f} +- {:.2f}'.format(sig30S2,esig30S2),
r'$\sigma_{SII_{3-2comp}}$ = '+ '{:.2f} +- {:.2f}'.format(sig31S2,esig31S2),
r'$\sigma_{SII_{3-3comp}}$ = '+ '{:.2f} +- {:.2f}'.format(sig32S2,esig32S2),
#r'$\frac{F_{SII_{2}}}{F_{SII_{1}}}$ = '+ '{:.3f}'.format(max2S2/max2S1),
r'$F_{H_{\alpha}}$ = '+ '{:.3f}'.format(max2Ha)+' $10^{-14}$'))
frame1.set_xticklabels([]) # Remove x-tic labels for the first frame
plt.ylabel(r'Flux (erg $\rm cm^{-2} s^{-1} \AA^{-1}$)',fontsize=19)
plt.tick_params(axis='both', labelsize=17)
plt.xlim(l[0],l[-1])
plt.gca().yaxis.set_major_locator(plt.MaxNLocator(prune='lower'))
plt.text(0.81,0.9,'N1',color='g',transform=frame1.transAxes,fontsize=21)
plt.text(0.84,0.9,'+',color='k',transform=frame1.transAxes,fontsize=21)
plt.text(0.87,0.9,'S',color='dodgerblue',transform=frame1.transAxes,fontsize=21)
plt.text(0.9,0.9,'+',color='k',transform=frame1.transAxes,fontsize=21)
plt.text(0.93,0.9,'N2',color='magenta',transform=frame1.transAxes,fontsize=21)
# RESIDUAL plot
frame2 = fig1.add_axes((.1,.1,.85,.15))
plt.plot(l,np.zeros(len(l)),c='orange',linestyle='-') # Line around zero
plt.plot(l,data_cor-T3Cfin_fit,color='k') # Main
plt.xlabel('Wavelength ($\AA$)',fontsize=19)
plt.ylabel('Residuals',fontsize=19)
plt.tick_params(axis='both', labelsize=17)
plt.xlim(l[0],l[-1])
plt.plot(l,np.zeros(len(l))+1.5*stadev,c='orange',linestyle=(0,(5,8))) # 3 sigma upper limit
plt.plot(l,np.zeros(len(l))-1.5*stadev,c='orange',linestyle=(0,(5,8))) # 3 sigma down limit
plt.ylim(-(3*stadev)*3,(3*stadev)*3)
plt.savefig(parentFold+'adj_full_3comp.pdf',format='pdf',bbox_inches='tight',pad_inches=0.2)
props = dict(boxstyle='round',facecolor='white', alpha=0.5)
frame1.text(6250.,max(data_cor),textstr,fontsize=12,verticalalignment='top', bbox=props)
plt.savefig(parentFold+'adj_full_3comp.png',bbox_inches='tight',pad_inches=0.2)
return gaus1,gaus2,gaus3,gaus4,gaus5,gaus6,gaus7,gaus11,gaus12,gaus13,gaus14,gaus15,gaus16,gaus17,gaus21,gaus22,gaus23,gaus24,gaus25,gaus26,gaus27,v30S2,ev30S2,v31S2,ev31S2,v32S2,ev32S2,sig30S2,esig30S2,sig31S2,esig31S2,sig30S2,esig30S2,T3CResu
| [
"laurahm94@gmail.com"
] | laurahm94@gmail.com |
b8ed825405fcab4a1b57b395346dbcae72625ae4 | b5a6e2f020e8ca7b6e25d7ea07f82a0cff8067c0 | /lesson 7. kalman_vector_1.py | 4feb3ca23cc3e6ea5b735801c3691756f5cd07c5 | [] | no_license | Tsurikov-Aleksandr/python-algorithms | 1c2512c43eb28332d40409ef01a366d754cc589f | fc6468013f332e99bc16b66dac30161ffe2085dc | refs/heads/master | 2023-09-02T20:03:06.903618 | 2021-11-05T05:50:06 | 2021-11-05T05:50:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,812 | py | import numpy as np
import matplotlib.pyplot as plt

# --- Simulation of a 3-D Kalman filter tracking an AR(1) motion model. ---
N = 100        # number of observations
dNoise = 1     # observation-noise variance
dSignal = 5    # signal variance (spread of the initial position)
r = 0.99       # correlation coefficient of the motion (AR) model
en = 0.1       # variance of the random term in the motion model
M = 3          # dimensionality of the position vector

R = np.array([[r, 0, 0], [0, r, 0], [0, 0, r]])  # state-transition matrix (diagonal AR coefficients)
Vksi = np.eye(M)*en  # diagonal matrix with the process-noise variances en on the main diagonal
V = np.eye(M)*dNoise  # diagonal matrix with the observation-error variances

# True trajectory: row i follows the autoregressive model x[i] = R @ x[i-1] + noise.
x = np.zeros(N*M).reshape(N, M)  # true movement coordinates (filled below)
# NOTE(review): x[:][i] is equivalent to x[i] (x[:] is a view of the array),
# so these statements address whole rows.
x[:][0] = np.random.normal(0, dSignal, M)  # draw the initial position
for i in range(1, N):  # generate the remaining positions from the AR model
    x[:][i] = np.dot(R, x[:][i-1]) + np.random.normal(0, en, M)
z = x + np.random.normal(0, dNoise, size=(N, M))  # noisy observations of the trajectory

# --- Kalman-filter estimation of the signal. ---
xx = np.zeros(N*M).reshape(N, M)  # storage for the position estimates
P = np.zeros(M*M).reshape(M, M)   # storage for the estimation-error covariance (overwritten below)
xx[:][0] = z[:][0]  # first estimate is taken directly from the first observation
P = V               # covariance of the first estimate equals the observation noise
Vinv = np.linalg.inv(V)  # inverse of the observation-error covariance matrix
# Recurrent computation of the Kalman-filter estimates.
for i in range(1, N):
    Pe = np.dot(np.dot(R, P), R.T) + Vksi  # predicted (a-priori) covariance
    # NOTE(review): elementwise '*' with a matrix inverse instead of a matrix
    # product — numerically valid here only because R, Vksi and V (and hence
    # Pe and P) are all diagonal; confirm before generalizing this script to
    # non-diagonal covariances.
    P = np.dot(Pe, V)*np.linalg.inv(Pe+V)
    xe = np.dot(R, xx[:][i-1])  # predicted state
    xx[:][i] = xe+np.dot(np.dot(P, Vinv), (z[:][i]-xe))  # correction step (gain = P @ Vinv)

# --- Display of the results: truth, observations and estimates per axis. ---
fig, (axX, axY, axZ) = plt.subplots(nrows=3, ncols=1, figsize=(10, 6))
res = xx.reshape(M*N)
resX = x.reshape(M*N)
resZ = z.reshape(M*N)
# Every M-th element of a flattened (N, M) array belongs to one coordinate axis.
axX.plot(resX[0:N*M:M]); axX.plot(resZ[0:N*M:M]); axX.plot(res[0:N*M:M])
axY.plot(resX[1:N*M:M]); axY.plot(resZ[1:N*M:M]); axY.plot(res[1:N*M:M])
axZ.plot(resX[2:N*M:M]); axZ.plot(resZ[2:N*M:M]); axZ.plot(res[2:N*M:M])
axX.set_ylabel('Ось X')
axY.set_ylabel('Ось Y')
axZ.set_ylabel('Ось Z')
axX.grid(True)
axY.grid(True)
axZ.grid(True)
plt.show()
| [
"noreply@github.com"
] | Tsurikov-Aleksandr.noreply@github.com |
ee4edb70e193781c9779f8e5c27adb658d6cae2c | 783be0e2e489b55f8a5972d48d9c3089094088d9 | /api/migrations/0008_auto_20210221_1648.py | 7c73658b4a790424ca1866d51c866ddf2dc7924f | [] | no_license | Vengixlabs/saap | 6892fedf1dc0a83e4c2b4a45d0007552608b6092 | 19f128ee9e5ef5655aac6786b7a0482c3e714db6 | refs/heads/main | 2023-03-20T05:04:17.883231 | 2021-02-27T19:40:36 | 2021-02-27T19:40:36 | 342,942,918 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 404 | py | # Generated by Django 3.1.6 on 2021-02-21 11:18
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: allow ``posts.created_at`` to be NULL.

    Alters the ``created_at`` field of the ``posts`` model so the database
    column accepts NULL while keeping ``auto_now_add`` behaviour.
    """

    # Must be applied after the preceding migration of the ``api`` app.
    dependencies = [
        ('api', '0007_auto_20210221_1647'),
    ]

    operations = [
        migrations.AlterField(
            model_name='posts',
            name='created_at',
            field=models.DateTimeField(auto_now_add=True, null=True),
        ),
    ]
| [
"adarshreddy9849@gmail.com"
] | adarshreddy9849@gmail.com |
5a21ea274615b8d74f05ae02f26a2d9b4cff11af | 4a46b3d9b630a583e92112f7c9c1531752103ba4 | /String split and join.py | e863644feb1733abb8d9947b08f918cf39f119eb | [] | no_license | abhib074/HackerRank---Python | 0e20d6816f5b7197dd2eedfb4d68524eae9de9ce | 27896bb6cff99b309270471207aed54941ad8590 | refs/heads/main | 2023-04-09T05:51:57.967701 | 2021-04-23T18:23:58 | 2021-04-23T18:23:58 | 360,969,447 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 214 | py | def split_and_join(line):
line_split = line.split(" ")
line_join = "-".join(line_split)
return line_join
if __name__ == '__main__':
    # Read one line from stdin and emit its hyphen-joined form.
    print(split_and_join(input()))
"59416748+abhib074@users.noreply.github.com"
] | 59416748+abhib074@users.noreply.github.com |
452f266344d14193f6028c183b1c1184c8728329 | fab14fae2b494068aa793901d76464afb965df7e | /benchmarks/ltl_maxplus/f3/maxplus_24_91.py | 1be82cb21b86b50ca49efd9cc197df337fc8d3d4 | [
"MIT"
] | permissive | teodorov/F3 | 673f6f9ccc25acdfdecbfc180f439253474ba250 | c863215c318d7d5f258eb9be38c6962cf6863b52 | refs/heads/master | 2023-08-04T17:37:38.771863 | 2021-09-16T07:38:28 | 2021-09-16T07:38:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 60,763 | py |
from collections import Iterable
from mathsat import msat_term, msat_env
from mathsat import msat_make_true, msat_make_false
from mathsat import msat_make_constant, msat_declare_function
from mathsat import msat_get_rational_type
from mathsat import msat_make_and as _msat_make_and
from mathsat import msat_make_or as _msat_make_or
from mathsat import msat_make_not
from mathsat import msat_make_leq, msat_make_equal
from mathsat import msat_make_number, msat_make_plus, msat_make_times
from ltl.ltl import TermMap, LTLEncoder
from utils import name_next
def msat_make_and(menv: msat_env, *args):
    """Left-fold the binary MathSAT conjunction over *args*.

    An empty argument list yields the neutral element ``true``; a single
    term is returned unchanged.
    """
    if not args:
        return msat_make_true(menv)
    acc, *rest = args
    for term in rest:
        acc = _msat_make_and(menv, acc, term)
    return acc
def msat_make_or(menv: msat_env, *args):
    """Left-fold the binary MathSAT disjunction over *args*.

    An empty argument list yields the neutral element ``false``; a single
    term is returned unchanged.
    """
    if not args:
        return msat_make_false(menv)
    acc, *rest = args
    for term in rest:
        acc = _msat_make_or(menv, acc, term)
    return acc
def msat_make_minus(menv: msat_env, arg0: msat_term, arg1: msat_term):
    """Encode ``arg0 - arg1`` as ``arg0 + (-1) * arg1``."""
    neg_one = msat_make_number(menv, "-1")
    neg_arg1 = msat_make_times(menv, arg1, neg_one)
    return msat_make_plus(menv, arg0, neg_arg1)
def msat_make_lt(menv: msat_env, arg0: msat_term, arg1: msat_term):
    """Encode ``arg0 < arg1`` as ``not (arg0 >= arg1)``."""
    return msat_make_not(menv, msat_make_geq(menv, arg0, arg1))
def msat_make_geq(menv: msat_env, arg0: msat_term, arg1: msat_term):
    """Encode ``arg0 >= arg1`` by swapping the operands of ``<=``."""
    swapped = msat_make_leq(menv, arg1, arg0)
    return swapped
def msat_make_gt(menv: msat_env, arg0: msat_term, arg1: msat_term):
    """Encode ``arg0 > arg1`` as ``not (arg0 <= arg1)``."""
    return msat_make_not(menv, msat_make_leq(menv, arg0, arg1))
def msat_make_impl(menv: msat_env, arg0: msat_term, arg1: msat_term):
    """Encode the implication ``arg0 -> arg1`` as ``(not arg0) or arg1``."""
    return msat_make_or(menv, msat_make_not(menv, arg0), arg1)
def check_ltl(menv: msat_env, enc: LTLEncoder) -> (Iterable, msat_term,
msat_term, msat_term):
assert menv
assert isinstance(menv, msat_env)
assert enc
assert isinstance(enc, LTLEncoder)
real_type = msat_get_rational_type(menv)
names = ["x_0", "x_1", "x_2", "x_3", "x_4", "x_5", "x_6", "x_7", "x_8", "x_9", "x_10", "x_11", "x_12", "x_13", "x_14", "x_15", "x_16", "x_17", "x_18", "x_19", "x_20", "x_21", "x_22", "x_23"]
xs = [msat_declare_function(menv, name, real_type)
for name in names]
xs = [msat_make_constant(menv, x) for x in xs]
x_xs = [msat_declare_function(menv, name_next(name), real_type)
for name in names]
x_xs = [msat_make_constant(menv, x_x) for x_x in x_xs]
curr2next = {x: x_x for x, x_x in zip(xs, x_xs)}
n_10_0 = msat_make_number(menv, "10.0")
n_11_0 = msat_make_number(menv, "11.0")
n_12_0 = msat_make_number(menv, "12.0")
n_13_0 = msat_make_number(menv, "13.0")
n_14_0 = msat_make_number(menv, "14.0")
n_15_0 = msat_make_number(menv, "15.0")
n_16_0 = msat_make_number(menv, "16.0")
n_17_0 = msat_make_number(menv, "17.0")
n_18_0 = msat_make_number(menv, "18.0")
n_19_0 = msat_make_number(menv, "19.0")
n_1_0 = msat_make_number(menv, "1.0")
n_20_0 = msat_make_number(menv, "20.0")
n_2_0 = msat_make_number(menv, "2.0")
n_3_0 = msat_make_number(menv, "3.0")
n_4_0 = msat_make_number(menv, "4.0")
n_5_0 = msat_make_number(menv, "5.0")
n_6_0 = msat_make_number(menv, "6.0")
n_7_0 = msat_make_number(menv, "7.0")
n_8_0 = msat_make_number(menv, "8.0")
n_9_0 = msat_make_number(menv, "9.0")
init = msat_make_true(menv)
trans = msat_make_true(menv)
# transitions
expr0 = msat_make_plus(menv, xs[3], n_9_0)
expr1 = msat_make_plus(menv, xs[4], n_5_0)
expr2 = msat_make_plus(menv, xs[6], n_16_0)
expr3 = msat_make_plus(menv, xs[9], n_15_0)
expr4 = msat_make_plus(menv, xs[10], n_8_0)
expr5 = msat_make_plus(menv, xs[12], n_10_0)
expr6 = msat_make_plus(menv, xs[14], n_9_0)
expr7 = msat_make_plus(menv, xs[15], n_18_0)
expr8 = msat_make_plus(menv, xs[17], n_4_0)
expr9 = msat_make_plus(menv, xs[18], n_6_0)
expr10 = msat_make_plus(menv, xs[21], n_4_0)
expr11 = msat_make_plus(menv, xs[22], n_18_0)
_t = msat_make_and(menv,
msat_make_geq(menv, x_xs[0], expr0),
msat_make_geq(menv, x_xs[0], expr1),
msat_make_geq(menv, x_xs[0], expr2),
msat_make_geq(menv, x_xs[0], expr3),
msat_make_geq(menv, x_xs[0], expr4),
msat_make_geq(menv, x_xs[0], expr5),
msat_make_geq(menv, x_xs[0], expr6),
msat_make_geq(menv, x_xs[0], expr7),
msat_make_geq(menv, x_xs[0], expr8),
msat_make_geq(menv, x_xs[0], expr9),
msat_make_geq(menv, x_xs[0], expr10),
msat_make_geq(menv, x_xs[0], expr11),)
_t = msat_make_and(menv, _t,
msat_make_or(menv,
msat_make_equal(menv, x_xs[0], expr0),
msat_make_equal(menv, x_xs[0], expr1),
msat_make_equal(menv, x_xs[0], expr2),
msat_make_equal(menv, x_xs[0], expr3),
msat_make_equal(menv, x_xs[0], expr4),
msat_make_equal(menv, x_xs[0], expr5),
msat_make_equal(menv, x_xs[0], expr6),
msat_make_equal(menv, x_xs[0], expr7),
msat_make_equal(menv, x_xs[0], expr8),
msat_make_equal(menv, x_xs[0], expr9),
msat_make_equal(menv, x_xs[0], expr10),
msat_make_equal(menv, x_xs[0], expr11),))
trans = msat_make_and(menv, trans, _t)
expr0 = msat_make_plus(menv, xs[1], n_7_0)
expr1 = msat_make_plus(menv, xs[2], n_19_0)
expr2 = msat_make_plus(menv, xs[3], n_11_0)
expr3 = msat_make_plus(menv, xs[6], n_12_0)
expr4 = msat_make_plus(menv, xs[8], n_14_0)
expr5 = msat_make_plus(menv, xs[11], n_11_0)
expr6 = msat_make_plus(menv, xs[12], n_18_0)
expr7 = msat_make_plus(menv, xs[14], n_17_0)
expr8 = msat_make_plus(menv, xs[15], n_10_0)
expr9 = msat_make_plus(menv, xs[16], n_8_0)
expr10 = msat_make_plus(menv, xs[18], n_11_0)
expr11 = msat_make_plus(menv, xs[22], n_9_0)
_t = msat_make_and(menv,
msat_make_geq(menv, x_xs[1], expr0),
msat_make_geq(menv, x_xs[1], expr1),
msat_make_geq(menv, x_xs[1], expr2),
msat_make_geq(menv, x_xs[1], expr3),
msat_make_geq(menv, x_xs[1], expr4),
msat_make_geq(menv, x_xs[1], expr5),
msat_make_geq(menv, x_xs[1], expr6),
msat_make_geq(menv, x_xs[1], expr7),
msat_make_geq(menv, x_xs[1], expr8),
msat_make_geq(menv, x_xs[1], expr9),
msat_make_geq(menv, x_xs[1], expr10),
msat_make_geq(menv, x_xs[1], expr11),)
_t = msat_make_and(menv, _t,
msat_make_or(menv,
msat_make_equal(menv, x_xs[1], expr0),
msat_make_equal(menv, x_xs[1], expr1),
msat_make_equal(menv, x_xs[1], expr2),
msat_make_equal(menv, x_xs[1], expr3),
msat_make_equal(menv, x_xs[1], expr4),
msat_make_equal(menv, x_xs[1], expr5),
msat_make_equal(menv, x_xs[1], expr6),
msat_make_equal(menv, x_xs[1], expr7),
msat_make_equal(menv, x_xs[1], expr8),
msat_make_equal(menv, x_xs[1], expr9),
msat_make_equal(menv, x_xs[1], expr10),
msat_make_equal(menv, x_xs[1], expr11),))
trans = msat_make_and(menv, trans, _t)
expr0 = msat_make_plus(menv, xs[0], n_18_0)
expr1 = msat_make_plus(menv, xs[2], n_7_0)
expr2 = msat_make_plus(menv, xs[3], n_1_0)
expr3 = msat_make_plus(menv, xs[4], n_13_0)
expr4 = msat_make_plus(menv, xs[6], n_9_0)
expr5 = msat_make_plus(menv, xs[7], n_8_0)
expr6 = msat_make_plus(menv, xs[8], n_18_0)
expr7 = msat_make_plus(menv, xs[10], n_7_0)
expr8 = msat_make_plus(menv, xs[13], n_2_0)
expr9 = msat_make_plus(menv, xs[15], n_18_0)
expr10 = msat_make_plus(menv, xs[18], n_14_0)
expr11 = msat_make_plus(menv, xs[21], n_8_0)
_t = msat_make_and(menv,
msat_make_geq(menv, x_xs[2], expr0),
msat_make_geq(menv, x_xs[2], expr1),
msat_make_geq(menv, x_xs[2], expr2),
msat_make_geq(menv, x_xs[2], expr3),
msat_make_geq(menv, x_xs[2], expr4),
msat_make_geq(menv, x_xs[2], expr5),
msat_make_geq(menv, x_xs[2], expr6),
msat_make_geq(menv, x_xs[2], expr7),
msat_make_geq(menv, x_xs[2], expr8),
msat_make_geq(menv, x_xs[2], expr9),
msat_make_geq(menv, x_xs[2], expr10),
msat_make_geq(menv, x_xs[2], expr11),)
_t = msat_make_and(menv, _t,
msat_make_or(menv,
msat_make_equal(menv, x_xs[2], expr0),
msat_make_equal(menv, x_xs[2], expr1),
msat_make_equal(menv, x_xs[2], expr2),
msat_make_equal(menv, x_xs[2], expr3),
msat_make_equal(menv, x_xs[2], expr4),
msat_make_equal(menv, x_xs[2], expr5),
msat_make_equal(menv, x_xs[2], expr6),
msat_make_equal(menv, x_xs[2], expr7),
msat_make_equal(menv, x_xs[2], expr8),
msat_make_equal(menv, x_xs[2], expr9),
msat_make_equal(menv, x_xs[2], expr10),
msat_make_equal(menv, x_xs[2], expr11),))
trans = msat_make_and(menv, trans, _t)
expr0 = msat_make_plus(menv, xs[0], n_4_0)
expr1 = msat_make_plus(menv, xs[2], n_2_0)
expr2 = msat_make_plus(menv, xs[3], n_9_0)
expr3 = msat_make_plus(menv, xs[5], n_7_0)
expr4 = msat_make_plus(menv, xs[6], n_5_0)
expr5 = msat_make_plus(menv, xs[9], n_10_0)
expr6 = msat_make_plus(menv, xs[11], n_8_0)
expr7 = msat_make_plus(menv, xs[13], n_14_0)
expr8 = msat_make_plus(menv, xs[15], n_13_0)
expr9 = msat_make_plus(menv, xs[18], n_7_0)
expr10 = msat_make_plus(menv, xs[20], n_2_0)
expr11 = msat_make_plus(menv, xs[22], n_8_0)
_t = msat_make_and(menv,
msat_make_geq(menv, x_xs[3], expr0),
msat_make_geq(menv, x_xs[3], expr1),
msat_make_geq(menv, x_xs[3], expr2),
msat_make_geq(menv, x_xs[3], expr3),
msat_make_geq(menv, x_xs[3], expr4),
msat_make_geq(menv, x_xs[3], expr5),
msat_make_geq(menv, x_xs[3], expr6),
msat_make_geq(menv, x_xs[3], expr7),
msat_make_geq(menv, x_xs[3], expr8),
msat_make_geq(menv, x_xs[3], expr9),
msat_make_geq(menv, x_xs[3], expr10),
msat_make_geq(menv, x_xs[3], expr11),)
_t = msat_make_and(menv, _t,
msat_make_or(menv,
msat_make_equal(menv, x_xs[3], expr0),
msat_make_equal(menv, x_xs[3], expr1),
msat_make_equal(menv, x_xs[3], expr2),
msat_make_equal(menv, x_xs[3], expr3),
msat_make_equal(menv, x_xs[3], expr4),
msat_make_equal(menv, x_xs[3], expr5),
msat_make_equal(menv, x_xs[3], expr6),
msat_make_equal(menv, x_xs[3], expr7),
msat_make_equal(menv, x_xs[3], expr8),
msat_make_equal(menv, x_xs[3], expr9),
msat_make_equal(menv, x_xs[3], expr10),
msat_make_equal(menv, x_xs[3], expr11),))
trans = msat_make_and(menv, trans, _t)
expr0 = msat_make_plus(menv, xs[1], n_20_0)
expr1 = msat_make_plus(menv, xs[3], n_4_0)
expr2 = msat_make_plus(menv, xs[4], n_12_0)
expr3 = msat_make_plus(menv, xs[5], n_3_0)
expr4 = msat_make_plus(menv, xs[6], n_16_0)
expr5 = msat_make_plus(menv, xs[11], n_6_0)
expr6 = msat_make_plus(menv, xs[14], n_16_0)
expr7 = msat_make_plus(menv, xs[15], n_9_0)
expr8 = msat_make_plus(menv, xs[16], n_1_0)
expr9 = msat_make_plus(menv, xs[18], n_6_0)
expr10 = msat_make_plus(menv, xs[21], n_3_0)
expr11 = msat_make_plus(menv, xs[22], n_12_0)
_t = msat_make_and(menv,
msat_make_geq(menv, x_xs[4], expr0),
msat_make_geq(menv, x_xs[4], expr1),
msat_make_geq(menv, x_xs[4], expr2),
msat_make_geq(menv, x_xs[4], expr3),
msat_make_geq(menv, x_xs[4], expr4),
msat_make_geq(menv, x_xs[4], expr5),
msat_make_geq(menv, x_xs[4], expr6),
msat_make_geq(menv, x_xs[4], expr7),
msat_make_geq(menv, x_xs[4], expr8),
msat_make_geq(menv, x_xs[4], expr9),
msat_make_geq(menv, x_xs[4], expr10),
msat_make_geq(menv, x_xs[4], expr11),)
_t = msat_make_and(menv, _t,
msat_make_or(menv,
msat_make_equal(menv, x_xs[4], expr0),
msat_make_equal(menv, x_xs[4], expr1),
msat_make_equal(menv, x_xs[4], expr2),
msat_make_equal(menv, x_xs[4], expr3),
msat_make_equal(menv, x_xs[4], expr4),
msat_make_equal(menv, x_xs[4], expr5),
msat_make_equal(menv, x_xs[4], expr6),
msat_make_equal(menv, x_xs[4], expr7),
msat_make_equal(menv, x_xs[4], expr8),
msat_make_equal(menv, x_xs[4], expr9),
msat_make_equal(menv, x_xs[4], expr10),
msat_make_equal(menv, x_xs[4], expr11),))
trans = msat_make_and(menv, trans, _t)
expr0 = msat_make_plus(menv, xs[1], n_10_0)
expr1 = msat_make_plus(menv, xs[3], n_7_0)
expr2 = msat_make_plus(menv, xs[5], n_14_0)
expr3 = msat_make_plus(menv, xs[6], n_9_0)
expr4 = msat_make_plus(menv, xs[7], n_11_0)
expr5 = msat_make_plus(menv, xs[9], n_18_0)
expr6 = msat_make_plus(menv, xs[10], n_10_0)
expr7 = msat_make_plus(menv, xs[11], n_18_0)
expr8 = msat_make_plus(menv, xs[15], n_20_0)
expr9 = msat_make_plus(menv, xs[17], n_2_0)
expr10 = msat_make_plus(menv, xs[18], n_17_0)
expr11 = msat_make_plus(menv, xs[21], n_8_0)
_t = msat_make_and(menv,
msat_make_geq(menv, x_xs[5], expr0),
msat_make_geq(menv, x_xs[5], expr1),
msat_make_geq(menv, x_xs[5], expr2),
msat_make_geq(menv, x_xs[5], expr3),
msat_make_geq(menv, x_xs[5], expr4),
msat_make_geq(menv, x_xs[5], expr5),
msat_make_geq(menv, x_xs[5], expr6),
msat_make_geq(menv, x_xs[5], expr7),
msat_make_geq(menv, x_xs[5], expr8),
msat_make_geq(menv, x_xs[5], expr9),
msat_make_geq(menv, x_xs[5], expr10),
msat_make_geq(menv, x_xs[5], expr11),)
_t = msat_make_and(menv, _t,
msat_make_or(menv,
msat_make_equal(menv, x_xs[5], expr0),
msat_make_equal(menv, x_xs[5], expr1),
msat_make_equal(menv, x_xs[5], expr2),
msat_make_equal(menv, x_xs[5], expr3),
msat_make_equal(menv, x_xs[5], expr4),
msat_make_equal(menv, x_xs[5], expr5),
msat_make_equal(menv, x_xs[5], expr6),
msat_make_equal(menv, x_xs[5], expr7),
msat_make_equal(menv, x_xs[5], expr8),
msat_make_equal(menv, x_xs[5], expr9),
msat_make_equal(menv, x_xs[5], expr10),
msat_make_equal(menv, x_xs[5], expr11),))
trans = msat_make_and(menv, trans, _t)
expr0 = msat_make_plus(menv, xs[0], n_18_0)
expr1 = msat_make_plus(menv, xs[3], n_14_0)
expr2 = msat_make_plus(menv, xs[4], n_2_0)
expr3 = msat_make_plus(menv, xs[5], n_5_0)
expr4 = msat_make_plus(menv, xs[6], n_8_0)
expr5 = msat_make_plus(menv, xs[9], n_7_0)
expr6 = msat_make_plus(menv, xs[11], n_17_0)
expr7 = msat_make_plus(menv, xs[15], n_5_0)
expr8 = msat_make_plus(menv, xs[16], n_6_0)
expr9 = msat_make_plus(menv, xs[19], n_16_0)
expr10 = msat_make_plus(menv, xs[20], n_20_0)
expr11 = msat_make_plus(menv, xs[22], n_10_0)
_t = msat_make_and(menv,
msat_make_geq(menv, x_xs[6], expr0),
msat_make_geq(menv, x_xs[6], expr1),
msat_make_geq(menv, x_xs[6], expr2),
msat_make_geq(menv, x_xs[6], expr3),
msat_make_geq(menv, x_xs[6], expr4),
msat_make_geq(menv, x_xs[6], expr5),
msat_make_geq(menv, x_xs[6], expr6),
msat_make_geq(menv, x_xs[6], expr7),
msat_make_geq(menv, x_xs[6], expr8),
msat_make_geq(menv, x_xs[6], expr9),
msat_make_geq(menv, x_xs[6], expr10),
msat_make_geq(menv, x_xs[6], expr11),)
_t = msat_make_and(menv, _t,
msat_make_or(menv,
msat_make_equal(menv, x_xs[6], expr0),
msat_make_equal(menv, x_xs[6], expr1),
msat_make_equal(menv, x_xs[6], expr2),
msat_make_equal(menv, x_xs[6], expr3),
msat_make_equal(menv, x_xs[6], expr4),
msat_make_equal(menv, x_xs[6], expr5),
msat_make_equal(menv, x_xs[6], expr6),
msat_make_equal(menv, x_xs[6], expr7),
msat_make_equal(menv, x_xs[6], expr8),
msat_make_equal(menv, x_xs[6], expr9),
msat_make_equal(menv, x_xs[6], expr10),
msat_make_equal(menv, x_xs[6], expr11),))
trans = msat_make_and(menv, trans, _t)
expr0 = msat_make_plus(menv, xs[2], n_3_0)
expr1 = msat_make_plus(menv, xs[3], n_18_0)
expr2 = msat_make_plus(menv, xs[4], n_10_0)
expr3 = msat_make_plus(menv, xs[5], n_9_0)
expr4 = msat_make_plus(menv, xs[7], n_20_0)
expr5 = msat_make_plus(menv, xs[10], n_3_0)
expr6 = msat_make_plus(menv, xs[12], n_4_0)
expr7 = msat_make_plus(menv, xs[14], n_16_0)
expr8 = msat_make_plus(menv, xs[18], n_20_0)
expr9 = msat_make_plus(menv, xs[20], n_14_0)
expr10 = msat_make_plus(menv, xs[21], n_18_0)
expr11 = msat_make_plus(menv, xs[22], n_6_0)
_t = msat_make_and(menv,
msat_make_geq(menv, x_xs[7], expr0),
msat_make_geq(menv, x_xs[7], expr1),
msat_make_geq(menv, x_xs[7], expr2),
msat_make_geq(menv, x_xs[7], expr3),
msat_make_geq(menv, x_xs[7], expr4),
msat_make_geq(menv, x_xs[7], expr5),
msat_make_geq(menv, x_xs[7], expr6),
msat_make_geq(menv, x_xs[7], expr7),
msat_make_geq(menv, x_xs[7], expr8),
msat_make_geq(menv, x_xs[7], expr9),
msat_make_geq(menv, x_xs[7], expr10),
msat_make_geq(menv, x_xs[7], expr11),)
_t = msat_make_and(menv, _t,
msat_make_or(menv,
msat_make_equal(menv, x_xs[7], expr0),
msat_make_equal(menv, x_xs[7], expr1),
msat_make_equal(menv, x_xs[7], expr2),
msat_make_equal(menv, x_xs[7], expr3),
msat_make_equal(menv, x_xs[7], expr4),
msat_make_equal(menv, x_xs[7], expr5),
msat_make_equal(menv, x_xs[7], expr6),
msat_make_equal(menv, x_xs[7], expr7),
msat_make_equal(menv, x_xs[7], expr8),
msat_make_equal(menv, x_xs[7], expr9),
msat_make_equal(menv, x_xs[7], expr10),
msat_make_equal(menv, x_xs[7], expr11),))
trans = msat_make_and(menv, trans, _t)
expr0 = msat_make_plus(menv, xs[1], n_10_0)
expr1 = msat_make_plus(menv, xs[2], n_2_0)
expr2 = msat_make_plus(menv, xs[3], n_14_0)
expr3 = msat_make_plus(menv, xs[4], n_6_0)
expr4 = msat_make_plus(menv, xs[10], n_8_0)
expr5 = msat_make_plus(menv, xs[11], n_3_0)
expr6 = msat_make_plus(menv, xs[12], n_14_0)
expr7 = msat_make_plus(menv, xs[14], n_4_0)
expr8 = msat_make_plus(menv, xs[16], n_15_0)
expr9 = msat_make_plus(menv, xs[18], n_5_0)
expr10 = msat_make_plus(menv, xs[20], n_3_0)
expr11 = msat_make_plus(menv, xs[23], n_8_0)
_t = msat_make_and(menv,
msat_make_geq(menv, x_xs[8], expr0),
msat_make_geq(menv, x_xs[8], expr1),
msat_make_geq(menv, x_xs[8], expr2),
msat_make_geq(menv, x_xs[8], expr3),
msat_make_geq(menv, x_xs[8], expr4),
msat_make_geq(menv, x_xs[8], expr5),
msat_make_geq(menv, x_xs[8], expr6),
msat_make_geq(menv, x_xs[8], expr7),
msat_make_geq(menv, x_xs[8], expr8),
msat_make_geq(menv, x_xs[8], expr9),
msat_make_geq(menv, x_xs[8], expr10),
msat_make_geq(menv, x_xs[8], expr11),)
_t = msat_make_and(menv, _t,
msat_make_or(menv,
msat_make_equal(menv, x_xs[8], expr0),
msat_make_equal(menv, x_xs[8], expr1),
msat_make_equal(menv, x_xs[8], expr2),
msat_make_equal(menv, x_xs[8], expr3),
msat_make_equal(menv, x_xs[8], expr4),
msat_make_equal(menv, x_xs[8], expr5),
msat_make_equal(menv, x_xs[8], expr6),
msat_make_equal(menv, x_xs[8], expr7),
msat_make_equal(menv, x_xs[8], expr8),
msat_make_equal(menv, x_xs[8], expr9),
msat_make_equal(menv, x_xs[8], expr10),
msat_make_equal(menv, x_xs[8], expr11),))
trans = msat_make_and(menv, trans, _t)
expr0 = msat_make_plus(menv, xs[0], n_7_0)
expr1 = msat_make_plus(menv, xs[3], n_15_0)
expr2 = msat_make_plus(menv, xs[4], n_2_0)
expr3 = msat_make_plus(menv, xs[5], n_1_0)
expr4 = msat_make_plus(menv, xs[10], n_19_0)
expr5 = msat_make_plus(menv, xs[11], n_12_0)
expr6 = msat_make_plus(menv, xs[13], n_10_0)
expr7 = msat_make_plus(menv, xs[15], n_14_0)
expr8 = msat_make_plus(menv, xs[18], n_16_0)
expr9 = msat_make_plus(menv, xs[19], n_14_0)
expr10 = msat_make_plus(menv, xs[20], n_8_0)
expr11 = msat_make_plus(menv, xs[21], n_12_0)
_t = msat_make_and(menv,
msat_make_geq(menv, x_xs[9], expr0),
msat_make_geq(menv, x_xs[9], expr1),
msat_make_geq(menv, x_xs[9], expr2),
msat_make_geq(menv, x_xs[9], expr3),
msat_make_geq(menv, x_xs[9], expr4),
msat_make_geq(menv, x_xs[9], expr5),
msat_make_geq(menv, x_xs[9], expr6),
msat_make_geq(menv, x_xs[9], expr7),
msat_make_geq(menv, x_xs[9], expr8),
msat_make_geq(menv, x_xs[9], expr9),
msat_make_geq(menv, x_xs[9], expr10),
msat_make_geq(menv, x_xs[9], expr11),)
_t = msat_make_and(menv, _t,
msat_make_or(menv,
msat_make_equal(menv, x_xs[9], expr0),
msat_make_equal(menv, x_xs[9], expr1),
msat_make_equal(menv, x_xs[9], expr2),
msat_make_equal(menv, x_xs[9], expr3),
msat_make_equal(menv, x_xs[9], expr4),
msat_make_equal(menv, x_xs[9], expr5),
msat_make_equal(menv, x_xs[9], expr6),
msat_make_equal(menv, x_xs[9], expr7),
msat_make_equal(menv, x_xs[9], expr8),
msat_make_equal(menv, x_xs[9], expr9),
msat_make_equal(menv, x_xs[9], expr10),
msat_make_equal(menv, x_xs[9], expr11),))
trans = msat_make_and(menv, trans, _t)
expr0 = msat_make_plus(menv, xs[1], n_15_0)
expr1 = msat_make_plus(menv, xs[2], n_14_0)
expr2 = msat_make_plus(menv, xs[3], n_4_0)
expr3 = msat_make_plus(menv, xs[4], n_20_0)
expr4 = msat_make_plus(menv, xs[5], n_3_0)
expr5 = msat_make_plus(menv, xs[8], n_18_0)
expr6 = msat_make_plus(menv, xs[9], n_5_0)
expr7 = msat_make_plus(menv, xs[10], n_20_0)
expr8 = msat_make_plus(menv, xs[11], n_10_0)
expr9 = msat_make_plus(menv, xs[12], n_1_0)
expr10 = msat_make_plus(menv, xs[14], n_15_0)
expr11 = msat_make_plus(menv, xs[15], n_20_0)
_t = msat_make_and(menv,
msat_make_geq(menv, x_xs[10], expr0),
msat_make_geq(menv, x_xs[10], expr1),
msat_make_geq(menv, x_xs[10], expr2),
msat_make_geq(menv, x_xs[10], expr3),
msat_make_geq(menv, x_xs[10], expr4),
msat_make_geq(menv, x_xs[10], expr5),
msat_make_geq(menv, x_xs[10], expr6),
msat_make_geq(menv, x_xs[10], expr7),
msat_make_geq(menv, x_xs[10], expr8),
msat_make_geq(menv, x_xs[10], expr9),
msat_make_geq(menv, x_xs[10], expr10),
msat_make_geq(menv, x_xs[10], expr11),)
_t = msat_make_and(menv, _t,
msat_make_or(menv,
msat_make_equal(menv, x_xs[10], expr0),
msat_make_equal(menv, x_xs[10], expr1),
msat_make_equal(menv, x_xs[10], expr2),
msat_make_equal(menv, x_xs[10], expr3),
msat_make_equal(menv, x_xs[10], expr4),
msat_make_equal(menv, x_xs[10], expr5),
msat_make_equal(menv, x_xs[10], expr6),
msat_make_equal(menv, x_xs[10], expr7),
msat_make_equal(menv, x_xs[10], expr8),
msat_make_equal(menv, x_xs[10], expr9),
msat_make_equal(menv, x_xs[10], expr10),
msat_make_equal(menv, x_xs[10], expr11),))
trans = msat_make_and(menv, trans, _t)
expr0 = msat_make_plus(menv, xs[0], n_11_0)
expr1 = msat_make_plus(menv, xs[4], n_3_0)
expr2 = msat_make_plus(menv, xs[5], n_16_0)
expr3 = msat_make_plus(menv, xs[7], n_10_0)
expr4 = msat_make_plus(menv, xs[9], n_20_0)
expr5 = msat_make_plus(menv, xs[10], n_13_0)
expr6 = msat_make_plus(menv, xs[11], n_6_0)
expr7 = msat_make_plus(menv, xs[13], n_10_0)
expr8 = msat_make_plus(menv, xs[15], n_8_0)
expr9 = msat_make_plus(menv, xs[19], n_20_0)
expr10 = msat_make_plus(menv, xs[21], n_19_0)
expr11 = msat_make_plus(menv, xs[22], n_14_0)
_t = msat_make_and(menv,
msat_make_geq(menv, x_xs[11], expr0),
msat_make_geq(menv, x_xs[11], expr1),
msat_make_geq(menv, x_xs[11], expr2),
msat_make_geq(menv, x_xs[11], expr3),
msat_make_geq(menv, x_xs[11], expr4),
msat_make_geq(menv, x_xs[11], expr5),
msat_make_geq(menv, x_xs[11], expr6),
msat_make_geq(menv, x_xs[11], expr7),
msat_make_geq(menv, x_xs[11], expr8),
msat_make_geq(menv, x_xs[11], expr9),
msat_make_geq(menv, x_xs[11], expr10),
msat_make_geq(menv, x_xs[11], expr11),)
_t = msat_make_and(menv, _t,
msat_make_or(menv,
msat_make_equal(menv, x_xs[11], expr0),
msat_make_equal(menv, x_xs[11], expr1),
msat_make_equal(menv, x_xs[11], expr2),
msat_make_equal(menv, x_xs[11], expr3),
msat_make_equal(menv, x_xs[11], expr4),
msat_make_equal(menv, x_xs[11], expr5),
msat_make_equal(menv, x_xs[11], expr6),
msat_make_equal(menv, x_xs[11], expr7),
msat_make_equal(menv, x_xs[11], expr8),
msat_make_equal(menv, x_xs[11], expr9),
msat_make_equal(menv, x_xs[11], expr10),
msat_make_equal(menv, x_xs[11], expr11),))
trans = msat_make_and(menv, trans, _t)
expr0 = msat_make_plus(menv, xs[0], n_16_0)
expr1 = msat_make_plus(menv, xs[4], n_18_0)
expr2 = msat_make_plus(menv, xs[6], n_13_0)
expr3 = msat_make_plus(menv, xs[7], n_4_0)
expr4 = msat_make_plus(menv, xs[8], n_10_0)
expr5 = msat_make_plus(menv, xs[9], n_8_0)
expr6 = msat_make_plus(menv, xs[12], n_4_0)
expr7 = msat_make_plus(menv, xs[15], n_9_0)
expr8 = msat_make_plus(menv, xs[19], n_1_0)
expr9 = msat_make_plus(menv, xs[20], n_12_0)
expr10 = msat_make_plus(menv, xs[22], n_4_0)
expr11 = msat_make_plus(menv, xs[23], n_19_0)
_t = msat_make_and(menv,
msat_make_geq(menv, x_xs[12], expr0),
msat_make_geq(menv, x_xs[12], expr1),
msat_make_geq(menv, x_xs[12], expr2),
msat_make_geq(menv, x_xs[12], expr3),
msat_make_geq(menv, x_xs[12], expr4),
msat_make_geq(menv, x_xs[12], expr5),
msat_make_geq(menv, x_xs[12], expr6),
msat_make_geq(menv, x_xs[12], expr7),
msat_make_geq(menv, x_xs[12], expr8),
msat_make_geq(menv, x_xs[12], expr9),
msat_make_geq(menv, x_xs[12], expr10),
msat_make_geq(menv, x_xs[12], expr11),)
_t = msat_make_and(menv, _t,
msat_make_or(menv,
msat_make_equal(menv, x_xs[12], expr0),
msat_make_equal(menv, x_xs[12], expr1),
msat_make_equal(menv, x_xs[12], expr2),
msat_make_equal(menv, x_xs[12], expr3),
msat_make_equal(menv, x_xs[12], expr4),
msat_make_equal(menv, x_xs[12], expr5),
msat_make_equal(menv, x_xs[12], expr6),
msat_make_equal(menv, x_xs[12], expr7),
msat_make_equal(menv, x_xs[12], expr8),
msat_make_equal(menv, x_xs[12], expr9),
msat_make_equal(menv, x_xs[12], expr10),
msat_make_equal(menv, x_xs[12], expr11),))
trans = msat_make_and(menv, trans, _t)
expr0 = msat_make_plus(menv, xs[2], n_6_0)
expr1 = msat_make_plus(menv, xs[5], n_16_0)
expr2 = msat_make_plus(menv, xs[6], n_9_0)
expr3 = msat_make_plus(menv, xs[7], n_13_0)
expr4 = msat_make_plus(menv, xs[8], n_14_0)
expr5 = msat_make_plus(menv, xs[9], n_13_0)
expr6 = msat_make_plus(menv, xs[11], n_7_0)
expr7 = msat_make_plus(menv, xs[13], n_9_0)
expr8 = msat_make_plus(menv, xs[15], n_1_0)
expr9 = msat_make_plus(menv, xs[19], n_19_0)
expr10 = msat_make_plus(menv, xs[21], n_7_0)
expr11 = msat_make_plus(menv, xs[23], n_14_0)
_t = msat_make_and(menv,
msat_make_geq(menv, x_xs[13], expr0),
msat_make_geq(menv, x_xs[13], expr1),
msat_make_geq(menv, x_xs[13], expr2),
msat_make_geq(menv, x_xs[13], expr3),
msat_make_geq(menv, x_xs[13], expr4),
msat_make_geq(menv, x_xs[13], expr5),
msat_make_geq(menv, x_xs[13], expr6),
msat_make_geq(menv, x_xs[13], expr7),
msat_make_geq(menv, x_xs[13], expr8),
msat_make_geq(menv, x_xs[13], expr9),
msat_make_geq(menv, x_xs[13], expr10),
msat_make_geq(menv, x_xs[13], expr11),)
_t = msat_make_and(menv, _t,
msat_make_or(menv,
msat_make_equal(menv, x_xs[13], expr0),
msat_make_equal(menv, x_xs[13], expr1),
msat_make_equal(menv, x_xs[13], expr2),
msat_make_equal(menv, x_xs[13], expr3),
msat_make_equal(menv, x_xs[13], expr4),
msat_make_equal(menv, x_xs[13], expr5),
msat_make_equal(menv, x_xs[13], expr6),
msat_make_equal(menv, x_xs[13], expr7),
msat_make_equal(menv, x_xs[13], expr8),
msat_make_equal(menv, x_xs[13], expr9),
msat_make_equal(menv, x_xs[13], expr10),
msat_make_equal(menv, x_xs[13], expr11),))
trans = msat_make_and(menv, trans, _t)
expr0 = msat_make_plus(menv, xs[2], n_19_0)
expr1 = msat_make_plus(menv, xs[6], n_4_0)
expr2 = msat_make_plus(menv, xs[7], n_8_0)
expr3 = msat_make_plus(menv, xs[9], n_11_0)
expr4 = msat_make_plus(menv, xs[10], n_14_0)
expr5 = msat_make_plus(menv, xs[11], n_20_0)
expr6 = msat_make_plus(menv, xs[12], n_16_0)
expr7 = msat_make_plus(menv, xs[15], n_15_0)
expr8 = msat_make_plus(menv, xs[17], n_15_0)
expr9 = msat_make_plus(menv, xs[21], n_10_0)
expr10 = msat_make_plus(menv, xs[22], n_7_0)
expr11 = msat_make_plus(menv, xs[23], n_17_0)
_t = msat_make_and(menv,
msat_make_geq(menv, x_xs[14], expr0),
msat_make_geq(menv, x_xs[14], expr1),
msat_make_geq(menv, x_xs[14], expr2),
msat_make_geq(menv, x_xs[14], expr3),
msat_make_geq(menv, x_xs[14], expr4),
msat_make_geq(menv, x_xs[14], expr5),
msat_make_geq(menv, x_xs[14], expr6),
msat_make_geq(menv, x_xs[14], expr7),
msat_make_geq(menv, x_xs[14], expr8),
msat_make_geq(menv, x_xs[14], expr9),
msat_make_geq(menv, x_xs[14], expr10),
msat_make_geq(menv, x_xs[14], expr11),)
_t = msat_make_and(menv, _t,
msat_make_or(menv,
msat_make_equal(menv, x_xs[14], expr0),
msat_make_equal(menv, x_xs[14], expr1),
msat_make_equal(menv, x_xs[14], expr2),
msat_make_equal(menv, x_xs[14], expr3),
msat_make_equal(menv, x_xs[14], expr4),
msat_make_equal(menv, x_xs[14], expr5),
msat_make_equal(menv, x_xs[14], expr6),
msat_make_equal(menv, x_xs[14], expr7),
msat_make_equal(menv, x_xs[14], expr8),
msat_make_equal(menv, x_xs[14], expr9),
msat_make_equal(menv, x_xs[14], expr10),
msat_make_equal(menv, x_xs[14], expr11),))
trans = msat_make_and(menv, trans, _t)
expr0 = msat_make_plus(menv, xs[0], n_8_0)
expr1 = msat_make_plus(menv, xs[1], n_11_0)
expr2 = msat_make_plus(menv, xs[3], n_7_0)
expr3 = msat_make_plus(menv, xs[4], n_11_0)
expr4 = msat_make_plus(menv, xs[5], n_6_0)
expr5 = msat_make_plus(menv, xs[8], n_15_0)
expr6 = msat_make_plus(menv, xs[9], n_5_0)
expr7 = msat_make_plus(menv, xs[11], n_9_0)
expr8 = msat_make_plus(menv, xs[13], n_17_0)
expr9 = msat_make_plus(menv, xs[14], n_18_0)
expr10 = msat_make_plus(menv, xs[15], n_10_0)
expr11 = msat_make_plus(menv, xs[19], n_3_0)
_t = msat_make_and(menv,
msat_make_geq(menv, x_xs[15], expr0),
msat_make_geq(menv, x_xs[15], expr1),
msat_make_geq(menv, x_xs[15], expr2),
msat_make_geq(menv, x_xs[15], expr3),
msat_make_geq(menv, x_xs[15], expr4),
msat_make_geq(menv, x_xs[15], expr5),
msat_make_geq(menv, x_xs[15], expr6),
msat_make_geq(menv, x_xs[15], expr7),
msat_make_geq(menv, x_xs[15], expr8),
msat_make_geq(menv, x_xs[15], expr9),
msat_make_geq(menv, x_xs[15], expr10),
msat_make_geq(menv, x_xs[15], expr11),)
_t = msat_make_and(menv, _t,
msat_make_or(menv,
msat_make_equal(menv, x_xs[15], expr0),
msat_make_equal(menv, x_xs[15], expr1),
msat_make_equal(menv, x_xs[15], expr2),
msat_make_equal(menv, x_xs[15], expr3),
msat_make_equal(menv, x_xs[15], expr4),
msat_make_equal(menv, x_xs[15], expr5),
msat_make_equal(menv, x_xs[15], expr6),
msat_make_equal(menv, x_xs[15], expr7),
msat_make_equal(menv, x_xs[15], expr8),
msat_make_equal(menv, x_xs[15], expr9),
msat_make_equal(menv, x_xs[15], expr10),
msat_make_equal(menv, x_xs[15], expr11),))
trans = msat_make_and(menv, trans, _t)
expr0 = msat_make_plus(menv, xs[0], n_11_0)
expr1 = msat_make_plus(menv, xs[5], n_19_0)
expr2 = msat_make_plus(menv, xs[8], n_2_0)
expr3 = msat_make_plus(menv, xs[9], n_6_0)
expr4 = msat_make_plus(menv, xs[10], n_6_0)
expr5 = msat_make_plus(menv, xs[13], n_18_0)
expr6 = msat_make_plus(menv, xs[14], n_17_0)
expr7 = msat_make_plus(menv, xs[15], n_2_0)
expr8 = msat_make_plus(menv, xs[17], n_11_0)
expr9 = msat_make_plus(menv, xs[20], n_18_0)
expr10 = msat_make_plus(menv, xs[21], n_14_0)
expr11 = msat_make_plus(menv, xs[23], n_2_0)
_t = msat_make_and(menv,
msat_make_geq(menv, x_xs[16], expr0),
msat_make_geq(menv, x_xs[16], expr1),
msat_make_geq(menv, x_xs[16], expr2),
msat_make_geq(menv, x_xs[16], expr3),
msat_make_geq(menv, x_xs[16], expr4),
msat_make_geq(menv, x_xs[16], expr5),
msat_make_geq(menv, x_xs[16], expr6),
msat_make_geq(menv, x_xs[16], expr7),
msat_make_geq(menv, x_xs[16], expr8),
msat_make_geq(menv, x_xs[16], expr9),
msat_make_geq(menv, x_xs[16], expr10),
msat_make_geq(menv, x_xs[16], expr11),)
_t = msat_make_and(menv, _t,
msat_make_or(menv,
msat_make_equal(menv, x_xs[16], expr0),
msat_make_equal(menv, x_xs[16], expr1),
msat_make_equal(menv, x_xs[16], expr2),
msat_make_equal(menv, x_xs[16], expr3),
msat_make_equal(menv, x_xs[16], expr4),
msat_make_equal(menv, x_xs[16], expr5),
msat_make_equal(menv, x_xs[16], expr6),
msat_make_equal(menv, x_xs[16], expr7),
msat_make_equal(menv, x_xs[16], expr8),
msat_make_equal(menv, x_xs[16], expr9),
msat_make_equal(menv, x_xs[16], expr10),
msat_make_equal(menv, x_xs[16], expr11),))
trans = msat_make_and(menv, trans, _t)
expr0 = msat_make_plus(menv, xs[1], n_13_0)
expr1 = msat_make_plus(menv, xs[3], n_15_0)
expr2 = msat_make_plus(menv, xs[4], n_10_0)
expr3 = msat_make_plus(menv, xs[6], n_8_0)
expr4 = msat_make_plus(menv, xs[9], n_2_0)
expr5 = msat_make_plus(menv, xs[11], n_19_0)
expr6 = msat_make_plus(menv, xs[12], n_5_0)
expr7 = msat_make_plus(menv, xs[13], n_7_0)
expr8 = msat_make_plus(menv, xs[14], n_8_0)
expr9 = msat_make_plus(menv, xs[18], n_9_0)
expr10 = msat_make_plus(menv, xs[21], n_7_0)
expr11 = msat_make_plus(menv, xs[22], n_9_0)
_t = msat_make_and(menv,
msat_make_geq(menv, x_xs[17], expr0),
msat_make_geq(menv, x_xs[17], expr1),
msat_make_geq(menv, x_xs[17], expr2),
msat_make_geq(menv, x_xs[17], expr3),
msat_make_geq(menv, x_xs[17], expr4),
msat_make_geq(menv, x_xs[17], expr5),
msat_make_geq(menv, x_xs[17], expr6),
msat_make_geq(menv, x_xs[17], expr7),
msat_make_geq(menv, x_xs[17], expr8),
msat_make_geq(menv, x_xs[17], expr9),
msat_make_geq(menv, x_xs[17], expr10),
msat_make_geq(menv, x_xs[17], expr11),)
_t = msat_make_and(menv, _t,
msat_make_or(menv,
msat_make_equal(menv, x_xs[17], expr0),
msat_make_equal(menv, x_xs[17], expr1),
msat_make_equal(menv, x_xs[17], expr2),
msat_make_equal(menv, x_xs[17], expr3),
msat_make_equal(menv, x_xs[17], expr4),
msat_make_equal(menv, x_xs[17], expr5),
msat_make_equal(menv, x_xs[17], expr6),
msat_make_equal(menv, x_xs[17], expr7),
msat_make_equal(menv, x_xs[17], expr8),
msat_make_equal(menv, x_xs[17], expr9),
msat_make_equal(menv, x_xs[17], expr10),
msat_make_equal(menv, x_xs[17], expr11),))
trans = msat_make_and(menv, trans, _t)
expr0 = msat_make_plus(menv, xs[0], n_6_0)
expr1 = msat_make_plus(menv, xs[3], n_10_0)
expr2 = msat_make_plus(menv, xs[4], n_1_0)
expr3 = msat_make_plus(menv, xs[5], n_14_0)
expr4 = msat_make_plus(menv, xs[7], n_8_0)
expr5 = msat_make_plus(menv, xs[8], n_17_0)
expr6 = msat_make_plus(menv, xs[9], n_2_0)
expr7 = msat_make_plus(menv, xs[15], n_3_0)
expr8 = msat_make_plus(menv, xs[20], n_8_0)
expr9 = msat_make_plus(menv, xs[21], n_10_0)
expr10 = msat_make_plus(menv, xs[22], n_17_0)
expr11 = msat_make_plus(menv, xs[23], n_1_0)
_t = msat_make_and(menv,
msat_make_geq(menv, x_xs[18], expr0),
msat_make_geq(menv, x_xs[18], expr1),
msat_make_geq(menv, x_xs[18], expr2),
msat_make_geq(menv, x_xs[18], expr3),
msat_make_geq(menv, x_xs[18], expr4),
msat_make_geq(menv, x_xs[18], expr5),
msat_make_geq(menv, x_xs[18], expr6),
msat_make_geq(menv, x_xs[18], expr7),
msat_make_geq(menv, x_xs[18], expr8),
msat_make_geq(menv, x_xs[18], expr9),
msat_make_geq(menv, x_xs[18], expr10),
msat_make_geq(menv, x_xs[18], expr11),)
_t = msat_make_and(menv, _t,
msat_make_or(menv,
msat_make_equal(menv, x_xs[18], expr0),
msat_make_equal(menv, x_xs[18], expr1),
msat_make_equal(menv, x_xs[18], expr2),
msat_make_equal(menv, x_xs[18], expr3),
msat_make_equal(menv, x_xs[18], expr4),
msat_make_equal(menv, x_xs[18], expr5),
msat_make_equal(menv, x_xs[18], expr6),
msat_make_equal(menv, x_xs[18], expr7),
msat_make_equal(menv, x_xs[18], expr8),
msat_make_equal(menv, x_xs[18], expr9),
msat_make_equal(menv, x_xs[18], expr10),
msat_make_equal(menv, x_xs[18], expr11),))
trans = msat_make_and(menv, trans, _t)
expr0 = msat_make_plus(menv, xs[0], n_15_0)
expr1 = msat_make_plus(menv, xs[3], n_7_0)
expr2 = msat_make_plus(menv, xs[6], n_10_0)
expr3 = msat_make_plus(menv, xs[8], n_7_0)
expr4 = msat_make_plus(menv, xs[10], n_17_0)
expr5 = msat_make_plus(menv, xs[11], n_3_0)
expr6 = msat_make_plus(menv, xs[12], n_20_0)
expr7 = msat_make_plus(menv, xs[15], n_8_0)
expr8 = msat_make_plus(menv, xs[17], n_7_0)
expr9 = msat_make_plus(menv, xs[18], n_4_0)
expr10 = msat_make_plus(menv, xs[19], n_20_0)
expr11 = msat_make_plus(menv, xs[20], n_15_0)
_t = msat_make_and(menv,
msat_make_geq(menv, x_xs[19], expr0),
msat_make_geq(menv, x_xs[19], expr1),
msat_make_geq(menv, x_xs[19], expr2),
msat_make_geq(menv, x_xs[19], expr3),
msat_make_geq(menv, x_xs[19], expr4),
msat_make_geq(menv, x_xs[19], expr5),
msat_make_geq(menv, x_xs[19], expr6),
msat_make_geq(menv, x_xs[19], expr7),
msat_make_geq(menv, x_xs[19], expr8),
msat_make_geq(menv, x_xs[19], expr9),
msat_make_geq(menv, x_xs[19], expr10),
msat_make_geq(menv, x_xs[19], expr11),)
_t = msat_make_and(menv, _t,
msat_make_or(menv,
msat_make_equal(menv, x_xs[19], expr0),
msat_make_equal(menv, x_xs[19], expr1),
msat_make_equal(menv, x_xs[19], expr2),
msat_make_equal(menv, x_xs[19], expr3),
msat_make_equal(menv, x_xs[19], expr4),
msat_make_equal(menv, x_xs[19], expr5),
msat_make_equal(menv, x_xs[19], expr6),
msat_make_equal(menv, x_xs[19], expr7),
msat_make_equal(menv, x_xs[19], expr8),
msat_make_equal(menv, x_xs[19], expr9),
msat_make_equal(menv, x_xs[19], expr10),
msat_make_equal(menv, x_xs[19], expr11),))
trans = msat_make_and(menv, trans, _t)
expr0 = msat_make_plus(menv, xs[5], n_17_0)
expr1 = msat_make_plus(menv, xs[6], n_6_0)
expr2 = msat_make_plus(menv, xs[7], n_5_0)
expr3 = msat_make_plus(menv, xs[9], n_12_0)
expr4 = msat_make_plus(menv, xs[10], n_13_0)
expr5 = msat_make_plus(menv, xs[12], n_3_0)
expr6 = msat_make_plus(menv, xs[15], n_14_0)
expr7 = msat_make_plus(menv, xs[16], n_17_0)
expr8 = msat_make_plus(menv, xs[17], n_10_0)
expr9 = msat_make_plus(menv, xs[19], n_3_0)
expr10 = msat_make_plus(menv, xs[22], n_18_0)
expr11 = msat_make_plus(menv, xs[23], n_1_0)
_t = msat_make_and(menv,
msat_make_geq(menv, x_xs[20], expr0),
msat_make_geq(menv, x_xs[20], expr1),
msat_make_geq(menv, x_xs[20], expr2),
msat_make_geq(menv, x_xs[20], expr3),
msat_make_geq(menv, x_xs[20], expr4),
msat_make_geq(menv, x_xs[20], expr5),
msat_make_geq(menv, x_xs[20], expr6),
msat_make_geq(menv, x_xs[20], expr7),
msat_make_geq(menv, x_xs[20], expr8),
msat_make_geq(menv, x_xs[20], expr9),
msat_make_geq(menv, x_xs[20], expr10),
msat_make_geq(menv, x_xs[20], expr11),)
_t = msat_make_and(menv, _t,
msat_make_or(menv,
msat_make_equal(menv, x_xs[20], expr0),
msat_make_equal(menv, x_xs[20], expr1),
msat_make_equal(menv, x_xs[20], expr2),
msat_make_equal(menv, x_xs[20], expr3),
msat_make_equal(menv, x_xs[20], expr4),
msat_make_equal(menv, x_xs[20], expr5),
msat_make_equal(menv, x_xs[20], expr6),
msat_make_equal(menv, x_xs[20], expr7),
msat_make_equal(menv, x_xs[20], expr8),
msat_make_equal(menv, x_xs[20], expr9),
msat_make_equal(menv, x_xs[20], expr10),
msat_make_equal(menv, x_xs[20], expr11),))
trans = msat_make_and(menv, trans, _t)
expr0 = msat_make_plus(menv, xs[1], n_15_0)
expr1 = msat_make_plus(menv, xs[4], n_17_0)
expr2 = msat_make_plus(menv, xs[5], n_19_0)
expr3 = msat_make_plus(menv, xs[6], n_2_0)
expr4 = msat_make_plus(menv, xs[8], n_19_0)
expr5 = msat_make_plus(menv, xs[9], n_2_0)
expr6 = msat_make_plus(menv, xs[15], n_10_0)
expr7 = msat_make_plus(menv, xs[16], n_17_0)
expr8 = msat_make_plus(menv, xs[17], n_9_0)
expr9 = msat_make_plus(menv, xs[18], n_9_0)
expr10 = msat_make_plus(menv, xs[20], n_9_0)
expr11 = msat_make_plus(menv, xs[23], n_12_0)
_t = msat_make_and(menv,
msat_make_geq(menv, x_xs[21], expr0),
msat_make_geq(menv, x_xs[21], expr1),
msat_make_geq(menv, x_xs[21], expr2),
msat_make_geq(menv, x_xs[21], expr3),
msat_make_geq(menv, x_xs[21], expr4),
msat_make_geq(menv, x_xs[21], expr5),
msat_make_geq(menv, x_xs[21], expr6),
msat_make_geq(menv, x_xs[21], expr7),
msat_make_geq(menv, x_xs[21], expr8),
msat_make_geq(menv, x_xs[21], expr9),
msat_make_geq(menv, x_xs[21], expr10),
msat_make_geq(menv, x_xs[21], expr11),)
_t = msat_make_and(menv, _t,
msat_make_or(menv,
msat_make_equal(menv, x_xs[21], expr0),
msat_make_equal(menv, x_xs[21], expr1),
msat_make_equal(menv, x_xs[21], expr2),
msat_make_equal(menv, x_xs[21], expr3),
msat_make_equal(menv, x_xs[21], expr4),
msat_make_equal(menv, x_xs[21], expr5),
msat_make_equal(menv, x_xs[21], expr6),
msat_make_equal(menv, x_xs[21], expr7),
msat_make_equal(menv, x_xs[21], expr8),
msat_make_equal(menv, x_xs[21], expr9),
msat_make_equal(menv, x_xs[21], expr10),
msat_make_equal(menv, x_xs[21], expr11),))
trans = msat_make_and(menv, trans, _t)
expr0 = msat_make_plus(menv, xs[0], n_10_0)
expr1 = msat_make_plus(menv, xs[2], n_19_0)
expr2 = msat_make_plus(menv, xs[5], n_12_0)
expr3 = msat_make_plus(menv, xs[6], n_14_0)
expr4 = msat_make_plus(menv, xs[7], n_20_0)
expr5 = msat_make_plus(menv, xs[8], n_3_0)
expr6 = msat_make_plus(menv, xs[9], n_1_0)
expr7 = msat_make_plus(menv, xs[10], n_8_0)
expr8 = msat_make_plus(menv, xs[11], n_9_0)
expr9 = msat_make_plus(menv, xs[13], n_10_0)
expr10 = msat_make_plus(menv, xs[16], n_16_0)
expr11 = msat_make_plus(menv, xs[21], n_9_0)
_t = msat_make_and(menv,
msat_make_geq(menv, x_xs[22], expr0),
msat_make_geq(menv, x_xs[22], expr1),
msat_make_geq(menv, x_xs[22], expr2),
msat_make_geq(menv, x_xs[22], expr3),
msat_make_geq(menv, x_xs[22], expr4),
msat_make_geq(menv, x_xs[22], expr5),
msat_make_geq(menv, x_xs[22], expr6),
msat_make_geq(menv, x_xs[22], expr7),
msat_make_geq(menv, x_xs[22], expr8),
msat_make_geq(menv, x_xs[22], expr9),
msat_make_geq(menv, x_xs[22], expr10),
msat_make_geq(menv, x_xs[22], expr11),)
_t = msat_make_and(menv, _t,
msat_make_or(menv,
msat_make_equal(menv, x_xs[22], expr0),
msat_make_equal(menv, x_xs[22], expr1),
msat_make_equal(menv, x_xs[22], expr2),
msat_make_equal(menv, x_xs[22], expr3),
msat_make_equal(menv, x_xs[22], expr4),
msat_make_equal(menv, x_xs[22], expr5),
msat_make_equal(menv, x_xs[22], expr6),
msat_make_equal(menv, x_xs[22], expr7),
msat_make_equal(menv, x_xs[22], expr8),
msat_make_equal(menv, x_xs[22], expr9),
msat_make_equal(menv, x_xs[22], expr10),
msat_make_equal(menv, x_xs[22], expr11),))
trans = msat_make_and(menv, trans, _t)
expr0 = msat_make_plus(menv, xs[0], n_5_0)
expr1 = msat_make_plus(menv, xs[1], n_10_0)
expr2 = msat_make_plus(menv, xs[3], n_11_0)
expr3 = msat_make_plus(menv, xs[4], n_19_0)
expr4 = msat_make_plus(menv, xs[9], n_2_0)
expr5 = msat_make_plus(menv, xs[10], n_13_0)
expr6 = msat_make_plus(menv, xs[11], n_18_0)
expr7 = msat_make_plus(menv, xs[15], n_14_0)
expr8 = msat_make_plus(menv, xs[16], n_3_0)
expr9 = msat_make_plus(menv, xs[19], n_9_0)
expr10 = msat_make_plus(menv, xs[20], n_1_0)
expr11 = msat_make_plus(menv, xs[22], n_17_0)
_t = msat_make_and(menv,
msat_make_geq(menv, x_xs[23], expr0),
msat_make_geq(menv, x_xs[23], expr1),
msat_make_geq(menv, x_xs[23], expr2),
msat_make_geq(menv, x_xs[23], expr3),
msat_make_geq(menv, x_xs[23], expr4),
msat_make_geq(menv, x_xs[23], expr5),
msat_make_geq(menv, x_xs[23], expr6),
msat_make_geq(menv, x_xs[23], expr7),
msat_make_geq(menv, x_xs[23], expr8),
msat_make_geq(menv, x_xs[23], expr9),
msat_make_geq(menv, x_xs[23], expr10),
msat_make_geq(menv, x_xs[23], expr11),)
_t = msat_make_and(menv, _t,
msat_make_or(menv,
msat_make_equal(menv, x_xs[23], expr0),
msat_make_equal(menv, x_xs[23], expr1),
msat_make_equal(menv, x_xs[23], expr2),
msat_make_equal(menv, x_xs[23], expr3),
msat_make_equal(menv, x_xs[23], expr4),
msat_make_equal(menv, x_xs[23], expr5),
msat_make_equal(menv, x_xs[23], expr6),
msat_make_equal(menv, x_xs[23], expr7),
msat_make_equal(menv, x_xs[23], expr8),
msat_make_equal(menv, x_xs[23], expr9),
msat_make_equal(menv, x_xs[23], expr10),
msat_make_equal(menv, x_xs[23], expr11),))
trans = msat_make_and(menv, trans, _t)
# ltl property: (X (F (G (F (x_3 - x_20 >= 1)))))
ltl = enc.make_X(enc.make_F(enc.make_G(enc.make_F(msat_make_geq(menv, msat_make_minus(menv, xs[3], xs[20]), msat_make_number(menv, "1"))))))
return TermMap(curr2next), init, trans, ltl
| [
"en.magnago@gmail.com"
] | en.magnago@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.