content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
import os
import json
import sqlite3
import pandas as pd
# For PostgreSQL
import psycopg2 as psycho
from psycopg2.extras import execute_values
# .env for security -- load_dotenv necessary for python to actually go into
# .env folder and load the information
from dotenv import load_dotenv
load_dotenv()
# os filepath
DATABASE_FILEPATH = os.path.join(os.path.dirname(__file__), "rpg_db.sqlite3")
## SQLite Connection
lite_conn = sqlite3.connect(DATABASE_FILEPATH)
# Option to use row_factory
lite_conn.row_factory = sqlite3.Row
#print(type(conn))
## SQLite Cursor via lite_conn
lite_curs = lite_conn.cursor()
#print(type(curs))
## Queries
# How many total Characters are there?
query1 = """
SELECT *
FROM charactercreator_character
"""
# ANSWER:
lite_result = lite_curs.execute(query1).fetchall()
lite_var = [list(x) for x in lite_result]
#print(lite_var)
#print('\n')
##### Pandas #####
# Creating DataFrame
df = pd.DataFrame(lite_var, columns = ['id', 'names', 'level',
'exp', 'hp', 'strength',
'iq', 'dexterity', 'wisdom'])
#print(df.head())
##### SQL #####
#df.to_sql("rpg_table", con=engine, if_exists="replace", index=False)
## ** DON'T NEED THIS: to_sql takes care of this **
# #dtype={"id": "INTEGER",
# "names": "VARCHAR(50)",
# "level": "INTEGER",
# "exp": "INTEGER",
# "hp": "INTEGER",
# "strength": "INTEGER",
# "iq": "INTEGER",
# "dexterity":"INTEGER",
# "wisdom":"INTEGER"})
##### Titanic #####
from sqlalchemy import create_engine
DB_URL = os.getenv("DB_URL", default='OOPS')
engine = create_engine(DB_URL, echo=False)
## Read_csv of titanic
#titanic = pd.read_csv('titanic.csv')
## Converting to_sql
#titanic.to_sql('titanic_table', con=engine, if_exists="replace", index=False)
## Environment Variables for PostgreSQL
DB_NAME = os.getenv("DB_NAME", default='OOPS')
DB_USER = os.getenv("DB_USER", default="OOPS")
DB_PASSWORD = os.getenv("DB_PASSWORD", default='OOPS')
DB_HOST = os.getenv("DB_HOST", default='OOPS')
## PostgreSQL Connection Object
gres_conn = psycho.connect(dbname=DB_NAME, user=DB_USER,
password=DB_PASSWORD, host=DB_HOST)
#print(type(conn))
## PostgreSQL Cursor Object
gres_curs = gres_conn.cursor()
#print(type(curs))
### Queries ###
# query1
query1 = """
SELECT
"Sex"
,count("Sex") as Sex_count
,AVG("Age") as Sex_AVG_Age
,AVG("Fare") as Sex_AVG_Fare
FROM titanic_table
GROUP BY "Sex"
"""
# RESULTS: query1
result = gres_curs.execute(query1)
### ** for PostgreSQL, you CANNOT combine .execute() and .fetchall() ** ###
results = gres_curs.fetchall()
print('\n')
print(results)
print('\n')
# query2
query2 = """
SELECT
AVG("Survived") as Average_Survival_Rate
,AVG("Fare") as Average_Fare
,AVG("Age") as Average_Age
FROM titanic_table
"""
# RESULTS: query2
result = gres_curs.execute(query2)
results = gres_curs.fetchall()
print(results)
print('\n')
# query3
query3 = """
SELECT
"Pclass"
,count("Pclass") as Pclass_class
,AVG("Fare") as Pclass_Fare_AVG
FROM titanic_table
GROUP BY "Pclass"
"""
# RESULTS: query3
result = gres_curs.execute(query3)
results = gres_curs.fetchall()
print(results)
print('\n')
##### MIKE SOLUTION 1: #####
# import os
# #import json
# import pandas
# import numpy as np
# import psycopg2 as psycho
# from psycopg2.extras import execute_values
### *** Helps with changing numpy.int64 to int4 for SQL *** ###
# psycopg2.extensions.register_adapter(np.int64, psycopg2._psycopg.AsIs)
# from dotenv import load_dotenv # python-dotenv
# load_dotenv() #> loads contents of the .env file into the script's environment
### READ PASSENGER DATA FROM THE CSV FILE
# #CSV_FILEPATH = "titanic.csv"
# #CSV_FILEPATH = os.path.join(os.path.dirname(__file__), "titanic.csv")
# CSV_FILEPATH = os.path.join(os.path.dirname(__file__), "..", "module2-sql-for-analysis", "titanic.csv")
# df = pandas.read_csv(CSV_FILEPATH)
# print(df.dtypes)
# print(df.head())
### CONNECT TO THE PG DATABASE
# DB_NAME = os.getenv("DB_NAME", default="OOPS")
# DB_USER = os.getenv("DB_USER", default="OOPS")
# DB_PW = os.getenv("DB_PW", default="OOPS")
# DB_HOST = os.getenv("DB_HOST", default="OOPS")
# connection = psycopg2.connect(dbname=DB_NAME, user=DB_USER, password=DB_PW, host=DB_HOST)
# print(type(connection)) #> <class 'psycopg2.extensions.connection'>
# cursor = connection.cursor()
# print(type(cursor)) #> <class 'psycopg2.extensions.cursor'>
### CREATE A TABLE TO STORE THE PASSENGERS
# table_creation_sql = """
# DROP TABLE IF EXISTS passengers;
# CREATE TABLE IF NOT EXISTS passengers (
# id SERIAL PRIMARY KEY,
# "survived" int4, -- consider boolean here
# "pclass" int4,
# "name" text,
# "sex" text,
# "age" int4,
# "sib_spouse_count" int4,
# "parent_child_count" int4,
# "fare" float8
# );
# """
# cursor.execute(table_creation_sql)
### INSERT DATA INTO THE PASSENGERS TABLE
## how to convert dataframe to a list of tuples?
# list_of_tuples = list(df.to_records(index=False))
# insertion_query = f"INSERT INTO passengers (survived, pclass, name, sex, age, sib_spouse_count, parent_child_count, fare) VALUES %s"
# execute_values(cursor, insertion_query, list_of_tuples)
# connection.commit() # actually save the records / run the transaction to insert rows
# cursor.close()
# connection.close()
##### MIKE SOLUTION 2: #####
# to get over errors about not being able to work with the numpy integer datatypes
# could alternatively change the datatypes of our dataframe,
# ... or do transformations on our list of tuples later (after reading from the dataframe, before inserting into the table)
# psycopg2.extensions.register_adapter(np.int64, psycopg2._psycopg.AsIs)
### .env Name, User, Password and Host variables
# DB_NAME = os.getenv("DB_NAME")
# DB_USER = os.getenv("DB_USER")
# DB_PASSWORD = os.getenv("DB_PASSWORD")
# DB_HOST = os.getenv("DB_HOST")
### Read_CSV via os filepath
# CSV_FILEPATH = os.path.join(os.path.dirname(__file__), "titanic.csv")
# df = pandas.read_csv(CSV_FILEPATH)
# df.index += 1 # to start index at 1 (resembling primary key behavior)
# print(df.head())
### Connection to PostgreSQL
# gres_conn = psycho.connect(dbname=DB_NAME, user=DB_USER, password=DB_PASSWORD, host=DB_HOST)
# print(type(gres_conn)) # <class 'psycopg2.extensions.connection'>
### Cursor to PostgreSQL
# gres_curs = connection.cursor()
# print(type(gres_cursor)) # <class 'psycopg2.extensions.cursor'>
### Querying SQL
# query = """SELECT * from test_table;"""
# cursor.execute(query)
# results = cursor.fetchall()
# print(type(results)) #> list
# print(results)
### Creating the table
## Table Creation Query
# table_creation_query = """
# DROP TABLE passengers;
# CREATE TABLE IF NOT EXISTS passengers (
# id SERIAL PRIMARY KEY,
# survived integer,
# pclass integer,
# name varchar NOT NULL,
# gender varchar NOT NULL,
# age float,
# sib_spouse_count integer,
# parent_child_count integer,
# fare float
# );
# """
# cursor.execute(table_creation_query)
## Converting df into a list of tuples
# list_of_tuples = list(df.to_records(index=True))
# sometimes would need to do further transformations (list comprehension,etc.)
## Creating insertion query
# insertion_query = """
# INSERT INTO passengers (id, survived, pclass, name, gender, age, sib_spouse_count, parent_child_count, fare) VALUES %s
# """
## Executing values
# execute_values(cursor, insertion_query, list_of_tuples)
## Saving results via commit
# connection.commit()
## Closing connection
# cursor.close()
# connection.close()
##### ALTERNATE SOLUTION: #####
## SQLite execution and check
#q1 = lite_curs.execute(get_first_table).fetchall()
#print(q1[0])
## read_sql and check
#armory_items = pd.read_sql(sql=get_first_table, con=lite_conn)
#print(armory_items)
## PostgreSQL connection and cursor
#gres_conn = psycho.connect(dbname=DB_NAME, user=DB_USER, password=DB_PW, host=DB_HOST)
#gres_curs = gres_conn.cursor()
## Creating the Table
#create_table = '''
#create table if not exists armory_items(
# item_id INTEGER NOT NULL PRIMARY KEY,
# name varchar(200),
# value INTEGER,
# weight INTEGER
#)
#'''
## Commit changes which actually creates the table
# gres_curs.execute(create_table)
# gres_curs.fetchall()
## ** NEED to break these two steps apart for PostgreSQL; **
## ** [ex.] can't be .execute(something).fetchall() **
# gres_conn.commit()
# # insertion query string
# insertion_query = f"INSERT INTO armory_items (item_id, name, value, weight) VALUES %s"
# # use insertion query above and q1 (first query), to insert table into postgresql
# execute_values(gres_curs, insertion_query, q1)
# gres_conn.commit()
# gres_curs.close()
# gres_conn.close() | [
11748,
28686,
220,
198,
11748,
33918,
198,
11748,
44161,
578,
18,
198,
11748,
19798,
292,
355,
279,
67,
198,
198,
2,
1114,
2947,
47701,
198,
11748,
17331,
22163,
70,
17,
355,
30731,
198,
6738,
17331,
22163,
70,
17,
13,
2302,
8847,
133... | 2.577045 | 3,459 |
rows = int(input())
x = [list(map(int, input().split())) for i in range(rows)]
print(x)
| [
8516,
796,
493,
7,
15414,
28955,
198,
87,
796,
685,
4868,
7,
8899,
7,
600,
11,
5128,
22446,
35312,
3419,
4008,
329,
1312,
287,
2837,
7,
8516,
15437,
198,
4798,
7,
87,
8,
198
] | 2.588235 | 34 |
import sys
from termcolor import cprint
from colorama import init
from pyfiglet import figlet_format
import pyperclip
cprint(figlet_format('Geometry', font='small'), 'blue', attrs=['bold', 'blink'])
cprint('==============================================', 'white', attrs=['blink'])
cprint('Scientific Calculator v.0.0.0', 'blue', attrs=['bold'])
cprint('==============================================', 'white', attrs=['blink'])
print() | [
11748,
25064,
201,
198,
6738,
3381,
8043,
1330,
269,
4798,
201,
198,
6738,
3124,
1689,
1330,
2315,
201,
198,
6738,
12972,
5647,
1616,
1330,
2336,
1616,
62,
18982,
201,
198,
11748,
12972,
525,
15036,
201,
198,
201,
198,
66,
4798,
7,
56... | 3.262774 | 137 |
from abc import ABC, abstractmethod
| [
6738,
450,
66,
1330,
9738,
11,
12531,
24396,
628
] | 4.111111 | 9 |
import argparse
import mmcv
from mmcv import Config
from gwd.datasets.wheat_detection import WheatDataset
from mmdet.datasets import build_dataset
if __name__ == "__main__":
main()
| [
11748,
1822,
29572,
198,
198,
11748,
8085,
33967,
198,
6738,
8085,
33967,
1330,
17056,
198,
198,
6738,
308,
16993,
13,
19608,
292,
1039,
13,
12491,
265,
62,
15255,
3213,
1330,
34744,
27354,
292,
316,
198,
6738,
8085,
15255,
13,
19608,
2... | 2.808824 | 68 |
# pylint: disable=unused-argument, pointless-string-statement
import sqlite3
from pantam import JSONResponse, PlainTextResponse
class Index:
"""
TRY THIS: curl --request GET 'http://localhost:5000'
"""
def fetch_all(self, request):
"""Fetch all items"""
database = sqlite3.connect("db")
cursor = database.cursor()
cursor.execute("SELECT * FROM users")
return JSONResponse(cursor.fetchall())
"""
TRY THIS: curl --request GET 'http://localhost:5000/1'
"""
def fetch_single(self, request):
"""Fetch single item"""
database = sqlite3.connect("db")
cursor = database.cursor()
uid = request.path_params["id"]
cursor.execute("SELECT * FROM users WHERE uid=?", (uid))
return JSONResponse(cursor.fetchone())
"""
TRY THIS:
curl --request POST 'http://localhost:5000' \
--header 'Content-Type: application/x-www-form-urlencoded' \
--data-urlencode 'first_name=Homer' \
--data-urlencode 'last_name=Simpson' \
--data-urlencode 'email=homer@donut.me'
"""
async def create(self, request):
"""Create an item"""
database = sqlite3.connect("db")
cursor = database.cursor()
data = await request.form()
cursor.execute("SELECT COUNT(*) FROM users")
count = cursor.fetchone()[0]
cursor.execute(
"INSERT INTO users VALUES (?,?,?,?)",
(count + 1, data["first_name"], data["last_name"], data["email"]),
)
database.commit()
return PlainTextResponse("Created!")
"""
TRY THIS:
curl --request PATCH 'http://localhost:5000/1' \
--header 'Content-Type: application/x-www-form-urlencoded' \
--data-urlencode 'last_name=Flanders'
"""
async def update(self, request):
"""Update an item"""
database = sqlite3.connect("db")
database.row_factory = sqlite3.Row
cursor = database.cursor()
uid = request.path_params["id"]
cursor.execute("SELECT * FROM users WHERE uid=?", (uid))
user = cursor.fetchone()
if user is not None:
data = await request.form()
cursor.execute(
"UPDATE users set first_name = ?, last_name = ?, email = ? WHERE uid=?",
(
data.get("first_name", user["first_name"]),
data.get("last_name", user["last_name"]),
data.get("email", user["email"]),
user["uid"],
),
)
database.commit()
return PlainTextResponse("Updated!")
else:
return PlainTextResponse("User Not Found", status_code=404)
"""
TRY THIS: curl --request DELETE 'http://localhost:5000/1'
"""
def delete(self, request):
"""Delete single item"""
database = sqlite3.connect("db")
cursor = database.cursor()
uid = request.path_params["id"]
cursor.execute("DELETE FROM users WHERE uid=?", (uid))
database.commit()
return PlainTextResponse("Deleted!")
| [
2,
279,
2645,
600,
25,
15560,
28,
403,
1484,
12,
49140,
11,
27158,
12,
8841,
12,
26090,
198,
198,
11748,
44161,
578,
18,
198,
6738,
15857,
321,
1330,
19449,
31077,
11,
28847,
8206,
31077,
628,
198,
4871,
12901,
25,
628,
220,
220,
22... | 2.288222 | 1,367 |
from Google import Create_Service
from googleapiclient.http import MediaFileUpload
CLIENT_SECRET_FILE = 'client_secrets.json' #API File Add Here(client Secret File)
API_NAME = 'youtube'
API_VERSION = 'v3'
SCOPES = ['https://www.googleapis.com/auth/youtube.upload']
if __name__ == "__main__":
uploadOnYoutube() | [
6738,
3012,
1330,
13610,
62,
16177,
198,
6738,
23645,
499,
291,
75,
1153,
13,
4023,
1330,
6343,
8979,
41592,
198,
198,
5097,
28495,
62,
23683,
26087,
62,
25664,
796,
705,
16366,
62,
2363,
8004,
13,
17752,
6,
220,
1303,
17614,
9220,
30... | 2.925926 | 108 |
from silkyy.service import *
from silkyy.config import Config
from tornado.testing import AsyncHTTPTestCase
from silkyy.idworker import IdWorker
import os.path
import urllib
import logging
from six.moves.urllib.parse import urlencode
logger = logging.getLogger(__name__)
| [
6738,
3313,
2584,
88,
13,
15271,
1330,
1635,
201,
198,
6738,
3313,
2584,
88,
13,
11250,
1330,
17056,
201,
198,
6738,
33718,
13,
33407,
1330,
1081,
13361,
40717,
14402,
20448,
201,
198,
6738,
3313,
2584,
88,
13,
312,
28816,
1330,
5121,
... | 2.888889 | 99 |
from django.views import generic
from django.shortcuts import render, redirect
from django.contrib.auth import authenticate, login
from .forms import RegistrationForm
| [
6738,
42625,
14208,
13,
33571,
1330,
14276,
198,
6738,
42625,
14208,
13,
19509,
23779,
1330,
8543,
11,
18941,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
1330,
8323,
5344,
11,
17594,
198,
198,
6738,
764,
23914,
1330,
24610,
8479,
... | 4.047619 | 42 |
import pandas as pd
import sklearn
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import scale
import numpy as np
from sklearn.feature_extraction import DictVectorizer as DV
from sklearn.svm import SVC, LinearSVC
import csv
# Training data
# Data preparation with pandas
df1= pd.read_csv('train.csv')
df2= pd.read_csv('log_feature.csv')
df3=pd.read_csv('resource_type.csv')
df4=pd.read_csv('fault_type.csv')
df5=pd.read_csv('event_type.csv')
result = pd.merge(df1,df2, on='id')
result = pd.merge(result,df3, on='id')
result = pd.merge(result,df4, on='id')
result = pd.merge(result,df5, on='id')
#print(result.head())
result=result.drop_duplicates(keep='first')
#print(result.head())
X_train = result[['location','log_feature','resourve_type','type_of_faults','event_type']]
#print(x_train.head())
Y_train = result[['fault_severity']]
#print(y_train.head())
#print(X_train.shape)
#print(Y_train.shape)
# Testing data
d1= pd.read_csv('test.csv')
d2= pd.read_csv('log_feature.csv')
d3=pd.read_csv('resource_type.csv')
d4=pd.read_csv('fault_type.csv')
d5=pd.read_csv('event_type.csv')
result1 = pd.merge(d1,d2, on='id')
result1 = pd.merge(result1,d3, on='id')
result1 = pd.merge(result1,d4, on='id')
result1 = pd.merge(result1,d5, on='id')
result1=result1.drop_duplicates(keep='first')
#print(result1.head())
X_test = result1[['location','log_feature','resourve_type','type_of_faults','event_type']]
#print(X_test.shape)
#cat_train=X_train.head(20)
#cat_test=X_test.head(10)
#Y_train=Y_train.head(20)
#print(Y_train.shape)
#print(Y_train)
x_train = X_train.to_dict( orient = 'records' )
x_test = X_test.to_dict( orient = 'records' )
# vectorize
vectorizer = DV( sparse = False )
vec_x_train = vectorizer.fit_transform( x_train )
vec_x_test = vectorizer.transform( x_test )
log=LogisticRegression(penalty='l2',C=1,class_weight='balanced')
log.fit(vec_x_train,Y_train.values.ravel())
p = log.predict(vec_x_test)
with open("output.csv",'wb') as resultFile:
wr = csv.writer(resultFile, dialect='excel')
wr.writerows(p)
#LinearSVC_classifier.fit(vec_x_cat_train,Y_train.values.ravel())
#p=LinearSVC_classifier.predict_proba(vec_x_cat_test)
#print(p) | [
11748,
19798,
292,
355,
279,
67,
198,
11748,
1341,
35720,
198,
6738,
1341,
35720,
13,
29127,
62,
19849,
1330,
5972,
2569,
8081,
2234,
198,
6738,
1341,
35720,
13,
3866,
36948,
1330,
1881,
21352,
27195,
12342,
198,
6738,
1341,
35720,
13,
... | 2.419251 | 935 |
# encoding: utf-8
try:
from django.conf.urls import patterns, url
except ImportError:
from django.conf.urls.defaults import patterns, url # Django < 1.6
from blog.models import Post, Blog
from blog import settings
urlpatterns = patterns('blog.views',
url(r'^$', 'post_list', name='blog_post_list'),
url(r'^my_posts/$', 'my_post_list', name='blog_my_post_list'),
url(r'^add/$', 'post_add', name='blog_post_add'),
url(r'^edit/(?P<id>\d+)/$', 'post_edit', name='blog_post_edit'),
url(r'^delete/(?P<id>\d+)/$', 'post_delete', name='blog_post_delete'),
url(r'^(?P<action>draft|public)/(?P<id>\d+)/$', 'post_change_status',
name='blog_post_change_status'),
url(r'^post/(?P<username>[\w\._\-]+)/(?P<slug>[-\w]+)/$',
'user_post_detail', name='blog_user_post_detail')
)
if settings.ENABLE_USER_BLOG:
urlpatterns += patterns('blog.views',
url(r'^user/(?P<username>[\w\._\-]+)/$', 'user_post_list',
{'compact_view': False}, name='blog_user_post_list'),
url(r'^user/(?P<username>[\w\._\-]+)/compact/$', 'user_post_list',
{'compact_view': True}, name='blog_user_post_compact_list'),
)
if settings.ENABLE_BLOGS:
urlpatterns += patterns('blog.views',
url(r'^blogs/$', 'blog_list', name='blog_list'),
url(r'^(?P<blog_slug>[-\w]+)/(?P<slug>[-\w]+)/$', 'post_detail', name='blog_post_detail'),
url(r'^(?P<slug>[-\w]+)/$', 'blog_detail', name='blog_detail'),
)
| [
2,
21004,
25,
3384,
69,
12,
23,
201,
198,
28311,
25,
201,
198,
220,
220,
220,
422,
42625,
14208,
13,
10414,
13,
6371,
82,
1330,
7572,
11,
19016,
201,
198,
16341,
17267,
12331,
25,
201,
198,
220,
220,
220,
422,
42625,
14208,
13,
10... | 2.127809 | 712 |
from Game import Piece, Position
P_gen = Position.Position
| [
6738,
3776,
1330,
27053,
11,
23158,
198,
198,
47,
62,
5235,
796,
23158,
13,
26545,
628,
628,
198
] | 3.555556 | 18 |
from hashlib import blake2s
| [
6738,
12234,
8019,
1330,
698,
539,
17,
82,
198
] | 3.111111 | 9 |
# Copyright (C) 2020-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
DOMAIN_CUSTOM_OPS_NAME = 'org.openvinotoolkit'
| [
2,
15069,
357,
34,
8,
12131,
12,
1238,
2481,
8180,
10501,
198,
2,
30628,
55,
12,
34156,
12,
33234,
7483,
25,
24843,
12,
17,
13,
15,
198,
2,
198,
39170,
29833,
62,
34,
7759,
2662,
62,
30737,
62,
20608,
796,
705,
2398,
13,
9654,
7... | 2.62 | 50 |
from typing import Any, Dict, List, Optional, Type
import torch
from torch import nn
from torch.utils.data import DataLoader
from tqdm import tqdm
from probnmn.config import Config
class _Evaluator(object):
r"""
A base class for generic evaluation of models. This class can have multiple models interacting
with each other, rather than a single model, which is suitable to our use-case (for example,
``module_training`` phase has two models:
:class:`~probnmn.models.program_generator.ProgramGenerator` and
:class:`~probnmn.models.nmn.NeuralModuleNetwork`). It offers full flexibility, with sensible
defaults which may be changed (or disabled) while extending this class.
Extended Summary
----------------
Extend this class and override :meth:`_do_iteration` method, with core evaluation loop - what
happens every iteration, given a ``batch`` from the dataloader this class holds.
Notes
-----
1. All models are `passed by assignment`, so they could be shared with an external trainer.
Do not set ``self._models = ...`` anywhere while extending this class.
2. An instantiation of this class will always be paired in conjunction to a
:class:`~probnmn.trainers._trainer._Trainer`. Pass the models of trainer class while
instantiating this class.
Parameters
----------
config: Config
A :class:`~probnmn.Config` object with all the relevant configuration parameters.
dataloader: torch.utils.data.DataLoader
A :class:`~torch.utils.data.DataLoader` which provides batches of evaluation examples. It
wraps one of :mod:`probnmn.data.datasets` depending on the evaluation phase.
models: Dict[str, Type[nn.Module]]
All the models which interact with each other for evaluation. These are one or more from
:mod:`probnmn.models` depending on the evaluation phase.
gpu_ids: List[int], optional (default=[0])
List of GPU IDs to use or evaluation, ``[-1]`` - use CPU.
"""
@property
def evaluate(self, num_batches: Optional[int] = None) -> Dict[str, Any]:
r"""
Perform evaluation using first ``num_batches`` of dataloader and return all evaluation
metrics from the models.
Parameters
----------
num_batches: int, optional (default=None)
Number of batches to use from dataloader. If ``None``, use all batches.
Returns
-------
Dict[str, Any]
Final evaluation metrics for all the models.
"""
# Switch all models to "eval" mode.
for model_name in self._models:
self._models[model_name].eval()
with torch.no_grad():
for iteration, batch in enumerate(tqdm(self._dataloader, desc="validation")):
for key in batch:
batch[key] = batch[key].to(self._device)
_ = self._do_iteration(batch)
if num_batches is not None and iteration > num_batches:
break
# keys: `self._models.keys()`
eval_metrics: Dict[str, Dict[str, Any]] = {}
for model_name in self._models:
# Get metrics recorded by a particular model. This `hasattr` check exists because
# it is a generic base class, all the models in `probnmn.models` implement a
# `get_metrics` method.
if hasattr(self._models[model_name], "get_metrics"):
# keys: names of metrics recorded by corresponding model.
eval_metrics[model_name] = self._models[model_name].get_metrics()
elif isinstance(self._models[model_name], nn.DataParallel):
if hasattr(self._models[model_name].module, "get_metrics"):
eval_metrics[model_name] = self._models[model_name].module.get_metrics()
# Switch all models back to "train" mode.
for model_name in self._models:
self._models[model_name].train()
return eval_metrics
def _do_iteration(self, batch: Dict[str, Any]) -> Dict[str, Any]:
r"""
Core evaluation logic for one iteration, operates on a batch. This base class has a dummy
implementation - just forward pass through some "model".
Parameters
----------
batch: Dict[str, Any]
A batch of evaluation examples sampled from dataloader. See :func:`evaluate` on how
this batch is sampled.
Returns
-------
Dict[str, Any]
An output dictionary typically returned by the models. This may contain predictions
from models, validation loss etc.
"""
output_dict = self._models["model"](batch)
return output_dict
| [
6738,
19720,
1330,
4377,
11,
360,
713,
11,
7343,
11,
32233,
11,
5994,
198,
198,
11748,
28034,
198,
6738,
28034,
1330,
299,
77,
198,
6738,
28034,
13,
26791,
13,
7890,
1330,
6060,
17401,
198,
6738,
256,
80,
36020,
1330,
256,
80,
36020,
... | 2.625276 | 1,812 |
#!/usr/bin/env python
#Author: James Nuttall (james@cromulence.co)
#Copyright (c) 2015 Cromulence LLC
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#The above copyright notice and this permission notice shall be included in
#all copies or substantial portions of the Software.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
#THE SOFTWARE.
from generator.actions import Actions, Variable
import struct
import random
import string
import sys
from collections import OrderedDict
#lengths of fields, used for offsets
SIZEOF_HEADER = 8
SIZEOF_CHAPTER = 6
SIZEOF_ENTRY = 7
# offsets into 'titles' and 'stit' lists
TIT_NAME = 0
TIT_FMT = 1
TIT_REQ = 2
TIT_MULT = 3
# this matches service.h enum Format
# matches service.h enum Title
#class T():
# cnt = 7
# IDENTITY, MOVIES, VEHICLES, BOOK, SONG, JOBS, SPORTS = range(cnt)
#class ST():
# cnt = 9
# KAYAKING, STAMPS, COINS, KNIVES, SHOOTING, SOCCER, HOCKEY, TENNIS, BASKETBALL = range(cnt)
titles_done = [] # keep track of the titles done for this chapter (resets after each chapter)
titles = []
#title, (entry name, entry format, required?, multiple allowed?)
titles.append(('identity', ('first name',Format.STRING, True, False),
('middle name',Format.STRING, False, False),
('last name',Format.STRING, True, False),
('age',Format.CHAR, True, False),
('weight',Format.SHORT, False, False),
('birth country',Format.STRING, False, False),
('birth state',Format.STRING, False, False),
('birth city',Format.STRING, False, False),
('married',Format.BOOL, False, False)))
titles.append(('movies', ('title',Format.STRING, True, False),
('actor',Format.STRING, False, True),
('director',Format.STRING, False, True),
('producer',Format.STRING, False, True),
('year released',Format.SHORT, False, False),
('plot summary',Format.STRING, True, False)))
titles.append(('vehicles', ('make',Format.STRING, True, False),
('model',Format.STRING, True, False),
('color',Format.STRING, False, True),
('displacement',Format.SHORT, False, False),
('displacement units',Format.CHAR, False, False),
('doors',Format.CHAR, True, False)))
titles.append(('books', ('author',Format.STRING, True, True),
('year',Format.CHAR, False, False),
('summary',Format.STRING, False, False),
('publisher',Format.STRING, False, False),
('character',Format.STRING, False, True),
('made into a movie',Format.BOOL, True, False)))
titles.append(('songs', ('writer',Format.STRING, True, True),
('year',Format.CHAR, False, False),
('genre',Format.STRING, False, False),
('publisher',Format.STRING, False, False),
('free online',Format.BOOL, True, False)))
titles.append(('jobs', ('title',Format.STRING, True, True),
('years',Format.CHAR, False, False),
('job category',Format.STRING, False, False),
('company',Format.STRING, True, False),
('have a best friend',Format.BOOL, False, False),
('salary',Format.INT, True, False)))
titles.append(('hobbies', ('sports',Format.PTR, False, False),
('exercises',Format.PTR, False, False),
('stamps',Format.PTR, False, False),
('knives',Format.PTR, False, False),
('kayaking',Format.PTR, False, False),
('coins',Format.PTR, False, False),
('knives',Format.PTR, False, False)))
titles.append(('pets', ('name',Format.STRING, True, True),
('species',Format.STRING, True, False),
('legs',Format.CHAR, False, False),
('inside only',Format.BOOL, False, False),
('age',Format.CHAR, False, False),
('shots updated',Format.BOOL, True, False)))
# store the subentries for hobbies
stit = []
stit.append(('shooting', ('make',Format.STRING, True, True),
('model',Format.STRING, True, True),
('league',Format.STRING, False, False),
('length',Format.CHAR, False, False),
('length units',Format.STRING, False, False),
('years experience',Format.CHAR, True, False),
('injury',Format.BOOL, False, False)))
stit.append(('knives', ('make',Format.STRING, False, False),
('model',Format.STRING, True, False),
('value',Format.SHORT, False, False),
('style',Format.STRING, False, False),
('blade length',Format.CHAR, True, False),
('length units',Format.STRING, True, False),
('comes with sheath',Format.BOOL, False, False),
('injury',Format.BOOL, True, False)))
stit.append(('stamps', ('name',Format.STRING, True, False),
('value',Format.SHORT, True, False),
('seller',Format.STRING, False, False),
('mint',Format.BOOL, True, False),
('count',Format.CHAR, True, False)))
stit.append(('kayaking', ('make',Format.STRING, True, False),
('model',Format.STRING, True, False),
('length',Format.CHAR, False, False),
('style',Format.STRING, False, False),
('initial stability',Format.CHAR, False, False),
('years experience',Format.CHAR, False, False),
('highest class',Format.CHAR, False, False),
('touring',Format.BOOL, False, False),
('surfing',Format.BOOL, False, False),
('tricking',Format.BOOL, False, False),
('injury',Format.BOOL, False, False)))
stit.append(('coins', ('name',Format.STRING, True, False),
('seller',Format.STRING, False, False),
('value',Format.INT, True, False),
('mint',Format.BOOL, True, False),
('count',Format.CHAR, True, False)))
stit.append(('exercises', ('name',Format.STRING, True, True),
('max weight',Format.SHORT, True, False),
('reps',Format.CHAR, False, False),
('sets',Format.CHAR, True, False),
('injury',Format.BOOL, False, False)))
stit.append(('sports', ('name',Format.STRING, True, False),
('position',Format.STRING, True, True),
('years experience',Format.CHAR, True, False),
('injury',Format.BOOL, False, False)))
isSubEntry = False # like Hobby, where we choose from the subentry table
# return the string representation of a title
# given the title as string, return the entry as a string and int
# ret: name, value, format
# make temp copy of all titles
# remove them if they can't be multiples
acceptable_list = []
# use titles_done
# how many required fields are in this chapter?
# book has chapters in it
# chapters have titles and content
# offset_to_me is the file offset to the start of this entry
# title is the title of this chapter. all of the tags should be correct for this title type
# e.g. TITLE is Identity, TAGs are: first_name, mid_name, weight, birthdate, height, age
# if TITLE is hobby, TAGs are: name, years, total_cost, club_name,
# if TITLE is sport, TAGs are: sport_name, years_exp, position,
# TITLE books, TAGs: title, # in series, publisher, year released first, year released last, made into movie?, num pages total
# TITLE diary entry, TAGs: date, heading, summary, alcohol involved?, time, place, friends_involved
# TITLE guns, TAGs: make, model, caliber, units, range, terminal velocity, capacity, barrel length, grain of bullet, #grains of 230,
# TITLE music, TAGs: artist, year, song name, publisher, available on internet?,
# TITLE Martial art, TAGs: country of origin, year of start, style name, rank, years, instructor name,
# TITLE Movie, TAGs: name, director, # main actors, names of actors in list, summary, date released
# chapter contains arbitrary number of entries within it
# offset_to_me is the file offset when this chapter entry started
# if 'last' is True, this is the last chapter
# Main entry point here
# randomly generate lengths and entries
# generate the text response at the same time | [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
2,
13838,
25,
3700,
11959,
35429,
357,
73,
1047,
31,
66,
398,
32401,
13,
1073,
8,
198,
198,
2,
15269,
357,
66,
8,
1853,
39131,
32401,
11419,
198,
198,
2,
5990,
3411,
318,
2937... | 2.805777 | 3,012 |
#!/usr/bin/env python3
import asyncio
import websockets
import heroku
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
11748,
30351,
952,
198,
11748,
2639,
11603,
198,
198,
11748,
4293,
23063,
628,
198
] | 2.96 | 25 |
class RabbitMQConfig:
"""
rabbitMQ配置对象
"""
host: str = "127.0.0.1"
port: int = 5672
username: str = "guest"
password: str = "guest",
virtual_host: str = "/"
| [
4871,
25498,
49215,
16934,
25,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
22746,
49215,
165,
227,
235,
163,
121,
106,
43380,
117,
164,
109,
94,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
2583,
25,
965,
796,
366,
16799,
13,
... | 1.96875 | 96 |
from . import ImageModels
from . import BytesModels
| [
6738,
764,
1330,
7412,
5841,
1424,
198,
6738,
764,
1330,
2750,
4879,
5841,
1424,
628
] | 3.533333 | 15 |
from pathlib import Path
ENDPOINT_BASE = 'https://api.binance.com/api/v3/'
DATA_DIRECTORY = Path(__file__).resolve().parents[1] / 'data'
DATABASE_NAME = 'binance.sqlite3'
WEIGHT_DECAY = 15.0
NUM_WORKERS = 16
| [
6738,
3108,
8019,
1330,
10644,
198,
198,
1677,
6322,
46,
12394,
62,
33,
11159,
796,
705,
5450,
1378,
15042,
13,
8800,
590,
13,
785,
14,
15042,
14,
85,
18,
14,
6,
198,
26947,
62,
17931,
23988,
15513,
796,
10644,
7,
834,
7753,
834,
... | 2.333333 | 90 |
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
"""Example code for manipulating libreoffice Calc."""
# standard imports
import numpy as np
from pathlib import Path
import sys
sys.path.append('/home/galdino/github/py-backpack')
import backpack.libremanip
import importlib
importlib.reload(backpack.libremanip)
from backpack.libremanip import soffice
# %%
try:
libreoffice.terminate(ask=False)
except: pass
libreoffice = soffice(norestore=True)
calcObject = libreoffice.calc()
calcObject.insert_sheets(name='heyy', position=1)
calcObject.insert_sheets(name=['Sheet2', 'Sheet5'], position=None)
calcObject.insert_sheets(name=['Sheet3', 'Sheet4'], position=4)
calcObject.get_sheets_count()
calcObject.get_sheets_name()
calcObject.remove_sheets_by_name(['heyy', 'Sheet5'])
calcObject.remove_sheets_by_position(4) # cannot remove more than one each time!
# calcObject.remove_sheets([1, 'Sheet1']) # will raise an ERROR!
calcObject.remove_sheets('Sheet3')
sheetObject = calcObject.get_sheets_by_name('Sheet1')
sheetObject, sheetObject2 = calcObject.get_sheets_by_name(['Sheet1', 'Sheet2'])
sheetObject, sheetObject2 = calcObject.get_sheets_by_position([1, 2])
sheetObject, sheetObject2 = calcObject.get_sheets([1, 'Sheet2'])
sheetObject.set_col_width(2500, 1)
sheetObject.set_col_width(2500, 'c')
sheetObject.set_col_width(2000, [2, 'D', 'e'])
print(sheetObject.get_col_width('b'))
print(sheetObject.get_col_width([2, 'D', 'z']))
sheetObject.set_row_height(800, 1)
sheetObject.set_row_height(800, [2, 3])
print(sheetObject.get_row_height(1))
print(sheetObject.get_row_height([1, 2, 6]))
sheetObject.set_row_values(['format', 'date', 'time', 'text', 'number', 'number as string', 'formula'], row=1)
sheetObject.set_col_values(['formula', 'string', 'number'], col=1, row_start=2)
sheetObject.set_cell_value(row=2, col=2, value='01/12/2016', format='formula')
sheetObject.set_cell_value(row=3, col=2, value='01/12/2016', format='string')
sheetObject.set_cell_value(row=4, col=2, value='01/12/2016', format='number')
sheetObject.set_cell_value(row=2, col=3, value='10:56', format='formula')
sheetObject.set_cell_value(row=3, col=3, value='10:56', format='string')
sheetObject.set_cell_value(row=4, col=3, value='10:56', format='number')
sheetObject.set_cell_value(row=2, col=4, value='heyy', format='formula')
sheetObject.set_cell_value(row=3, col=4, value='heyy', format='string')
# sheetObject.set_cell_value(row=4, col=4, value='heyy', format='number')
sheetObject.set_cell_value(row=4, col=4, value='ERROR', format='string')
sheetObject.set_cell_value(row=2, col=5, value=10.53, format='formula')
sheetObject.set_cell_value(row=3, col=5, value=10.53, format='string')
sheetObject.set_cell_value(row=4, col=5, value=10.53, format='number')
sheetObject.set_cell_value(row=2, col=6, value='10.53', format='formula')
sheetObject.set_cell_value(row=3, col=6, value='10.53', format='string')
sheetObject.set_cell_value(row=4, col=6, value='10.53', format='number')
sheetObject.set_cell_value(row=2, col=7, value='=F2*2', format='formula')
sheetObject.set_cell_value(row=3, col=7, value='=F2*2', format='string')
# sheetObject.set_cell_value(row=4, col=7, value='=F2*2', format='number')
sheetObject.set_cell_value(row=4, col=7, value='ERROR', format='string')
get_as_formula = sheetObject.get_cells_value(2, 1, 4, format='formula')
get_as_string = sheetObject.get_cells_value(2, 1, 4, format='string')
get_as_number = sheetObject.get_cells_value(2, 1, 4, format='number')
print(get_as_formula)
print(get_as_string)
print(get_as_number)
# set as formula unless date and time or if formulas must by writen as string (formula is nor evaluates)
# get as string
# set as string if date or time (numbers saved as string will be read as strings no matter what)
# get as string
# get as formula only if you need the non-evaluated string of a formula (number are read as strings)
# set and get as number only if other formats yield errors (format=formula will typically work fine for numbers)
# fake data
x = np.array([0,1,2,3,4,5,6,7,8,9,10])
y = x**2
y2 = x**3
y3 = x**4
data = np.zeros((len(x), 4))
data[:, 0] = x
data[:, 1] = y
data[:, 2] = y2
data[:, 3] = y3
# sending to sheet
sheetObject.set_row_values(['x', 'x**2', 'x**3', 'x**4'], row=6)
sheetObject.set_col_values(data[:, 0], 1, 7)
sheetObject.set_col_values(data[:, 1], 'b', 7)
sheetObject.set_cells_value(data[:, 2:], row_start=7, col_start=3)
dataFromSheet = sheetObject.get_cells_value(row_start=7, col_stop=4)
print(dataFromSheet)
dataFromSheet = np.array(dataFromSheet)
print(dataFromSheet)
# it also works with lists
data_aslist = data.tolist()
sheetObject.set_row_values(data=['x', 'x**2', 'x**3', 'x**4'], row=6, col_start='F')
sheetObject.set_cells_value(data=data_aslist, row_start=7, col_start='F')
# cell properties
sheetObject.list_cell_properties()
# some properties can easily
color = int('0xffff00', 16) # yellow
sheetObject.set_cell_property(property='CellBackColor', value=color, row=1, col=1)
colorObject, _ = sheetObject.get_cell_property('CellBackColor', 1, 1)
print(colorObject)
sheetObject.list_cell_properties(filter='border')
# some properties are tricky to change programatically
borderObject, subparameters = sheetObject.get_cell_property('BottomBorder', 2, 2)
# borderObject is a complex object
print(subparameters)
print(borderObject.Color)
print(borderObject.LineStyle)
print(borderObject.LineWidth)
borderObject.Color = int('0x301dde', 16)
borderObject.LineStyle = 2
borderObject.LineWidth = 100
sheetObject.set_cell_property(property='BottomBorder', value=borderObject, row=2, col=2)
# set many cells at once
sheetObject.set_cells_properties(property='CellBackColor', value=color, row_start=6, col_start=1, row_stop=6, col_stop='I')
colorObject, _ = sheetObject.get_cells_properties(property='CellBackColor', row_start=6, col_start=1, row_stop=6, col_stop='I')
# copy cell formatting
p_obj = sheetObject.get_cell_formating(row=1, col=1, extra=None)
sheetObject.set_cell_formating(p_obj, row=3, col=4, extra=None)
# copy to another sheet
sheet2, = calcObject.get_sheets('Sheet2')
p_obj = sheetObject.get_cell_formating(row=1, col=1, extra=None)
sheet2.set_cell_formating(p_obj, row=3, col=4, extra=None)
p_obj = sheet2.get_cell_formating(row=3, col=4, extra=None)
sheet2.set_cell_formating(p_obj, row=1, col=2, extra=None)
# copy whole formatting
object_formating = sheetObject.get_cells_formatting(row_start=1, col_start=1, extra=None)
sheet2.set_cells_formatting(object_formating, row_start=1, col_start=1, extra=None)
sheet2.set_cell_formating(object_formating[0][0], row=1, col=1, extra=None)
# copy values to another sheet
data = sheetObject.get_cells_value()
sheet2.set_cells_value(data)
# copy cell size
cols = np.arange(1, sheetObject.get_last_col()+1)
col_widths = sheetObject.get_col_width(cols)
for idx, _ in enumerate(cols):
sheet2.set_col_width(col_widths[idx], col=cols[idx])
rows = np.arange(1, sheetObject.get_last_row()+1)
row_height = sheetObject.get_row_height(rows)
for idx, _ in enumerate(rows):
sheet2.set_row_height(row_height[idx], row=rows[idx])
# save
calcObject.save('example')
# saving again does not require filename
calcObject.save()
| [
2,
0,
1220,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
16281,
2438,
329,
29349,
9195,
260,
31810,
2199,
66,
526,
15931,
198,
198,
2,
3210,
17944,
198,
11748,
2... | 2.671997 | 2,689 |
def test_udp_service(self):
"""
Comprobacion de que el servicio asociado al protocolo coincide
Returns:
"""
port = Ports.objects.get(Tag="ssh")
udp = Udp.objects.get(id=port)
self.assertEqual(udp.get_service(), "ssh")
| [
4299,
1332,
62,
463,
79,
62,
15271,
7,
944,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
955,
1676,
65,
49443,
390,
8358,
1288,
37756,
952,
355,
1733,
4533,
435,
8435,
78,
37319,
198,
220,
220,
220,
16409,
25,
628,
220,
22... | 2.445545 | 101 |
# Generated by Django 3.2.5 on 2021-07-11 17:00
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
513,
13,
17,
13,
20,
319,
33448,
12,
2998,
12,
1157,
1596,
25,
405,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 2.84375 | 32 |
"""
Reconstruction of image data from raw data.
"""
# TODO: define reconstruction algorithm independent device
# configurations, some reconstruction algorithms may choose to ignore
# some of that information, this way the number of electrodes and
# so on can be set as a free parameter!
# TODO: define an abstract interface for reconstruction algorithms.
from .worker import ReconstructionWorker
# for testing and debugging purposes below.
from .greit import GreitReconstruction
from .jac import JacReconstruction
from .bp import BpReconstruction
from .pyeit import mesh
from .pyeit.eit.utils import eit_scan_lines
from .pyeit.eit.greit import GREIT as greit
from .pyeit.eit.fem import Forward | [
37811,
198,
6690,
261,
15019,
286,
2939,
1366,
422,
8246,
1366,
13,
198,
37811,
198,
198,
2,
16926,
46,
25,
8160,
25056,
11862,
4795,
3335,
198,
2,
25412,
11,
617,
25056,
16113,
743,
3853,
284,
8856,
198,
2,
617,
286,
326,
1321,
11,... | 3.778378 | 185 |
from functools import reduce
| [
6738,
1257,
310,
10141,
1330,
4646,
628
] | 4.285714 | 7 |
#!/usr/bin/python
# -*- coding:utf-8 -*-
# *****************************************************************************
# * | File : epd5in65f.py
# * | Author : Waveshare team
# * | Function : Electronic paper driver
# * | Info :
# *----------------
# * | This version: V1.0
# * | Date : 2020-03-02
# # | Info : python demo
# -----------------------------------------------------------------------------
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documnetation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS OR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
import logging
from . import epdconfig
# Display resolution
EPD_WIDTH = 600
EPD_HEIGHT = 448
# Hardware reset
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
2,
532,
9,
12,
19617,
25,
40477,
12,
23,
532,
9,
12,
198,
2,
41906,
17174,
4557,
35625,
198,
2,
1635,
930,
9220,
220,
220,
220,
220,
220,
220,
220,
1058,
197,
220,
2462,
67,
20,
259,
... | 3.295635 | 504 |
# Generated by Django 2.2.12 on 2020-05-01 21:46
from django.db import migrations
CREATE_POSTGIS_FTW_SCHEMA = """
CREATE SCHEMA IF NOT EXISTS postgis_ftw
"""
| [
2,
2980,
515,
416,
37770,
362,
13,
17,
13,
1065,
319,
12131,
12,
2713,
12,
486,
2310,
25,
3510,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
628,
198,
43387,
6158,
62,
32782,
38,
1797,
62,
9792,
54,
62,
50,
3398,
2763... | 2.441176 | 68 |
# encoding=utf8
import os
import random
import numpy as np
import JediML.Utility as utility
import JediML.MLBase as ml
random.seed(1)
| [
2,
21004,
28,
40477,
23,
198,
198,
11748,
28686,
198,
11748,
4738,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
16147,
5805,
13,
18274,
879,
355,
10361,
198,
11748,
16147,
5805,
13,
5805,
14881,
355,
25962,
198,
198,
25120,
13,
28826... | 3 | 46 |
import cv2
import numpy as np
from fer import FER
if __name__ == '__main__':
main()
| [
11748,
269,
85,
17,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
11354,
1330,
376,
1137,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
197,
12417,
3419,
198
] | 2.606061 | 33 |
# -*- coding:utf8 -*-
import json
import tkinter as tk
from tkinter import Toplevel, ttk
from tkinter.constants import BOTH, CENTER, E, LEFT, NE, NW, RIGHT, W, X, Y
import apikeysetting as aks
import frame_one
import frame_two
import frame_three
import frame_qr
import os
#初始化配置文件
aks.initializejson()
#用于存放表格的文件夹
if not ('Tables') in os.listdir():
os.mkdir("./Tables/")
#读取配置
with open('api.json', 'r') as f:
data = json.load(f)
text_value = int(data['text'])
translation_value = int(data['translation'])
table_value = int(data['table'])
math_value = int(data['math'])
#初始化窗口
win = tk.Tk()
win.title('落叶OCR')
#让窗口显示再屏幕中间
sw = win.winfo_screenwidth()
#得到屏幕宽度
sh = win.winfo_screenheight()
#得到屏幕高度
ww = 800
wh = 500
x = (sw-ww) / 2
y = (sh-wh) / 2
win.geometry("%dx%d+%d+%d" %(ww,wh,x,y))
win.minsize(800,500)
win.iconbitmap('.\\logo.ico')
#自定义样式
style = ttk.Style()
style.theme_create( "MyStyle", parent="xpnative", settings={"TNotebook": {"configure": {"tabmargins": [0, 0, 0, 0] } },"TNotebook.Tab": {"configure": {"padding": [79, 10],"font" : ('URW Gothic L', '11')},}})
style.theme_use("MyStyle")
#初始化四个选项卡
notebook = ttk.Notebook(win)
frameOne = tk.Frame()
frameTwo = tk.Frame()
frameThree = tk.Frame(bg='Ivory')
frameFour = tk.Frame()
notebook.add(frameOne, text='文字')
notebook.add(frameTwo, text='表格')
notebook.add(frameThree, text='公式')
notebook.add(frameFour, text='二维码')
notebook.pack(fill=tk.BOTH, expand=True)
#文本
frame_one.Frameoneset(frameOne,text_value,translation_value,win)
#表格
frame_two.Frametwoset(frameTwo,table_value,win)
#公式
frame_three.Framethreeset(frameThree,math_value,win)
#二维码
frameqr = tk.Frame(frameFour,width=800,height=225,bg='Azure')
frameqr.pack(fill=X)
frame_qr.Frameqrset(frameqr,win)
#about
framesetting = tk.Frame(frameFour,width=800,height=200,)
framesetting.pack(fill=BOTH,expand=True)
framesetleft = tk.Frame(framesetting,width=400,height=200,)
framesetleft.pack(side=LEFT,fill=BOTH,expand=True)
framesetright = tk.Frame(framesetting,width=400,height=200,)
framesetright.pack(side=RIGHT,fill=BOTH,expand=True)
ocrlable = tk.Label(framesetleft,text='项目地址:',font=('仿宋', 15), width=10, height=2)
ocrlable.pack(padx=15,pady=15,anchor=NW)
github = tk.Label(framesetleft,text='Github: https://github.com/lstoryzx/LYOCR',width=50,height=2)
github.pack(anchor=NW,padx=5,pady=10)
gitee = tk.Label(framesetleft,text='Gitee: https://gitee.com/lstoryzx/lyocr',width=50,height=2)
gitee.pack(anchor=NW,padx=5,pady=10)
apibutton = tk.Button(framesetright,text='API设置',font=('仿宋', 15), width=15, height=3,relief='groove',bg='Azure',activebackground='Azure',command=apisetting)
apibutton.pack(padx=20,pady=100)
win.mainloop()
| [
2,
532,
9,
12,
19617,
25,
40477,
23,
532,
9,
12,
198,
11748,
33918,
198,
11748,
256,
74,
3849,
355,
256,
74,
198,
6738,
256,
74,
3849,
1330,
309,
643,
626,
11,
256,
30488,
198,
6738,
256,
74,
3849,
13,
9979,
1187,
1330,
347,
269... | 2.083333 | 1,284 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Database abstraction layer. Simplyfies database
handling a bit.
An example of common usecase could be as such:
# Import the module
from databaselayer import database
# Create the database
myDB = database.Database('SQLite', 'database.sql')
# Create a table
myDB.execute(
'CREATE TABLE IF NOT EXISTS users (id INTEGER PRIMARY KEY, username TEXT)'
)
# Insert a few people in the users table
myDB.insert('users', {'username': 'John'})
myDB.insert('users', {'username': 'Tom'})
"""
import threading
import sys
try:
import sqlite3
SQLITE = True
except ImportError:
# Fallback for sqlite3 (custom install)
try:
from pysqlite2 import dbapi2 as sqlite3
SQLITE = True
except ImportError:
SQLITE = False
try:
import MySQLdb
MYSQL = True
except ImportError:
MYSQL = False
class Database(threading.Thread):
"""
Higher level database abstraction layer.
Provides a database abstraction layer, for easy use with
multiple different database types, without the need to
think about SQL differences. If you want to execute raw SQL,
you can use the execute method.
Throughout the class, a lot of methods take in a filter argument.
The filter is in the format of {'field': 'value'}. The data
argument follows the same syntax.
The add argument is to add additional raw SQL to a constructed
query (e.g. add="ORDER BY time").
"""
def __init__(self, dbtype=None, dbname=None, dbserver=None, creden=None):
"""Sets the values for the database instance"""
threading.Thread.__init__(self)
try:
self.dbtype = dbtype
self.dbname = dbname
except NameError:
raise NameError('No database type or name specified!')
if dbserver is not None:
self.dbserver = dbserver
if creden is not None:
try:
self.user = creden['username']
except KeyError:
self.user = None
try:
self.passwd = creden['password']
except KeyError:
self.passwd = None
else:
self.user = None
self.passwd = None
self.temp_values = None
self.temp_insert_values = None
self.last_insert_id = None
self.conn = None
self.cursor = None
def connect(self):
"""Make the connection based on the type of database.
Types allowed:
SQLite
MySQL
"""
if SQLITE and self.dbtype == 'SQLite':
self.conn = sqlite3.connect(self.dbname)
self.cursor = self.conn.cursor()
elif MYSQL and self.dbtype == 'MySQL':
self.conn = MySQLdb.connect(host=self.dbserver, db=self.dbname,
user=self.user, passwd=self.passwd)
self.cursor = self.conn.cursor()
else:
raise NameError('No database available!')
def _keys_to_sql(self, keys=None, sep='AND '):
"""Construct the SQL filter from a dict"""
if keys is None:
keys = {}
filters = []
self.temp_values = ()
for field, value in list(keys.items()):
filters.append("%s = ? " % field)
self.temp_values = self.temp_values + (value,)
return sep.join(filters)
def _keys_to_insert_sql(self, keys=None, sep=', '):
"""Convert a dict into an SQL field value pair"""
if keys is None:
keys = {}
fields = []
values = []
self.temp_insert_values = ()
for field, value in list(keys.items()):
fields.append(field)
values.append('?')
self.temp_insert_values = self.temp_insert_values + (value,)
fields = '(' + sep.join(fields) + ') '
values = 'VALUES(' + sep.join(values) + ') '
return fields + values
def execute(self, sql=None):
"""Simply execute the given SQL"""
if sql is not None:
self.connect()
try:
self.cursor.execute(sql)
except sqlite3.OperationalError as error:
self.conn.rollback()
return 'SQL Error: %s' % error
else:
self.conn.commit()
self.cursor.close()
else:
raise NameError('There was no SQL to be parsed')
def rawfetch(self, sql=None, data=None, fetchall=True, out='none'):
"""Fetches all rows from the given SQL.
Arg [out] specifies what the output should be:
none : do nothing here (simply return)
output : send output to stdout
"""
if sql is not None:
self.connect()
try:
if data is None:
self.cursor.execute(sql)
else:
self.cursor.execute(sql, tuple(data))
except sqlite3.OperationalError as error:
self.conn.rollback()
if out == 'output':
write("Error running SQL: %s" % (sql,))
return 'SQL Error: %s' % error
else:
if out == 'output':
write("Successfully ran: %s" % (sql,))
# Cleanup and return
if fetchall:
result = self.cursor.fetchall()
else:
result = self.cursor.fetchone()
self.cursor.close()
return result
else:
raise NameError('There was no SQL to be parsed')
def fetchall(self, table=None, filters=None, add='', out='none'):
"""Fetches all rows from database based on the filters applied.
Arg [out] specifies what the output should be:
none : do nothing here (simply return)
output : send output to stdout
"""
append = ' WHERE '
if filters is None:
filters = {}
append = ''
if table is not None:
# Construct the SQL
sql = 'SELECT * FROM ' + table + append +\
self._keys_to_sql(filters)
self.connect()
try:
self.cursor.execute(sql + add, self.temp_values)
except sqlite3.OperationalError as error:
self.conn.rollback()
del self.temp_values
if out == 'output':
write("Error running SQL: %s" % (sql,))
return 'SQL Error: %s' % error
else:
if out == 'output':
write("Successfully ran: %s" % (sql,))
# Cleanup and return
del self.temp_values
result = self.cursor.fetchall()
self.cursor.close()
return result
else:
raise NameError('Table not specified!')
def fetchone(self, table=None, filters=None, out='none'):
"""Fetches the first row from database based on the filters applied.
Arg [out] specifies what the output should be:
none : do nothing here (simply return)
output : send output to stdout
"""
if filters is None:
filters = {}
if table is not None:
# Construct the SQL
sql = 'SELECT * FROM ' + table + ' WHERE ' +\
self._keys_to_sql(filters)
self.connect()
try:
self.cursor.execute(sql, self.temp_values)
except sqlite3.OperationalError as error:
del self.temp_values
self.conn.rollback()
if out == 'output':
write("Error running SQL: %s" % (sql,))
return 'SQL Error: %s' % error
else:
if out == 'output':
write("Successfully ran: %s" % (sql,))
# Cleanup and return
del self.temp_values
result = self.cursor.fetchone()
self.cursor.close()
return result
else:
raise NameError('Table not specified!')
def insert(self, table=None, data=None, out=None):
"""
Inserts specified data into the database
Arg [out] specifies what the output should be:
none : do nothing here (simply return)
output : send output to stdout
"""
if data is None:
data = {}
if table is not None:
sql = 'INSERT INTO ' + table + self._keys_to_insert_sql(data)
self.connect()
try:
self.cursor.execute(sql, self.temp_insert_values)
except sqlite3.OperationalError as error:
self.conn.rollback()
del self.temp_insert_values
if out == 'output':
write("Error running SQL: %s" % (sql,))
return 'SQL Error: %s' % error
else:
if out == 'output':
write("Successfully ran: %s" % (sql,))
write("With data : %s" % (self.temp_insert_values,))
del self.temp_insert_values
# TODO Fix the last insert id
# self.last_insert_id = self.cursor.lastrowid()
self.conn.commit()
self.cursor.close()
return True
else:
raise NameError('Table not specified!')
def update(self, table=None, data=None, filters=None, out=None):
"""
Updates rows where filters apply with, given data
Arg [out] specifies what the output should be:
none : do nothing here (simply return)
output : send output to stdout
"""
if data is None:
data = {}
if filters is None:
filters = {}
if table is not None:
values = []
data = self._keys_to_sql(data, sep=', ')
values = self.temp_values
if filters:
filters = ' WHERE ' + str(self._keys_to_sql(filters))
values = values + self.temp_values
else:
filters = ''
sql = 'UPDATE ' + table + ' SET ' + data + filters
self.connect()
try:
self.cursor.execute(sql, values)
except sqlite3.OperationalError as error:
self.conn.rollback()
del self.temp_values
if out == 'output':
write("Error running SQL: %s" % (sql,))
return 'SQL Error: %s' % error
else:
if out == 'output':
write("Successfully ran: %s" % (sql,))
del self.temp_values
# TODO Fix the last insert id
# self.last_insert_id = self.cursor.lastrowid()
self.conn.commit()
self.cursor.close()
return True
else:
raise NameError('Table not specified!')
def delete(self, table=None, filters=None):
"""Deletes rows where given filters apply"""
if filters is None:
filters = {}
if table is not None:
filters = self._keys_to_sql(filters)
sql = 'DELETE FROM ' + table + ' WHERE ' + filters
self.connect()
try:
self.cursor.execute(sql, self.temp_values)
except sqlite3.OperationalError as error:
self.conn.rollback()
del self.temp_values
return 'SQL Error: %s' % error
else:
del self.temp_values
self.conn.commit()
self.cursor.close()
return True
else:
raise NameError('Table not specified!')
def count(self, table=None, filters=None):
"""Counts the rows based on the given filters"""
if table is not None:
# Construct the SQL
sql = 'SELECT * FROM ' + table + ' WHERE '
sql += self._keys_to_sql(filters)
self.connect()
try:
self.cursor.execute(sql, self.temp_values)
except sqlite3.OperationalError as error:
self.conn.rollback()
del self.temp_values
return 'SQL Error: %s' % error
else:
# Cleanup and return
del self.temp_values
count = self.cursor.rowcount()
self.cursor.close()
if count < 0 or count is None:
count = 0
return count
else:
raise NameError('Table not specified!')
def write(text):
"""Handle the output from the IRC bot"""
text = str(text) + "\n"
sys.stdout.write(text)
sys.stdout.flush()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
38105,
34651,
7679,
13,
17973,
69,
444,
6831,
198,
4993,
1359,
257,
1643,
13,
198,
198,
2025,
1672,
286,
... | 1.982207 | 6,632 |
import os, shutil, shlex
from walt.common.tools import read_json
from walt.server.threads.main.network.tools import get_server_ip
from walt.server.tools import update_template
from walt.server.spec import get_server_features, SERVER_SPEC_PATH
from walt.common.tools import failsafe_makedirs
from plumbum.cmd import chroot
IMAGE_SPEC_PATH = '/etc/walt/image.spec'
| [
11748,
28686,
11,
4423,
346,
11,
427,
2588,
198,
6738,
266,
2501,
13,
11321,
13,
31391,
1330,
1100,
62,
17752,
198,
6738,
266,
2501,
13,
15388,
13,
16663,
82,
13,
12417,
13,
27349,
13,
31391,
1330,
651,
62,
15388,
62,
541,
198,
6738... | 3.058824 | 119 |
from __future__ import print_function
"""
Classes for methods that do analysis of miniature synaptic potentials
Current implementations are ClementsBekkers, AndradeJonas and zero=crossing
Test run timing:
cb: 0.175 s (with cython version of algorithm); misses overlapping events
aj: 0.028 s, plus gets overlapping events
July 2017
Note: all values are MKS (Seconds, plus Volts, Amps)
per acq4 standards...
Each method inherits the base class from MiniAnalyses, which provides support
of post-detection analysis.
"""
import numpy as np
import scipy.signal
from typing import Union, List
import timeit
import pyximport
from scipy.optimize import curve_fit
from numba import jit
import lmfit
import scipy as sp
import pylibrary.tools.digital_filters as dfilt
from pylibrary.tools.cprint import cprint
import ephys.mini_analyses.functions as FN # Luke's misc. function library
from ephys.mini_analyses import clembek # cythonized... pyx file
from ephys.mini_analyses.minis_methods_common import MiniAnalyses
pyximport.install()
@jit(nopython=False, parallel=False, cache=True)
def nb_clementsbekkers(data, template: Union[List, np.ndarray]):
"""
cb algorithm for numba jit.
"""
## Prepare a bunch of arrays we'll need later
n_template = len(template)
# if n_template <= 1:
# raise ValueError("nb_clementsbekkers: Length of template must be useful, and > 1")
n_data = data.shape[0]
n_dt = int(n_data - n_template)
# if n_dt < 10:
# raise ValueError("nb_clementsbekkers: n_dt, n_template", n_dt, n_template)
#
sum_template = template.sum()
sum_template_2 = (template * template).sum()
data_2 = data * data
sum_data = np.sum(data[:n_template])
sum_data_2 = data_2[:n_template].sum()
scale = np.zeros(n_dt)
offset = np.zeros(n_dt)
detcrit = np.zeros(n_dt)
for i in range(n_dt):
if i > 0:
sum_data = sum_data + data[i + n_template] - data[i - 1]
sum_data_2 = sum_data_2 + data_2[i + n_template] - data_2[i - 1]
sum_data_template_prod = np.multiply(data[i : i + n_template], template).sum()
scale[i] = (sum_data_template_prod - sum_data * sum_template / n_template) / (
sum_template_2 - sum_template * sum_template / n_template
)
offset[i] = (sum_data - scale[i] * sum_template) / n_template
fitted_template = template * scale[i] + offset[i]
sse = ((data[i : i + n_template] - fitted_template) ** 2).sum()
detcrit[i] = scale[i] / np.sqrt(sse / (n_template - 1))
return (scale, detcrit)
class ClementsBekkers(MiniAnalyses):
"""
Implements Clements-bekkers algorithm: slides template across data,
returns array of points indicating goodness of fit.
Biophysical Journal, 73: 220-229, 1997.d
We have 3 engines to use:
numba (using a just-in-time compiler)
cython (pre-compiled during setups
python (slow, direct implementation)
"""
def set_cb_engine(self, engine: str) -> None:
"""
Define which detection engine to use
cython requires compilation in advance in setup.py
Numba does a JIT compilation (see routine above)
"""
if engine in ["numba", "cython", "python"]:
self.engine = engine
else:
raise ValueError(f"CB detection engine must be one of python, numba or cython. Got{str(engine):s}")
def clements_bekkers(self, data: np.ndarray) -> None:
"""
External call point for all engines once parameters are
set up.
Parameters
----------
data : np.array (no default)
1D data array
"""
starttime = timeit.default_timer()
if self.template is None:
self._make_template()
## Strip out meta-data for faster computation
D = self.sign * data.view(np.ndarray)
if self.template is None:
self._make_template()
T = self.template.view(np.ndarray)
self.timebase = np.arange(0.0, data.shape[0] * self.dt_seconds, self.dt_seconds)
if self.engine == "numba":
self.Scale, self.Crit = nb_clementsbekkers(D, T)
# print('numba')
elif self.engine == "cython":
self.Scale, self.Crit = self.clements_bekkers_cython(D, T)
# print('cython')
elif self.engine == "python":
self.Scale, self.Crit = self.clements_bekkers_python(D, T)
else:
raise ValueError(
'Clements_Bekkers: computation engine unknown (%s); must be "python", "numba" or "cython"'
% self.engine
)
endtime = timeit.default_timer() - starttime
self.runtime = endtime
self.Crit = self.sign * self.Crit # assure that crit is positive
def clements_bekkers_numba(
self, data: np.ndarray, T: np.ndarray,
) -> (np.ndarray, np.ndarray, np.ndarray):
"""
Wrapper for numba implementation
"""
# print('Template len: ', self.template.shape, 'data: ', D.shape, 'max(t): ', np.max(self.timebase))
if np.std(D) < 5e-12: # no real data to do - so just return zeros.
DC = np.zeros(self.template.shape[0])
Scale = np.zeros(self.template.shape[0])
Crit = np.zeros(self.template.shape[0])
else:
DC, Scale, Crit = nb_clementsbekkers(D, T)
return DC, Scale, Crit
def clements_bekkers_python(self, D:np.ndarray, T:np.ndarray) ->(np.ndarray, np.ndarray, np.ndarray):
"""Implements Clements-bekkers algorithm: slides template across data,
returns array of points indicating goodness of fit.
Biophysical Journal, 73: 220-229, 1997.
Campagnola's version...
"""
starttime = timeit.default_timer()
# Strip out meta-data for faster computation
NDATA = len(D)
# Prepare a bunch of arrays we'll need later
N = len(T)
sumT = T.sum()
sumT2 = (T**2.0).sum()
sumD = self._rollingSum(D, N)
sumD2 = self._rollingSum(D**2.0, N)
sumTD = scipy.signal.correlate(D, T, mode='valid', method='direct')
# sumTD2 = np.zeros_like(sumD)
# for i in range(len(D)-N+1):
# sumTD2[i] = np.multiply(D[i : i + N], T).sum()
# print(np.mean(sumTD-sumTD2))
# compute scale factor, offset at each location:
## compute scale factor, offset at each location:
scale = (sumTD - sumT * sumD /N) / (sumT2 - sumT*sumT /N)
offset = (sumD - scale * sumT) /N
## compute SSE at every location
SSE = sumD2 + scale**2 * sumT2 + N * offset**2 - 2 * (scale*sumTD + offset*sumD - scale*offset*sumT)
## finally, compute error and detection criterion
stderror = np.sqrt(SSE / (N-1))
DetCrit = scale / stderror
endtime = timeit.default_timer() - starttime
self.runtime = endtime
# import matplotlib.pyplot as mpl
# mpl.plot(DetCrit)
# mpl.show()
# exit()
return scale, DetCrit
def identify_events(self,
data_nostim: Union[list, np.ndarray, None] = None,
outlier_scale: float = 10.0,
order: int = 11,
verbose: bool = False,
):
"""
Identify events. Criterion array should be 2D:
(trial number, criterion array)
"""
criterion = np.array(self.Criterion)
assert criterion.ndim == 2
if data_nostim is not None:
# clip to max of crit array, and be sure index array is integer, not float
for i in range(criterion.shape[0]):
criterion[i,:] = criterion[i, [int(x) for x in data_nostim if x < criterion.shape[1]]]
# compute an SD across the entire dataset (all traces)
# To do this remove "outliers" in a first pass
valid_data = np.zeros_like(criterion)
for i in range(criterion.shape[0]):
valid_data[i,:] = self.remove_outliers(criterion[i], outlier_scale)
sd = np.nanstd(valid_data)
self.sdthr = sd * self.threshold # set the threshold to multiple SD
self.onsets = [None]*criterion.shape[0]
for i in range(criterion.shape[0]):
self.above = np.clip(criterion[i], self.sdthr, None)
self.onsets[i] = (
scipy.signal.argrelextrema(self.above, np.greater, order=int(order))[0]
- 1
+ self.idelay
)
endtime = timeit.default_timer() - self.starttime
self.runtime = endtime
# self.summarize(self.data)
endtime = timeit.default_timer() - self.starttime
# import matplotlib.pyplot as mpl
# for i in range(criterion.shape[0]):
# mpl.plot(self.timebase, criterion[i])
# mpl.plot(self.onsets[i]*self.dt, self.sdthr*np.ones_like(self.onsets[i]), 'ro')
# mpl.plot([self.timebase[0], self.timebase[-1]], [self.sdthr, self.sdthr], 'r--')
# mpl.show()
# self.summarize(self.data)
if verbose:
print("CB run time: {0:.4f} s".format(endtime))
class AndradeJonas(MiniAnalyses):
"""
Deconvolution method of Andrade/Jonas, Biophysical Journal 2012
Create an instance of the class (aj = AndradeJonas())
call setup to instantiate the template and data detection sign (1 for positive, -1 for negative)
call deconvolve to perform the deconvolution
additional routines provide averaging and some event analysis and plotting
"""
def identify_events(self,
data_nostim: Union[list, np.ndarray, None] = None,
outlier_scale: float = 3.0,
order: int = 7,
verbose: bool = False,
):
"""
Identify events. Criterion array should be 2D:
(trial number, criterion array), thus
we use the global statistiscs of the set of traces
to do detection.
"""
criterion = np.array(self.Criterion)
assert criterion.ndim == 2
# criterion = criterion.reshape(1, -1) # make sure can be treated as a 2-d array
if data_nostim is not None:
# clip to max of crit array, and be sure index array is integer, not float
for i in range(criterion.shape[0]):
criterion[i,:] = criterion[i, [int(x) for x in data_nostim if x < criterion.shape[1]]]
# compute an SD across the entire dataset (all traces)
# To do this remove "outliers" in a first pass
valid_data = np.zeros_like(criterion)
for i in range(criterion.shape[0]):
valid_data[i,:] = self.remove_outliers(criterion[i], outlier_scale)
sd = np.nanstd(valid_data)
self.sdthr = sd * self.threshold # set the threshold to multiple SD
self.onsets = [None]*criterion.shape[0]
for i in range(criterion.shape[0]):
self.above = np.clip(criterion[i], self.sdthr, None)
self.onsets[i] = (
scipy.signal.argrelextrema(self.above, np.greater, order=int(order))[0]
- 1
+ self.idelay
)
endtime = timeit.default_timer() - self.starttime
self.runtime = endtime
endtime = timeit.default_timer() - self.starttime
if verbose:
print("AJ run time: {0:.4f} s".format(endtime))
class RSDeconvolve(MiniAnalyses):
"""Event finder using Richardson Silberberg Method, J. Neurophysiol. 2008
"""
def identify_events(self,
data_nostim: Union[list, np.ndarray, None] = None,
outlier_scale: float = 3.0,
order: int = 7,
verbose: bool = False,
):
"""
Identify events. Criterion array should be 2D:
(trial number, criterion array), thus
we use the global statistiscs of the set of traces
to do detection.
"""
criterion = np.array(self.Criterion)
assert criterion.ndim == 2
# criterion = criterion.reshape(1, -1) # make sure can be treated as a 2-d array
if data_nostim is not None:
# clip to max of crit array, and be sure index array is integer, not float
for i in range(criterion.shape[0]):
criterion[i,:] = criterion[i, [int(x) for x in data_nostim if x < criterion.shape[1]]]
# compute an SD across the entire dataset (all traces)
# To do this remove "outliers" in a first pass
valid_data = np.zeros_like(criterion)
for i in range(criterion.shape[0]):
valid_data[i,:] = self.remove_outliers(criterion[i], outlier_scale)
sd = np.nanstd(valid_data)
self.sdthr = sd * self.threshold # set the threshold to multiple SD
self.onsets = [None]*criterion.shape[0]
for i in range(criterion.shape[0]):
self.above = np.clip(criterion[i], self.sdthr, None)
self.onsets[i] = (
scipy.signal.argrelextrema(self.above, np.greater, order=int(order))[0]
- 1
+ self.idelay
)
endtime = timeit.default_timer() - self.starttime
self.runtime = endtime
endtime = timeit.default_timer() - self.starttime
if verbose:
print("RS run time: {0:.4f} s".format(endtime))
class ZCFinder(MiniAnalyses):
"""
Event finder using Luke's zero-crossing algorithm
"""
| [
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
198,
37811,
198,
9487,
274,
329,
5050,
326,
466,
3781,
286,
28685,
46679,
2785,
82,
198,
198,
11297,
25504,
389,
327,
3639,
33,
988,
15949,
11,
843,
27585,
18219,
292,
290,
6632,
28,... | 2.221425 | 6,133 |
from scipy.integrate import solve_ivp
import numpy as np
import matplotlib.pyplot as plt
from Orbit import Orbit
from Orbit_solver import *
###############################################################
# Begin problem
###############################################################
# initial conditions
a = 7571
incl = 87.9
RA = 180
e = 0.01
w = 180
TA = 0
# parameters
mu = 398600
RE = 6000
J2 = 1e-3
# evaluate h
h = np.sqrt(mu/a**3)*a*a*np.sqrt(1-e**2)
# t_span
t0 = 0
tf = 655600
init = Orbit([h, incl, RA, e, w, TA], 'keplerian', mu)
print(f'Orbit period : {init.getPeriod()} s')
# create data
data_kep = {'ic': init.getKep(),
't_span': [t0, tf],
'args': [mu, RE, J2]}
data_cart = {'ic': init.getCart(),
't_span': [t0, tf],
'args': [mu, RE, J2]}
# numerical integration
sol_kep = solve_orbit_kep(data_kep, dyn_kep, rtol=1e-6)
sol_cart = solve_orbit_kep(data_cart, dyn_cart, rtol=1e-6)
# evaluate orbit a time t
t = np.linspace(t0, tf, 1000)
orb_kep = sol_kep.sol(t)
orb_cart = sol_cart.sol(t)
orbit_kep = [Orbit(step, "keplerian", mu) for step in orb_kep.T]
orbit_cart = [Orbit(step, "cartesian", mu) for step in orb_cart.T]
R_kep = np.array([step.getCart() for step in orbit_kep]).T
R_cart = np.array([step.getCart() for step in orbit_cart]).T
# plot orbits
# fig_1 = plt.figure()
# ax_1 = plt.axes(projection='3d')
# ax_1.plot(R_kep[0, :], R_kep[1, :], R_kep[2, :])
# plt.title('Keplerian method')
#
# fig_2 = plt.figure()
# ax_2 = plt.axes(projection='3d')
# ax_2.plot(R_cart[0, :], R_cart[1, :], R_cart[2, :])
# plt.title('Cartesian method')
e_kep = np.array([step.getKepDict()['e'] for step in orbit_kep])
e_cart = np.array([step.getKepDict()['e'] for step in orbit_cart])
e_rel = np.abs(e_kep-e_cart)
fig = plt.figure()
plt.plot(t/(6556), e_rel)
plt.yscale('log')
plt.xlabel('time [T]')
plt.ylabel('|eCart - eGauss|')
plt.grid()
incl_kep = np.array([step.getKepDict()['incl'] for step in orbit_kep])
incl_cart = np.array([step.getKepDict()['incl'] for step in orbit_cart])
incl_rel = np.abs(incl_kep-incl_cart)/(360)
fig_i = plt.figure()
plt.plot(t/(6556), incl_rel*np.pi/180)
plt.yscale('log')
plt.xlabel('time [T]')
plt.ylabel('|iCart - iGauss|/2pi')
plt.grid()
RA_kep = np.array([step.getKepDict()['RA'] for step in orbit_kep])
RA_cart = np.array([step.getKepDict()['RA'] for step in orbit_cart])
RA_rel = np.abs(RA_kep-RA_cart)/(360)
fig_RA = plt.figure()
plt.plot(t/(6556), RA_rel*np.pi/180)
plt.yscale('log')
plt.xlabel('time [T]')
plt.ylabel('|RA_Cart - RA_Gauss|/2pi')
plt.grid()
fig_RA2 = plt.figure()
plt.plot(t/(6556), RA_kep, color='blue', label='Gauss')
plt.plot(t/(6556), RA_cart, color='red', label='Cartesian')
plt.yscale('log')
plt.xlabel('time [T]')
plt.ylabel('RA [deg]')
plt.grid()
w_kep = np.array([step.getKepDict()['w'] for step in orbit_kep])
w_cart = np.array([step.getKepDict()['w'] for step in orbit_cart])
w_rel = np.abs(w_kep-w_cart)/(360)
fig_w = plt.figure()
plt.plot(t/(6556), w_rel*np.pi/180)
plt.yscale('log')
plt.xlabel('time [T]')
plt.ylabel('|w_Cart - w_Gauss|/2pi')
plt.grid()
fig_w2 = plt.figure()
plt.plot(t/(6556), w_kep, color='blue', label='Gauss')
plt.plot(t/(6556), w_cart, color='red', label='Cartesian')
plt.yscale('log')
plt.xlabel('time [T]')
plt.ylabel('w [deg]')
plt.grid()
RA_dot_cart = np.array([-1.5*np.sqrt(mu)*J2*RE**2*np.cos(step.kep['incl']*np.pi/180)/
((1-step.kep['e']**2)**2*np.sqrt(step.getSemiMajorAxes())**7)
for step in orbit_cart])
RA_dot_kep = np.array([-1.5*np.sqrt(mu)*J2*RE**2*np.cos(step.kep['incl']*np.pi/180)/
((1-step.kep['e']**2)**2*np.sqrt(step.getSemiMajorAxes())**7)
for step in orbit_kep])
fig_RA_dot = plt.figure()
plt.plot(t/(6556), RA_dot_kep, color='blue', label='Gauss')
plt.plot(t/(6556), RA_dot_cart, color='red', label='Cartesian')
plt.xlabel('time [T]')
plt.ylabel('RA_dot [deg]')
plt.legend()
plt.grid()
plt.show()
| [
6738,
629,
541,
88,
13,
18908,
4873,
1330,
8494,
62,
452,
79,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
6738,
38161,
1330,
38161,
198,
198,
6738,
38161,
62,
82,
14375,
1330,... | 2.08971 | 1,895 |
# file to load the data, tokenize and update labels accordingly
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from transformers import AutoModel, AutoTokenizer, DataCollatorForTokenClassification, AutoModelForTokenClassification, TrainingArguments, Trainer
import pickle
import argparse
import random
import copy
# https://huggingface.co/transformers/v3.2.0/custom_datasets.html
# https://huggingface.co/docs/transformers/custom_datasets
SPECIAL_TOKEN_LABEL = -100
# max lengths creating C,A -> R data encoding
MAX_TOK_LEN = 448
MAX_ANS_LEN = 64
labels = [
"0",
"B-answer",
"I-answer",
]
labels_s = [
"0",
"B-sentence",
"I-sentence",
]
CRA_TOKENS = ['[BGN]', '[END]']
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Prepare dataset with labels')
# command-line arguments
parser.add_argument('data_path', type=str,
help='path to dataframe of pre-parsed data', action='store')
parser.add_argument('output_path', type=str,
help='path to output file where the parsed data will be stored', action='store')
parser.add_argument('--answers', dest='parse_answers', action='store_true')
parser.add_argument('--CRA', dest='CRA', action='store_true')
parser.add_argument('--CRA_tok_ignore', dest='CRA_tok_ignore', action='store_true')
parser.add_argument('--seed', dest='seed', type=int,
help='fix random seeds', action='store', default=42)
args = parser.parse_args()
random.seed(args.seed)
np.random.seed(args.seed)
main(args)
| [
2,
2393,
284,
3440,
262,
1366,
11,
11241,
1096,
290,
4296,
14722,
16062,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
6121,
364,
133... | 2.705479 | 584 |
import numpy as np
import torch
from maskers.base_masker import BaseMasker
class PhaseMasker(BaseMasker):
"""Object for masking and demasking"""
@staticmethod
| [
11748,
299,
32152,
355,
45941,
198,
11748,
28034,
198,
6738,
9335,
364,
13,
8692,
62,
27932,
263,
1330,
7308,
45195,
263,
628,
198,
4871,
18983,
45195,
263,
7,
14881,
45195,
263,
2599,
198,
220,
220,
220,
37227,
10267,
329,
9335,
278,
... | 3.207547 | 53 |
########## TOP LEVEL SIM SETUP ##########
meshfile: 'mesh/' + 'boeing_plane_final' # No file extension!
stepfile: 'mesh/boeing_plane_no_landing_gear.STEP'
case_select: 'Ex'
# umin: None # Fill these in with the max and min values of the potential when computing the external E field solutions
# umax: None
porder: 2
ndim: 3
solver: 'gmres'
solver_tol: 1e-7
outdir: 'out/'
vis_filename: 'boeing_plane_'+case_select
build_mesh: False
buildAF: False
compute_sol: False
call_pv: False
vis_filename: outdir+vis_filename
visorder: porder
viz_labels: {'scalars': {0: 'Potential', 1: 'x0'}, 'vectors': {0: 'Potential Gradient'}}
fuselage_dia: 3.76 # This is the fuselage of the 737 in m
# stabilizers: [20, 26, 51, 85, 72, 95, 34, 38, 87, 108, 97, 116]
# nose: [39, 78, 33, 48, 99, 118, 84, 106, 77, 100, 49, 83]
# fuselage: [107, 117, 122, 130, 131, 134]
# engines: [16, 17, 18, 19, 31, 32, 59, 60, 57, 58, 89, 90]
# wings: [121, 119, 101, 103, 79, 82, 41, 45, 27, 30, 6, 11, 2, 3, 132, 137, 126, 136, 123, 124, 109, 114, 88, 93, 56, 69, 35, 36]
# body_surfs: stabilizers + nose + fuselage + engines + wings
########## GEOMETRY SETUP ##########
pt_1_fuselage: np.array([8547.42, 1505.00, 5678.37])
pt_2_fuselage: np.array([8547.42, -1505.00, 5678.37])
r_fuselage_msh: np.linalg.norm(pt_1_fuselage-pt_2_fuselage)/2
scale_factor: fuselage_dia/r_fuselage_msh # Normalize mesh by the fuselage radius and rescale so that mesh dimensions are in meters
########## BCs ##########
surf_faces: np.arange(137)+1 # Faces are 1-indexed
x_minus_face: 138
x_plus_face: 139
y_minus_face: 140
y_plus_face: 141
z_minus_face: 142
z_plus_face: 143 | [
7804,
2235,
28662,
49277,
23749,
25823,
8577,
1303,
7804,
2,
198,
76,
5069,
7753,
25,
705,
76,
5069,
14,
6,
1343,
705,
2127,
68,
278,
62,
14382,
62,
20311,
6,
220,
220,
220,
220,
1303,
1400,
2393,
7552,
0,
198,
9662,
7753,
25,
705... | 2.419118 | 680 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import (division, absolute_import, print_function,
unicode_literals, annotations)
if __name__ == '__main__':
main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
6738,
11593,
37443,
834,
1330,
357,
21426,
11,
4112,
62,
11748,
11,
3601,
62,
8818,
11,
198,
220,
220,
220,
220,... | 2.293478 | 92 |
# ipop-project
# Copyright 2016, University of Florida
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import time
from controller.modules.NetworkGraph import EdgeTypesOut
OpType = ["OpTypeAdd", "OpTypeRemove", "OpTypeUpdate"]
| [
2,
20966,
404,
12,
16302,
198,
2,
15069,
1584,
11,
2059,
286,
4744,
198,
2,
198,
2,
2448,
3411,
318,
29376,
7520,
11,
1479,
286,
3877,
11,
284,
597,
1048,
16727,
257,
4866,
198,
2,
286,
428,
3788,
290,
3917,
10314,
3696,
357,
1169... | 3.847826 | 322 |
__author__ = 'dmorina'
from rest_framework import permissions
| [
834,
9800,
834,
796,
705,
67,
4491,
1437,
6,
198,
6738,
1334,
62,
30604,
1330,
21627,
628,
198
] | 3.555556 | 18 |
# coding: utf8
{
'Ability to customize the list of details tracked at a Shelter': 'Ability to customise the list of details tracked at a Shelter',
'Ability to customize the list of human resource tracked at a Shelter': 'Ability to customise the list of human resource tracked at a Shelter',
'Ability to customize the list of important facilities needed at a Shelter': 'Ability to customise the list of important facilities needed at a Shelter',
"Acronym of the organization's name, eg. IFRC.": "Acronym of the organisation's name, eg. IFRC.",
'Add all organizations which are involved in different roles in this project': 'Add all organisations which are involved in different roles in this project',
'Add Item to Catalog': 'Add Item to Catalogue',
'Add New Branch Organization': 'Add New Branch Organisation',
'Add New Organization': 'Add New Organisation',
'Add New Organization Domain': 'Add New Organisation Domain',
'Add New Organization Type': 'Add New Organisation Type',
'Add Organization': 'Add Organisation',
'Add Organization Domain': 'Add Organisation Domain',
'Add Organization to Project': 'Add Organisation to Project',
'Add Organization Type': 'Add Organisation Type',
'Add Partner Organization': 'Add Partner Organisation',
'Add Partner Organizations': 'Add Partner Organisations',
'Canceled': 'Cancelled',
'Cannot make an Organization a branch of itself!': 'Cannot make an Organisation a branch of itself!',
'Capturing the projects each organization is providing and where': 'Capturing the projects each organisation is providing and where',
'Catalog': 'Catalogue',
'Catalog added': 'Catalogue added',
'Catalog deleted': 'Catalogue deleted',
'Catalog Details': 'Catalogue Details',
'Catalog Item added': 'Catalogue Item added',
'Catalog Item deleted': 'Catalogue Item deleted',
'Catalog Item updated': 'Catalogue Item updated',
'Catalog Items': 'Catalogue Items',
'Catalog updated': 'Catalogue updated',
'Catalogs': 'Catalogues',
'Certificate Catalog': 'Certificate Catalogue',
'Certifying Organization': 'Certifying Organisation',
'Commitment Canceled': 'Commitment Cancelled',
'Competency Rating Catalog': 'Competency Rating Catalogue',
'Configure resources to synchronize, update methods and policies': 'Configure resources to synchronise, update methods and policies',
'Configure/Monitor Synchronization': 'Configure/Monitor Synchronisation',
'Confirming Organization': 'Confirming Organization',
'Course Catalog': 'Course Catalogue',
'Create New Catalog': 'Create New Catalogue',
'Create New Catalog Item': 'Create New Catalogue Item',
'Create new Organization': 'Create new Organisation',
'Credentialling Organization': 'Credentialling Organisation',
'Current Owned By (Organization/Branch)': 'Current Owned By (Organisation/Branch)',
'Delete Catalog': 'Delete Catalogue',
'Delete Catalog Item': 'Delete Catalogue Item',
'Delete Organization': 'Delete Organisation',
'Delete Organization Domain': 'Delete Organisation Domain',
'Delete Organization Type': 'Delete Organisation Type',
'Delete Partner Organization': 'Delete Partner Organisation',
'Department Catalog': 'Department Catalogue',
'Donating Organization': 'Donating Organisation',
'Edit Catalog': 'Edit Catalogue',
'Edit Catalog Item': 'Edit Catalogue Item',
'Edit Organization': 'Edit Organisation',
'Edit Organization Domain': 'Edit Organisation Domain',
'Edit Organization Type': 'Edit Organisation Type',
'Edit Partner Organization': 'Edit Partner Organisation',
'Edit Project Organization': 'Edit Project Organisation',
'Edit Synchronization Settings': 'Edit Synchronisation Settings',
'Enter your organization': 'Enter your organisation',
'From Organization': 'From Organisation',
'Funding Organization': 'Funding Organisation',
'Funds Contributed by this Organization': 'Funds Contributed by this Organisation',
'Hair Color': 'Hair Colour',
'Identifier which the repository identifies itself with when sending synchronization requests.': 'Identifier which the repository identifies itself with when sending synchronisation requests.',
"If this field is populated then a user who specifies this Organization when signing up will be assigned as a Staff of this Organization unless their domain doesn't match the domain field.": "If this field is populated then a user who specifies this Organisation when signing up will be assigned as a Staff of this Organisation unless their domain doesn't match the domain field.",
'If this field is populated then a user with the Domain specified will automatically be assigned as a Staff of this Organization': 'If this field is populated then a user with the Domain specified will automatically be assigned as a Staff of this Organisation',
"If you don't see the Organization in the list, you can add a new one by clicking link 'Add Organization'.": "If you don't see the Organisation in the list, you can add a new one by clicking link 'Add Organisation'.",
'Import Organizations': 'Import Organisations',
'Import Partner Organizations': 'Import Partner Organisations',
'Import Project Organizations': 'Import Project Organisations',
'In Catalogs': 'In Catalogues',
'Intergovernmental Organization': 'Intergovernmental Organisation',
'International Organization': 'International Organisation',
'Item Catalog Details': 'Item Catalogue Details',
'Item Catalogs': 'Item Catalogues',
'Item Catalogues': 'Item Catalogues',
'Job Role Catalog': 'Job Role Catalogue',
'Job Title Catalog': 'Job Title Catalogue',
'Kit canceled': 'Kit cancelled',
'Last Synchronization': 'Last Synchronisation',
'Last synchronized on': 'Last synchronised on',
'Lead Organization': 'Lead Organisation',
'List All Organization Approvers & Whitelists': 'List All Organisation Approvers & Whitelists',
'List Organization Domains': 'List Organisation Domains',
'List Organization Types': 'List Organisation Types',
'List Organizations': 'List Organisations',
'List Partner Organizations': 'List Partner Organisations',
'List Project Organizations': 'List Project Organisations',
'Logo of the organization. This should be a png or jpeg file and it should be no larger than 400x400': 'Logo of the organisation. This should be a png or jpeg file and it should be no larger than 400x400',
'Manage Organization Contacts': 'Manage Organisation Contacts',
'Manage Organizations': 'Manage Organisations',
'Manual Synchronization': 'Manual Synchronisation',
'Matching Catalog Items': 'Matching Catalogue Items',
'Monetization': 'Monetisation',
'Monetization Report': 'Monetisation Report',
'No Catalog Items currently registered': 'No Catalogue Items currently registered',
'No Catalogs currently registered': 'No Catalogues currently registered',
'No Matching Catalog Items': 'No Matching Catalogue Items',
'No Organization Domains currently registered': 'No Organisation Domains currently registered',
'No Organization Types currently registered': 'No Organisation Types currently registered',
'No Organizations currently registered': 'No Organisations currently registered',
'No Organizations for this Project': 'No Organisations for this Project',
'No Partner Organizations currently registered': 'No Partner Organisations currently registered',
'Order canceled': 'Order cancelled',
'Organization': 'Organisation',
'Organization added': 'Organisation added',
'Organization added to Project': 'Organisation added to Project',
'Organization deleted': 'Organisation deleted',
'Organization Details': 'Organisation Details',
'Organization Domain added': 'Organisation Domain added',
'Organization Domain deleted': 'Organisation Domain deleted',
'Organization Domain Details': 'Organisation Domain Details',
'Organization Domain updated': 'Organisation Domain updated',
'Organization Domains': 'Organisation Domains',
'Organization Registry': 'Organisation Registry',
'Organization removed from Project': 'Organisation removed from Project',
'Organization Type': 'Organisation Type',
'Organization Type added': 'Organisation Type added',
'Organization Type deleted': 'Organisation Type deleted',
'Organization Type Details': 'Organisation Type Details',
'Organization Type updated': 'Organisation Type updated',
'Organization Types': 'Organisation Types',
'Organization Units': 'Organisation Units',
'Organization updated': 'Organisation updated',
'Organization(s)': 'Organisation(s)',
'Organization/Branch': 'Organisation/Branch',
'Organization/Supplier': 'Organisation/Supplier',
'Organizational Development': 'Organisational Development',
'Organizations': 'Organisations',
'Owned By (Organization/Branch)': 'Owned By (Organisation/Branch)',
'Owning Organization': 'Owning Organisation',
'Participating Organizations': 'Participating Organisations',
'Partner Organization': 'Partner Organisation',
'Partner Organization added': 'Partner Organisation added',
'Partner Organization deleted': 'Partner Organisation deleted',
'Partner Organization Details': 'Partner Organisation Details',
'Partner Organization updated': 'Partner Organisation updated',
'Partner Organizations': 'Partner Organisations',
"Phone number to donate to this organization's relief efforts.": "Phone number to donate to this organisation's relief efforts.",
'Please enter a %(site)s OR an Organization': 'Please enter a %(site)s OR an Organisation',
'Please enter an Organization/Supplier': 'Please enter an Organisation/Supplier',
'Position Catalog': 'Position Catalogue',
'Project Details including organizations': 'Project Details including organisations',
'Project Details including organizations and communities': 'Project Details including organisations and communities',
'Project Organization Details': 'Project Organisation Details',
'Project Organization updated': 'Project Organisation updated',
'Project Organizations': 'Project Organisations',
'Received Shipment canceled': 'Received Shipment cancelled',
'Request Canceled': 'Request Cancelled',
'Request for Donations Canceled': 'Request for Donations Cancelled',
'Request for Volunteers Canceled': 'Request for Volunteers Cancelled',
'Resource Mobilization': 'Resource Mobilisation',
'Schedule synchronization jobs': 'Schedule synchronisation jobs',
'Search by organization.': 'Search by organisation.',
'Search for an Organization by name or acronym': 'Search for an Organisation by name or acronym',
'Search for an Organization by name or acronym.': 'Search for an Organisation by name or acronym.',
'Search for office by organization or branch.': 'Search for office by organisation or branch.',
'Search for warehouse by organization.': 'Search for warehouse by organisation.',
'Search Organization Domains': 'Search Organisation Domains',
'Search Organization Types': 'Search Organisation Types',
'Search Organizations': 'Search Organisations',
'Search Partner Organizations': 'Search Partner Organisations',
'Search Project Organizations': 'Search Project Organisations',
'Sent Shipment canceled': 'Sent Shipment cancelled',
'Sent Shipment canceled and items returned to Warehouse': 'Sent Shipment cancelled and items returned to Warehouse',
'Shipping Organization': 'Shipping Organisation',
'Specialized Hospital': 'Specialised Hospital',
'Synchronization': 'Synchronisation',
'Synchronization Job': 'Synchronisation Job',
'Synchronization Log': 'Synchronisation Log',
'Synchronization mode': 'Synchronisation mode',
'Synchronization Schedule': 'Synchronisation Schedule',
'Synchronization Settings': 'Synchronisation Settings',
'Synchronization settings updated': 'Synchronisation settings updated',
'Synchronize now': 'Synchronise now',
'The default Organization for whom this person is acting.': 'The default Organisation for whom this person is acting.',
'The default Organization for whom you are acting.': 'The default Organisation for whom you are acting.',
'The Organization Registry keeps track of all the relief organizations working in the area.': 'The Organisation Registry keeps track of all the relief organisations working in the area.',
'The synchronization module allows the synchronization of data resources between Sahana Eden instances.': 'The synchronisation module allows the synchronisation of data resources between Sahana Eden instances.',
'This shipment has already been received & subsequently canceled.': 'This shipment has already been received & subsequently cancelled.',
'This shipment has not been received - it has NOT been canceled because can still be edited.': 'This shipment has not been received - it has NOT been cancelled because can still be edited.',
'This shipment has not been sent - it has NOT been canceled because can still be edited.': 'This shipment has not been sent - it has NOT been cancelled because can still be edited.',
'To Organization': 'To Organisation',
'Training Course Catalog': 'Training Course Catalogue',
'Transfer Ownership To (Organization/Branch)': 'Transfer Ownership To (Organisation/Branch)',
"Type the name of an existing catalog item OR Click 'Add New Item' to add an item which is not in the catalog.": "Type the name of an existing catalogue item OR Click 'Add New Item' to add an item which is not in the catalogue.",
'Under which condition a local record shall be updated if it also has been modified locally since the last synchronization': 'Under which condition a local record shall be updated if it also has been modified locally since the last synchronisation',
'Unique identifier which THIS repository identifies itself with when sending synchronization requests.': 'Unique identifier which THIS repository identifies itself with when sending synchronisation requests.',
'User Guidelines Synchronization': 'User Guidelines Synchronisation',
'Utilization Report': 'Utilisation Report',
'Volunteer Role Catalog': 'Volunteer Role Catalogue',
'Year that the organization was founded': 'Year that the organisation was founded',
}
| [
2,
19617,
25,
3384,
69,
23,
198,
90,
198,
6,
22453,
284,
24184,
262,
1351,
286,
3307,
18283,
379,
257,
36507,
10354,
705,
22453,
284,
2183,
786,
262,
1351,
286,
3307,
18283,
379,
257,
36507,
3256,
198,
6,
22453,
284,
24184,
262,
135... | 4.489869 | 3,060 |
import tempfile
from contextlib import contextmanager
from dagster import check, job, op
from dagster.core.instance import DagsterInstance, InstanceRef, InstanceType
from dagster.core.launcher import DefaultRunLauncher
from dagster.core.run_coordinator import DefaultRunCoordinator
from dagster.core.storage.compute_log_manager import (
MAX_BYTES_FILE_READ,
ComputeLogFileData,
ComputeLogManager,
)
from dagster.core.storage.event_log import SqliteEventLogStorage
from dagster.core.storage.root import LocalArtifactStorage
from dagster.core.storage.runs import SqliteRunStorage
from dagster.core.test_utils import environ, instance_for_test
@contextmanager
| [
11748,
20218,
7753,
198,
6738,
4732,
8019,
1330,
4732,
37153,
198,
198,
6738,
48924,
1706,
1330,
2198,
11,
1693,
11,
1034,
198,
6738,
48924,
1706,
13,
7295,
13,
39098,
1330,
32167,
1706,
33384,
11,
2262,
590,
8134,
11,
2262,
590,
6030,
... | 3.375 | 200 |
from nose.tools import eq_, ok_
from . import setup_postgres
from .test_basic import CustomModelView
from sqlalchemy.dialects.postgresql import HSTORE, JSON
| [
6738,
9686,
13,
31391,
1330,
37430,
62,
11,
12876,
62,
198,
198,
6738,
764,
1330,
9058,
62,
7353,
34239,
198,
6738,
764,
9288,
62,
35487,
1330,
8562,
17633,
7680,
198,
198,
6738,
44161,
282,
26599,
13,
38969,
478,
82,
13,
7353,
34239,... | 3.156863 | 51 |
from yowsup.structs import ProtocolEntity, ProtocolTreeNode
class IqProtocolEntity(ProtocolEntity):
'''
<iq type="{{get | set}}" id="{{id}}" xmlns="{{xmlns}}" to="{{TO}}" from="{{FROM}}">
</iq>
'''
TYPE_SET = "set"
TYPE_GET = "get"
TYPE_ERROR = "error"
TYPE_RESULT = "result"
TYPE_DELETE = "delete"
TYPES = (TYPE_SET, TYPE_GET, TYPE_RESULT, TYPE_ERROR, TYPE_DELETE)
@staticmethod
| [
6738,
331,
1666,
929,
13,
7249,
82,
1330,
20497,
32398,
11,
20497,
27660,
19667,
198,
4871,
314,
80,
19703,
4668,
32398,
7,
19703,
4668,
32398,
2599,
628,
220,
220,
220,
705,
7061,
198,
220,
220,
220,
1279,
25011,
2099,
2625,
27007,
1... | 2.320652 | 184 |
#!/usr/bin/env python3
"""Command-line reporting tool for the news database.

Offers three canned reports; the query helpers (get_top_articles,
get_top_authors, get_one_percent_error_days) are defined elsewhere.
"""
import argparse
import psycopg2
import calendar

DBNAME = "news"

# Parse command line arguments
parser = argparse.ArgumentParser(
    description='Get information from news database'
)
parser.add_argument(
    "querytype",
    choices=['top-articles', 'top-authors', 'one-percent-error-days'],
    help="Query to run"
)
parser.add_argument(
    '-n', '--numrows',
    help='number of rows to return',
    type=int
)
args = parser.parse_args()

# Row limit passed to the query helpers; 0 when -n was not given.
limit = args.numrows if args.numrows else 0

if args.querytype == 'top-articles':
    for row in get_top_articles(limit):
        print('%s - %d views' % (row[0], row[1]))
elif args.querytype == 'top-authors':
    for row in get_top_authors(limit):
        print('%s - %d views' % (row[0], row[1]))
elif args.querytype == 'one-percent-error-days':
    for row in get_one_percent_error_days(limit):
        print('%s %s, %s - %f%% errors' %
              (calendar.month_name[row[0].month],
               row[0].day,
               row[0].year,
               row[1]))
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
11748,
1822,
29572,
198,
11748,
17331,
22163,
70,
17,
198,
11748,
11845,
198,
11012,
20608,
796,
366,
10827,
1,
628,
628,
198,
198,
2,
2547,
325,
3141,
1627,
7159,
198,
48610,... | 1.921446 | 802 |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Text RNN model stored as a SavedModel."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import app
from absl import flags
import tensorflow.compat.v2 as tf
FLAGS = flags.FLAGS
flags.DEFINE_string("export_dir", None, "Directory to export SavedModel.")
class TextRnnModel(tf.train.Checkpoint):
"""Text RNN model.
A full generative text RNN model that can train and decode sentences from a
starting word.
"""
@tf.function(input_signature=[tf.TensorSpec([None], tf.dtypes.string)])
@tf.function
# Script entry point: absl's app.run parses FLAGS before invoking main.
# NOTE(review): `main` is not defined in this excerpt -- confirm it exists
# in the full file.
if __name__ == "__main__":
  app.run(main)
| [
2,
15069,
13130,
383,
309,
22854,
37535,
46665,
13,
1439,
6923,
33876,
13,
201,
198,
2,
201,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
201,
198,
2,
345,
743,
407,
779,
428,
2393,
... | 3.306763 | 414 |
from .device import (ORTDeviceInfo, get_available_devices_info,
get_cpu_device_info)
from .InferenceSession import InferenceSession_with_device
| [
6738,
764,
25202,
1330,
357,
9863,
24728,
12360,
11,
651,
62,
15182,
62,
42034,
62,
10951,
11,
198,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
651,
62,
36166,
62,
25202,
62,... | 2.704918 | 61 |
# Train and persist an XGBoost classifier predicting six-month rider
# retention, then rank feature importance with recursive feature
# elimination (RFE).
import pandas as pd
import numpy as np
from ultimate_data_wrangling import data_cleaning
import xgboost as xgb
from joblib import dump
from sklearn.preprocessing import MinMaxScaler
from sklearn.feature_selection import RFE
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
# Random number seed to get more reproduceable results
np.random.seed(32)
# Calling data cleaning function to provide us with the dataframe
retention_df = data_cleaning()
# Scaling dates to prepare them for model
# NOTE(review): assumes 'signup_date' and 'last_trip_date' are already
# numeric (e.g. ordinal timestamps) -- MinMaxScaler cannot scale raw
# datetime objects; confirm in data_cleaning().
scaler = MinMaxScaler(feature_range=(0, 1))
scaled = scaler.fit_transform(retention_df[['signup_date', 'last_trip_date']].values)
retention_df[['signup_date', 'last_trip_date']] = scaled
# Setting up X and y from retention_df for model consumption
# (the trailing #N comments record the RFE ranks printed further below)
X = retention_df[[
'city', #1
'trips_in_first_30_days', #4
'signup_date', #8
'avg_rating_of_driver', #6
'avg_surge', #7
'phone', #1
'surge_pct', #2
'ultimate_black_user', #1
'weekday_pct', #3
'avg_dist', #5
'avg_rating_by_driver' #1
]]
y = retention_df['six_month_active']
# Setting up training and testing folds (70/30 split)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3)
# Model set up
clf = xgb.XGBClassifier()
clf.fit(X_train, y_train)
# Make predictions (round probabilities/margins to 0/1 class labels)
y_pred = clf.predict(X_test)
predictions = [round(value) for value in y_pred]
# Evaluate predictions
accuracy = accuracy_score(y_test, predictions)
print("Accuracy: %.2f%%" % (accuracy * 100.0))
# Persist the fitted model for later serving/analysis.
dump(clf, 'ultimate_data_challenge_model.joblib')
# Feature Selection for the classifier
estimator = clf
# NOTE(review): passing n_features_to_select positionally (the `4`) relies
# on an older scikit-learn signature; newer releases require it as a
# keyword argument.
selector = RFE(estimator, 4, step=1)
selector = selector.fit(X, y)
print("Feature Ranking: ", selector.ranking_)
# Seems like the most important features are: city, phone, ultimate_black_user, and avg_rating_by_driver
# Using only those features only lowers the predictive accuracy by less than 2%
| [
11748,
19798,
292,
355,
279,
67,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
8713,
62,
7890,
62,
18351,
27499,
1330,
1366,
62,
2375,
7574,
198,
11748,
2124,
70,
39521,
355,
2124,
22296,
198,
6738,
1693,
8019,
1330,
10285,
198,
6738,
... | 2.830861 | 674 |
from radionets.simulations.mnist import mnist_fft
from radionets.simulations.gaussians import simulate_gaussian_sources
from radionets.simulations.sampling import sample_frequencies
from radionets.simulations.point_sources import create_point_source_img
import click
from pathlib import Path
def create_fft_images(sim_conf):
    """Create fft source images and save them to h5 files.

    Dispatches on ``sim_conf["type"]`` ("mnist", "gaussians" or
    "point_sources") and forwards the matching simulation parameters.

    Parameters
    ----------
    sim_conf : dict
        dict holding simulation parameters
    """
    sim_type = sim_conf["type"]
    if sim_type == "mnist":
        mnist_fft(
            resource_path=sim_conf["resource"],
            out_path=sim_conf["data_path"],
            size=sim_conf["img_size"],
            bundle_size=sim_conf["bundle_size"],
            noise=sim_conf["noise"],
        )
    elif sim_type == "gaussians":
        # One set of bundles per dataset split.
        for split in ("train", "valid", "test"):
            simulate_gaussian_sources(
                data_path=sim_conf["data_path"],
                option=split,
                num_bundles=sim_conf["bundles_" + str(split)],
                bundle_size=sim_conf["bundle_size"],
                img_size=sim_conf["img_size"],
                num_comp_ext=sim_conf["num_components"],
                noise=sim_conf["noise"],
                noise_level=sim_conf["noise_level"],
                source_list=sim_conf["source_list"],
            )
    elif sim_type == "point_sources":
        for split in ("train", "valid", "test"):
            create_point_source_img(
                img_size=sim_conf["img_size"],
                bundle_size=sim_conf["bundle_size"],
                num_bundles=sim_conf["bundles_" + str(split)],
                path=sim_conf["data_path"],
                option=split,
                extended=sim_conf["add_extended"],
            )
def sample_fft_images(sim_conf):
    """Sample frequencies for the simulated fft images.

    Runs ``sample_frequencies`` with the settings taken from ``sim_conf``
    and, unless ``keep_fft_files`` is True, asks for confirmation before
    deleting the intermediate fft files under ``data_path``.
    """
    sample_frequencies(
        data_path=sim_conf["data_path"],
        amp_phase=sim_conf["amp_phase"],
        real_imag=sim_conf["real_imag"],
        specific_mask=sim_conf["specific_mask"],
        antenna_config=sim_conf["antenna_config"],
        lon=sim_conf["lon"],
        lat=sim_conf["lat"],
        steps=sim_conf["steps"],
        fourier=sim_conf["fourier"],
        compressed=sim_conf["compressed"],
        interpolation=sim_conf["interpolation"],
        multi_channel=sim_conf["multi_channel"],
        source_type=sim_conf["type"],
    )
    if sim_conf["keep_fft_files"] is not True:
        if click.confirm("Do you really want to delete the fft_files?", abort=False):
            # Delete every fft file of the configured data format.
            pattern = "*fft*." + str(sim_conf["data_format"])
            for candidate in Path(sim_conf["data_path"]).rglob(pattern):
                if candidate.is_file():
                    candidate.unlink()
| [
6738,
2511,
295,
1039,
13,
14323,
5768,
13,
10295,
396,
1330,
285,
77,
396,
62,
487,
83,
198,
6738,
2511,
295,
1039,
13,
14323,
5768,
13,
4908,
1046,
1547,
1330,
29308,
62,
4908,
31562,
62,
82,
2203,
198,
6738,
2511,
295,
1039,
13,
... | 2.008511 | 1,410 |
import os
from typing import Optional
import pytest
import torch
from pytest import approx
from torch.nn import Linear
from torch.nn.functional import mse_loss
from torch.optim import SGD
import ignite.distributed as idist
from ignite.engine import create_supervised_evaluator, create_supervised_trainer
from ignite.metrics import MeanSquaredError
@pytest.mark.skipif(not torch.cuda.is_available(), reason="Skip if no GPU")
@pytest.mark.skipif(idist.has_xla_support, reason="Skip if has PyTorch XLA package")
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" in os.environ, reason="Skip if no NUM_TPU_WORKERS in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
@pytest.mark.skipif(not torch.cuda.is_available(), reason="Skip if no GPU")
@pytest.mark.skipif(not torch.cuda.is_available(), reason="Skip if no GPU")
@pytest.mark.skipif(not torch.cuda.is_available(), reason="Skip if no GPU")
| [
11748,
28686,
198,
6738,
19720,
1330,
32233,
198,
198,
11748,
12972,
9288,
198,
11748,
28034,
198,
6738,
12972,
9288,
1330,
5561,
198,
6738,
28034,
13,
20471,
1330,
44800,
198,
6738,
28034,
13,
20471,
13,
45124,
1330,
285,
325,
62,
22462,... | 2.966258 | 326 |
# NOTE(review): purpose unclear from context -- appears to be a
# placeholder/demo assignment; verify whether it is still needed.
i_am_a_variable = 9
| [
198,
72,
62,
321,
62,
64,
62,
45286,
796,
860,
198
] | 1.909091 | 11 |
import subprocess
from datetime import datetime
from inspect import isclass
from typing import Union, Any, Type, Iterable, Optional
import typing
from dateutil import tz
from cattleman.constants import UNDEFINED
from cattleman.exceptions import TypeMismatchException
from cpk.constants import CANONICAL_ARCH
| [
11748,
850,
14681,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
6738,
10104,
1330,
318,
4871,
198,
6738,
19720,
1330,
4479,
11,
4377,
11,
5994,
11,
40806,
540,
11,
32233,
198,
198,
11748,
19720,
198,
6738,
3128,
22602,
1330,
256,
89,
... | 3.732558 | 86 |
#
# Several devices for PC are being considered or implemented.
# ==========================================================
# // port
# https://forum.micropython.org/viewtopic.php?f=2&t=3053
# I2C, https://github.com/pmp-p/I2C-Tiny-USB
# arduino + TinyPacks
# arduino sim https://github.com/netpipe/IrrlichtDemos/tree/master/Apps/ArduinoSim
# esp* board with (web)socket or UART
# blue pill + tiny usb stack
| [
2,
198,
2,
12168,
4410,
329,
4217,
389,
852,
3177,
393,
9177,
13,
198,
2,
46111,
4770,
2559,
28,
198,
198,
2,
220,
3373,
2493,
198,
2,
220,
3740,
1378,
27302,
13,
9383,
1773,
7535,
13,
2398,
14,
1177,
26652,
13,
10121,
30,
69,
2... | 2.93662 | 142 |
#!/usr/bin/python
from keras.layers import Activation,Dropout,Flatten,Dense,Conv2D,MaxPooling2D
from keras.callbacks import EarlyStopping, TensorBoard, ModelCheckpoint
from keras.models import Sequential
from datetime import datetime
import numpy as np
# Training/architecture constants, consumed by the model-building code
# elsewhere in this file (not shown in this excerpt).
PATIENCE = 10  # presumably EarlyStopping patience in epochs -- confirm
LOG_DIR_ROOT = "."  # root directory for TensorBoard logs
N_LAYERS = 4  # number of layers in the network
MIN_NEURONS = 20  # lower bound on layer width -- verify against model builder
MAX_NEURONS = 120  # upper bound on layer width -- verify against model builder
KERNEL = (3,3)  # convolution kernel size
EPOCHS = 150  # maximum number of training epochs
BATCH_SIZE = 200  # samples per gradient update
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
6738,
41927,
292,
13,
75,
6962,
1330,
13144,
341,
11,
26932,
448,
11,
7414,
41769,
11,
35,
1072,
11,
3103,
85,
17,
35,
11,
11518,
27201,
278,
17,
35,
198,
6738,
41927,
292,
13,
13345,
101... | 2.627586 | 145 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from django.dispatch import receiver
from django.db.models.signals import post_save
from .models import Book
@receiver(post_save, sender=Book)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
42625,
14208,
13,
6381,
17147,
1330,
9733,
198,
6738,
42625,
14208,
13,
9945,
13,
27530,
13,
12683,
874,
1330... | 2.838235 | 68 |
# Generated by Django 3.0.4 on 2020-06-10 14:12
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
513,
13,
15,
13,
19,
319,
12131,
12,
3312,
12,
940,
1478,
25,
1065,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 2.84375 | 32 |
from PIL import Image
from tools import box_generator, BLOCK_WIDTH, BLOCK_HEIGHT, ParameterException
| [
6738,
350,
4146,
1330,
7412,
198,
198,
6738,
4899,
1330,
3091,
62,
8612,
1352,
11,
9878,
11290,
62,
54,
2389,
4221,
11,
9878,
11290,
62,
13909,
9947,
11,
25139,
2357,
16922,
628
] | 3.21875 | 32 |
from django.contrib import admin
from .models import Images, Location, Category
# Register your models here.
admin.site.register(Location)
# NOTE(review): `ImageAdmin` is neither imported nor defined in this module,
# so this line raises NameError at import time -- verify where ImageAdmin
# should come from (or define a ModelAdmin subclass here).
admin.site.register(Images,admin_class=ImageAdmin)
admin.site.register(Category)
6738,
42625,
14208,
13,
3642,
822,
1330,
13169,
198,
6738,
764,
27530,
1330,
5382,
11,
13397,
11,
21743,
198,
198,
2,
17296,
534,
4981,
994,
13,
198,
28482,
13,
15654,
13,
30238,
7,
14749,
8,
198,
28482,
13,
15654,
13,
30238,
7,
293... | 3.666667 | 60 |
"""Grayscale object detection demo.

Detects white objects on a black background in the webcam feed and draws
a circle around each one.  The binarization thresholds and the accepted
contour-area range are tuned live through trackbars in the "Reguladores"
window.  Press ESC to quit.
"""
import cv2 as cv
import numpy as np


def nothing(_value):
    """No-op callback required by cv.createTrackbar."""


video_cap = cv.VideoCapture(0)
video_cap.set(3, 360)  # CAP_PROP_FRAME_WIDTH
video_cap.set(4, 640)  # CAP_PROP_FRAME_HEIGHT
video_fps = int(video_cap.get(cv.CAP_PROP_FPS))
cv.namedWindow("Reguladores")
cv.resizeWindow("Reguladores", 360, 300)
cv.createTrackbar("minimo", "Reguladores", 0, 255, nothing)
cv.createTrackbar("maximo", "Reguladores", 0, 255, nothing)
cv.createTrackbar("area_minimo", "Reguladores", 0, 300, nothing)
cv.createTrackbar("area_maximo", "Reguladores", 0, 300, nothing)
while video_cap.isOpened():
    sucesso, frame = video_cap.read()
    if not sucesso:
        # Camera returned no frame; stop instead of crashing in cvtColor.
        break
    frame_gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
    kernel = (5, 5)
    frame_blur = cv.GaussianBlur(frame_gray, kernel, 1)
    minimo = cv.getTrackbarPos("minimo", "Reguladores")
    maximo = cv.getTrackbarPos("maximo", "Reguladores")
    # BUG FIX: the trackbars were created as "area_minimo"/"area_maximo",
    # but the original code read "area_minino" -- a trackbar that does not
    # exist, so the area range was never picked up from the UI.
    area_minimo = cv.getTrackbarPos("area_minimo", "Reguladores")
    area_maximo = cv.getTrackbarPos("area_maximo", "Reguladores")
    _, frame_thresh = cv.threshold(frame_gray, minimo, maximo, cv.THRESH_BINARY)
    bordas = cv.Canny(frame_thresh, minimo, maximo)
    objetos, _ = cv.findContours(bordas, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)
    for objeto in objetos:
        x, y, w, h = cv.boundingRect(objeto)
        # Only mark contours whose area falls inside the configured range.
        if area_minimo < cv.contourArea(objeto) < area_maximo:
            print(len(objetos))
            cv.circle(frame, (x, y), int(h/2), (0, 255, 255), 1)
    cv.imshow("Janela com frame", frame)
    cv.imshow("Janela com bordas", bordas)
    cv.imshow("Janela com frame_thresh", frame_thresh)
    if cv.waitKey(video_fps) == 27:  # ESC
        break
cv.destroyAllWindows()
video_cap.release()
| [
7061,
6,
198,
738,
811,
64,
62,
26801,
316,
418,
62,
795,
3671,
6081,
390,
269,
259,
4496,
198,
738,
811,
64,
267,
672,
316,
418,
865,
1192,
418,
795,
1814,
78,
662,
1462,
304,
748,
268,
3099,
23781,
269,
8836,
81,
3129,
78,
257... | 2.184211 | 912 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.9 on 2016-09-03 10:43
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import modelcluster.fields
import wagtail.wagtailcore.fields
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
2980,
515,
416,
37770,
352,
13,
24,
13,
24,
319,
1584,
12,
2931,
12,
3070,
838,
25,
3559,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
19... | 2.829545 | 88 |
# -*- coding: utf-8 -*-
'''
The cp module is used to execute the logic used by the salt-cp command
line application, salt-cp is NOT intended to broadcast large files, it is
intended to handle text files.
Salt-cp can be used to distribute configuration files
'''
# Import python libs
from __future__ import print_function
from __future__ import absolute_import
import base64
import errno
import logging
import os
import re
import sys
# Import salt libs
import salt.client
import salt.utils.gzip_util
import salt.utils.itertools
import salt.utils.minions
import salt.utils.parsers
import salt.utils.platform
import salt.utils.stringutils
from salt.utils.verify import verify_log
import salt.output
# Import 3rd party libs
from salt.ext import six
log = logging.getLogger(__name__)
class SaltCPCli(salt.utils.parsers.SaltCPOptionParser):
    '''
    Run the salt-cp command line client.

    Thin CLI wrapper: parses the command-line options, configures file
    logging, then delegates the copy work to :class:`SaltCP`.
    '''
    def run(self):
        '''
        Execute salt-cp: parse args, set up logging, and run a SaltCP
        instance built from the parsed configuration.
        '''
        self.parse_args()
        # Setup file logging!
        self.setup_logfile_logger()
        verify_log(self.config)
        cp_ = SaltCP(self.config)
        cp_.run()
class SaltCP(object):
    '''
    Create a salt cp object, used to distribute simple files with salt.

    Files are read locally, optionally gzip-compressed, base64-encoded and
    pushed to the targeted minions in chunks via the ``cp.recv`` execution
    function.  Helper methods referenced below (``_list_files``, ``_mode``)
    and the module-level ``_get_remote_path`` are defined elsewhere in this
    file.
    '''
    def _recurse(self, path):
        '''
        Get a list of all specified files.

        Recursively walks ``path`` and returns a ``(files, empty_dirs)``
        tuple: ``files`` maps each file path to its mode, ``empty_dirs``
        lists directories that contain nothing.
        '''
        files = {}
        empty_dirs = []
        try:
            sub_paths = os.listdir(path)
        except OSError as exc:
            if exc.errno == errno.ENOENT:
                # Path does not exist
                sys.stderr.write('{0} does not exist\n'.format(path))
                sys.exit(42)
            elif exc.errno in (errno.EINVAL, errno.ENOTDIR):
                # Path is a file (EINVAL on Windows, ENOTDIR otherwise)
                files[path] = self._mode(path)
            # NOTE(review): any other OSError errno is silently swallowed
            # here and empty results are returned -- confirm this is
            # intentional.
        else:
            if not sub_paths:
                empty_dirs.append(path)
            for fn_ in sub_paths:
                files_, empty_dirs_ = self._recurse(os.path.join(path, fn_))
                files.update(files_)
                empty_dirs.extend(empty_dirs_)
        return files, empty_dirs
    def run(self):
        '''
        Make the salt client call.

        Streams every selected file to the targeted minions in
        base64-encoded chunks via ``cp.recv``, recreates empty
        directories, and displays the aggregated per-minion results.
        '''
        files, empty_dirs = self._list_files()
        dest = self.opts['dest']
        gzip = self.opts['gzip']
        tgt = self.opts['tgt']
        timeout = self.opts['timeout']
        selected_target_option = self.opts.get('selected_target_option')
        # Destination is a directory when copying multiple items or when
        # the destination string ends with a path separator.
        # NOTE(review): `dest_is_dir` and `parent` below are unused within
        # this excerpt -- verify whether later/removed code relied on them.
        dest_is_dir = bool(empty_dirs) \
            or len(files) > 1 \
            or bool(re.search(r'[\\/]$', dest))
        # Reader yields file contents chunk by chunk, compressing when
        # gzip was requested on the command line.
        reader = salt.utils.gzip_util.compress_file \
            if gzip \
            else salt.utils.itertools.read_file
        minions = salt.utils.minions.CkMinions(self.opts).check_minions(
            tgt,
            tgt_type=selected_target_option or 'glob')
        local = salt.client.get_local_client(self.opts['conf_file'])
        ret = {}
        parent = '..' + os.sep
        for fn_, mode in six.iteritems(files):
            remote_path = _get_remote_path(fn_)
            index = 1
            failed = {}
            for chunk in reader(fn_, chunk_size=self.opts['salt_cp_chunk_size']):
                # cp.recv expects base64 text; chunks after the first are
                # appended to the partially-written remote file.
                chunk = base64.b64encode(salt.utils.stringutils.to_bytes(chunk))
                append = index > 1
                log.debug(
                    'Copying %s to %starget \'%s\' as %s%s',
                    fn_,
                    '{0} '.format(selected_target_option)
                    if selected_target_option
                    else '',
                    tgt,
                    remote_path,
                    ' (chunk #{0})'.format(index) if append else ''
                )
                args = [
                    tgt,
                    'cp.recv',
                    [remote_path, chunk, append, gzip, mode],
                    timeout,
                ]
                if selected_target_option is not None:
                    args.append(selected_target_option)
                result = local.cmd(*args)
                if not result:
                    # Publish failed
                    msg = (
                        'Publish failed.{0} It may be necessary to '
                        'decrease salt_cp_chunk_size (current value: '
                        '{1})'.format(
                            ' File partially transferred.' if index > 1 else '',
                            self.opts['salt_cp_chunk_size'],
                        )
                    )
                    for minion in minions:
                        ret.setdefault(minion, {})[remote_path] = msg
                    break
                for minion_id, minion_ret in six.iteritems(result):
                    ret.setdefault(minion_id, {})[remote_path] = minion_ret
                    # Catch first error message for a given minion, we will
                    # rewrite the results after we're done iterating through
                    # the chunks.
                    if minion_ret is not True and minion_id not in failed:
                        failed[minion_id] = minion_ret
                index += 1
            # Report the first failure per minion rather than the result of
            # the last chunk.
            for minion_id, msg in six.iteritems(failed):
                ret[minion_id][remote_path] = msg
        for dirname in empty_dirs:
            remote_path = _get_remote_path(dirname)
            log.debug(
                'Creating empty dir %s on %starget \'%s\'',
                dirname,
                '{0} '.format(selected_target_option)
                if selected_target_option
                else '',
                tgt,
            )
            # A None chunk tells cp.recv to create the directory only.
            args = [tgt, 'cp.recv', [remote_path, None], timeout]
            if selected_target_option is not None:
                args.append(selected_target_option)
            for minion_id, minion_ret in six.iteritems(local.cmd(*args)):
                ret.setdefault(minion_id, {})[remote_path] = minion_ret
        salt.output.display_output(
            ret,
            self.opts.get('output', 'nested'),
            self.opts)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
7061,
6,
198,
464,
31396,
8265,
318,
973,
284,
12260,
262,
9156,
973,
416,
262,
8268,
12,
13155,
3141,
198,
1370,
3586,
11,
8268,
12,
13155,
318,
5626,
5292,
284,
7025... | 1.903186 | 3,202 |
# Load a trained QuickDraw-style classifier and its class-name list; the
# __main__ block below rasterizes stroke data and predicts the top classes.
from keras import models
import cairocffi as cairo
import numpy as np
import matplotlib.pyplot as plt
# load model
model = models.load_model('model_20_include_weight.h5')
# load model
# load class_name (one label per line)
class_names = []
with open('new_class_name.txt') as f:
    class_names = f.read().splitlines()
# print(class_names)
# load class_name
# ########## consumes a 3-D array (list of strokes, each with x/y point lists)
# line_diameter must be tuned depending on whether output is for Tk or the web
if __name__ == "__main__":
# 輸入陣列
vector_image = [[[118, 118, 119, 119, 120, 120, 120, 120, 121, 121, 121, 121, 121, 121, 121, 121, 121, 122, 122, 122, 122, 122, 122, 122, 122, 122, 122, 122, 122, 122, 122, 122, 122, 122], [107, 108, 108, 109, 110, 112, 113, 115, 117, 118, 119, 121, 122, 123, 124, 125, 127, 129, 131, 133, 134, 136, 137, 139, 140, 141, 142, 142, 143, 144, 145, 145, 146, 147]], [[106, 106, 106, 106, 106, 106, 106, 106, 106, 106, 106, 106, 106, 107, 107, 107, 108, 108, 108, 108, 109, 109, 109, 110, 110, 110, 111, 111, 112, 112, 112, 113, 113, 113, 114, 114, 115, 115, 115, 115, 116, 116, 116, 116, 116, 117, 117, 117, 117, 117, 117, 117, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 117, 117, 117, 117, 117, 116, 116, 116, 115, 115, 115, 114, 114, 114, 113, 113, 113, 113, 112, 112, 112, 111, 111, 110, 110, 109, 108, 108, 107, 107, 106, 106, 105, 104, 104, 104, 103, 102, 102, 101, 101, 100, 100, 99, 99, 99, 98, 98, 98, 97, 97, 97, 97, 97, 96, 96, 96, 96, 96, 96, 96, 96, 96, 96, 96, 96, 96, 96, 96, 96, 96, 96, 96, 96, 96, 96, 96, 96, 96, 96, 96, 96, 96, 96, 96, 96, 96, 97, 97, 97, 97, 97, 98, 98, 98, 99, 99, 99, 100, 100, 100, 101, 101, 102, 103, 103, 104, 104, 104, 105, 105, 106, 106, 107, 107, 107, 107, 108, 108, 108, 109, 109, 109, 110, 110, 110, 111, 111, 111, 112, 112, 112, 113, 113, 114, 114, 114, 114, 115, 116, 116, 116, 117, 117, 118, 119, 119, 120, 120, 121, 122, 122, 123, 124, 124, 124, 125, 125, 126, 126, 126, 127, 127, 128, 128, 128, 128, 128, 128, 129, 129, 129, 129, 129, 130, 130, 130, 130, 131, 131, 131, 131, 131, 131, 131, 131, 131, 131, 132, 132, 132, 132, 132, 132, 132, 132, 132, 132, 132, 132, 132, 132, 132, 132, 132, 132, 132, 132, 131, 131, 131, 130, 130, 129, 129, 128, 128, 127, 126, 126, 125, 124, 124, 123, 122, 121, 121, 120, 120, 118, 117, 117, 116, 115, 114, 113, 112, 112, 111, 110, 110, 109, 108, 107, 106, 106, 105, 104, 104, 103, 103, 102, 101, 101, 100, 99, 99, 99, 98, 97, 97, 96, 96, 95, 95, 95, 95, 94, 94, 94, 93, 93, 92, 92, 92, 91, 91, 
90, 90, 89, 89, 89, 88, 88, 87, 87, 86, 86, 86, 85, 85, 85, 84, 84, 84, 84, 84, 83, 83, 83, 82, 82, 82, 82, 82, 81, 81, 81, 81, 81, 81, 81, 81, 81, 81, 81, 81, 81, 81, 81, 81, 81, 81, 81, 81, 81, 81, 81, 81, 82, 82, 83, 83, 83, 84, 84, 84, 85, 85, 86, 86, 87, 87, 88, 88, 88, 88, 89, 90, 91, 91, 92, 92, 93, 94, 94, 95, 96, 96, 97, 97, 98, 98, 99, 99, 100, 101, 102, 102, 103, 104, 104, 105, 105, 106, 106, 106, 106, 107, 107, 108, 108, 109, 109, 109, 110, 110, 111, 112, 112, 113, 113, 114, 114, 115, 116, 116, 117, 118, 119, 119, 120, 120, 121, 122, 122, 123, 124, 125, 125, 126, 127, 127, 128, 128, 129, 129, 130, 130, 131, 131, 132, 132, 133, 133, 133, 134, 134, 135, 135, 136, 136, 136, 137, 137, 137, 137, 138, 138, 139, 139, 139, 140, 140, 140, 140, 140, 140, 141, 141, 141, 141, 141, 142, 142, 142, 142, 142, 142, 142, 142, 143, 143, 143, 143, 143, 143, 143, 143, 143, 143, 143, 143, 143, 143, 143, 143, 143, 143, 143, 143, 143, 143, 143, 143, 143, 143, 143, 143, 143, 143, 143, 143, 143, 142, 142, 142, 142, 142, 142, 142, 141, 141, 141, 141, 141, 140, 140, 140, 140, 140, 139, 139, 139, 139, 139, 138, 138, 138, 138, 137, 137, 136, 136, 136, 135, 135, 135, 134, 134, 134, 133, 133, 132, 132, 132, 131, 131, 130, 130, 129, 129, 128, 128, 128, 127, 126, 126, 125, 124, 124, 123, 123, 122, 121, 121, 120, 120, 119, 118, 117, 116, 116, 116, 115, 114, 114, 113, 112, 112, 112, 111, 111, 111, 110, 110, 110, 109, 109, 109], [76, 76, 75, 75, 75, 74, 73, 73, 72, 72, 71, 71, 70, 70, 69, 69, 68, 67, 67, 66, 66, 66, 66, 66, 65, 65, 65, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 65, 65, 66, 67, 68, 68, 69, 69, 69, 70, 70, 71, 71, 72, 73, 73, 74, 75, 76, 77, 77, 78, 78, 78, 79, 80, 80, 81, 81, 81, 82, 82, 83, 84, 84, 84, 84, 84, 85, 85, 85, 85, 85, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 85, 85, 84, 84, 84, 83, 83, 83, 82, 81, 80, 79, 79, 78, 78, 78, 77, 76, 75, 75, 74, 73, 73, 72, 71, 71, 70, 69, 69, 68, 67, 67, 66, 66, 
65, 65, 64, 64, 64, 63, 63, 62, 61, 61, 60, 60, 60, 59, 58, 58, 57, 56, 55, 54, 54, 53, 53, 52, 52, 51, 50, 50, 49, 49, 49, 48, 48, 48, 48, 48, 48, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 48, 48, 48, 48, 49, 49, 49, 49, 50, 50, 50, 51, 52, 52, 52, 53, 53, 54, 54, 54, 54, 55, 55, 56, 56, 57, 57, 58, 58, 58, 58, 58, 60, 60, 61, 61, 62, 63, 64, 64, 65, 66, 67, 68, 68, 69, 69, 71, 72, 72, 73, 74, 74, 75, 75, 76, 78, 78, 78, 79, 80, 80, 81, 81, 82, 83, 83, 84, 84, 85, 86, 87, 88, 88, 89, 90, 90, 92, 93, 93, 94, 94, 95, 96, 96, 96, 97, 97, 98, 98, 99, 99, 99, 99, 100, 101, 101, 101, 102, 102, 102, 102, 103, 103, 103, 103, 103, 103, 103, 103, 103, 103, 103, 103, 103, 103, 103, 103, 103, 103, 103, 103, 103, 103, 102, 102, 102, 101, 101, 101, 101, 101, 101, 100, 100, 99, 99, 98, 98, 98, 97, 97, 96, 95, 95, 94, 93, 92, 92, 91, 90, 90, 89, 89, 89, 88, 87, 87, 87, 86, 85, 84, 84, 83, 82, 81, 81, 80, 79, 78, 77, 75, 75, 74, 73, 72, 71, 70, 69, 68, 67, 66, 65, 64, 63, 62, 61, 60, 59, 58, 57, 56, 55, 55, 54, 53, 52, 52, 50, 49, 49, 48, 48, 47, 46, 46, 45, 44, 43, 43, 42, 41, 41, 40, 40, 39, 39, 38, 37, 37, 37, 37, 37, 36, 36, 35, 35, 35, 34, 34, 34, 34, 34, 34, 34, 34, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 34, 34, 34, 34, 34, 35, 35, 35, 36, 36, 36, 36, 36, 37, 37, 37, 37, 37, 37, 38, 38, 38, 39, 39, 39, 39, 40, 40, 40, 40, 40, 40, 41, 41, 41, 42, 42, 43, 43, 44, 44, 45, 45, 46, 46, 46, 46, 47, 48, 49, 49, 50, 50, 51, 51, 52, 53, 53, 54, 55, 55, 56, 57, 58, 58, 59, 60, 61, 61, 61, 62, 63, 64, 64, 65, 66, 66, 67, 68, 69, 70, 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 81, 82, 83, 84, 85, 85, 86, 87, 88, 89, 90, 90, 91, 92, 93, 93, 94, 94, 95, 96, 96, 96, 96, 97, 98, 98, 98, 98, 99, 99, 99, 99, 100, 100, 101, 101, 101, 102, 102, 103, 103, 104, 104, 104, 104, 104, 104, 105, 105, 105, 106, 107, 107, 107, 108, 108, 108, 109, 109, 109, 109, 109, 109, 110, 110, 110, 110, 110, 110, 110, 
110, 110, 111, 111, 112, 112, 112, 112, 112, 112, 113, 113, 113, 113, 113, 113, 113, 113, 114, 114, 114, 114, 114]]] # 三維陣列
# 輸入陣列
raster_image = vector_to_raster(vector_image)
# 前處理
img = raster_image.reshape(1, 28, 28, 1).astype('float32')
img /= 255
# 前處理
# plt.imshow(img.squeeze())
# 預測
pred = model.predict(img)[0]
print(f'各類預測值 = {pred}')
ind = (-pred).argsort()[:5]
top_5 = [class_names[x] for x in ind]
print(f'預測前五名: {top_5}')
# 預測
| [
6738,
41927,
292,
1330,
4981,
201,
198,
11748,
1275,
7058,
66,
487,
72,
355,
1275,
7058,
201,
198,
11748,
299,
32152,
355,
45941,
201,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
201,
198,
201,
198,
2,
3440,
2746,
... | 2.161651 | 3,223 |
# Generated by Django 2.2.12 on 2020-06-03 21:06
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
362,
13,
17,
13,
1065,
319,
12131,
12,
3312,
12,
3070,
2310,
25,
3312,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 2.875 | 32 |
# Demonstration of CPython reference counting and the `weakref` module.
# NOTE(review): Foo, Bar, demo and weak_ref_callback are defined elsewhere
# in this file (not visible in this excerpt).
import gc
import sys
import weakref
# Prints the reference count for an object.  The count also depends on how
# much the Python standard library itself uses the value -- that is why `1`
# has more references than `102332`.  The minimum is `3`, because
# `sys.getrefcount` creates temporary references of its own when called.
# Roughly, `3` means "the object is used in one place in the code and
# nowhere else".  Importing `numpy` and `matplotlib` would increase the use
# of small integers, so their reference counts would grow further.
print(sys.getrefcount(1)) # 180
print(sys.getrefcount(2)) # 127
print(sys.getrefcount(3)) # 40
print(sys.getrefcount(4)) # 75
print(sys.getrefcount(5)) # 32
print(sys.getrefcount(102332)) # 3
print(sys.getrefcount(10231132)) # 3
print('-' * 80)
# Approximate memory cost per type:
# Bytes type empty + scaling notes
# 24 int NA
# 28 long NA
# 37 str + 1 byte per additional character
# 52 unicode + 4 bytes per additional character
# 56 tuple + 8 bytes per additional item
# 72 list + 32 for first, 8 for each additional
# 232 set sixth item increases to 744; 22nd, 2280; 86th, 8424
# 280 dict sixth item increases to 1048; 22nd, 3352; 86th, 12568 *
# 120 func def does not include default args and other attrs
# 64 class inst has a __dict__ attr, same scaling as dict above
# 16 __slots__ class with slots has no dict, seems to store in
# mutable tuple-like structure.
# 904 class def has a proxy __dict__ structure for class attrs
# 104 old class makes sense, less stuff, has real dict though.
foo = Foo()
# Just the object at its address: `foo: <__main__.Foo object at 0x01D5B0D0>`
print('foo:', foo)
foo_weak_ref = weakref.ref(foo, weak_ref_callback)
# The weak reference with its address: `foo_weak_ref: <weakref at 0x03A41A00; to 'Foo' at 0x01D5B0D0>`
print(f'foo_weak_ref: {foo_weak_ref}')
# Retrieving the `foo` object through the weak reference: `foo_weak_ref(): <__main__.Foo object at 0x01D5B0D0>`
print(f'foo_weak_ref(): {foo_weak_ref()}')
# Number of weak references pointing at `foo`.  Equals `1`.
print(weakref.getweakrefcount(foo))
# Delete the object.  Since only a weak reference remains, the GC destroys it.
# First the `__del__` method runs: `Deleting <__main__.Foo object at 0x0143B0D0>`
# Then `weak_ref_callback` fires: `Reference: <weakref at 0x03813988; dead>`
del foo
# Fetching the object through the weak reference now yields `foo_weak_ref(): None`,
# because the object has been deleted.
print(f'foo_weak_ref(): {foo_weak_ref()}')
print('-' * 80)
# Instead of using `weakref.ref` directly, prefer a proxy.  Proxies can be
# used as if they were the original objects they refer to, so there is no
# need to call `ref()` to reach the object.
bar = Bar()
bar_weak_ref = weakref.ref(bar)
bar_proxy = weakref.proxy(bar)
print('bar:', bar)
print('bar_weak_ref():', bar_weak_ref()) # bar_weak_ref(): <__main__.Bar object at 0x037CF6D0>
print('bar_proxy:', bar_proxy) # bar_proxy: <__main__.Bar object at 0x037CF6D0>
# Accessing the object through the proxy after it has been deleted raises
# `ReferenceError: weakly-referenced object no longer exists`.
del bar # print('bar_proxy:', bar_proxy.data)
# Object caching.  `ref` and `proxy` are low-level: useful for weak
# references to individual objects and for letting the GC collect reference
# cycles.  To cache several objects, the most suitable APIs are
# `WeakKeyDictionary` and `WeakValueDictionary`.
# Uncomment to print the addresses of objects collected by the GC.
# gc.set_debug(gc.DEBUG_LEAK)
demo(dict)
print('---')
demo(weakref.WeakValueDictionary)
# TODO: cyclic references
| [
11748,
308,
66,
198,
11748,
25064,
198,
11748,
4939,
5420,
628,
198,
2,
12466,
240,
45035,
38857,
25443,
112,
18849,
20375,
12466,
118,
25443,
119,
18849,
141,
229,
16843,
21727,
20375,
38857,
15166,
220,
21727,
21727,
45035,
30143,
25443,
... | 1.462583 | 2,726 |
"""Calibration routine for DSA-110 calibration with CASA.
Author: Dana Simard, dana.simard@astro.caltech.edu, 2020/06
"""
import shutil
import os
import glob
import numpy as np
from scipy.optimize import curve_fit
import matplotlib.pyplot as plt
from astropy.coordinates import Angle
import pandas
import scipy # pylint: disable=unused-import
from casacore.tables import table
import dsautils.calstatus as cs
import dsacalib.utils as du
import dsacalib.ms_io as dmsio
import dsacalib.fits_io as dfio
import dsacalib.calib as dc
import dsacalib.plotting as dp
import dsacalib.fringestopping as df
import dsacalib.constants as ct
from dsacalib.ms_io import extract_vis_from_ms
import astropy.units as u # pylint: disable=wrong-import-order
from astropy.utils import iers # pylint: disable=wrong-import-order
# Point astropy's IERS lookups at the table bundled with dsacalib and
# disable the table max-age check, so calibration can run without network
# access.  The Time import deliberately comes after this configuration
# (hence the pylint disable).
iers.conf.iers_auto_url_mirror = ct.IERS_TABLE
iers.conf.auto_max_age = None
from astropy.time import Time # pylint: disable=wrong-import-position
def _check_path(fname):
"""Raises an AssertionError if the path `fname` does not exist.
Parameters
----------
fname : str
The file to check existence of.
"""
assert os.path.exists(fname), 'File {0} does not exist'.format(fname)
def triple_antenna_cal(
    obs_params, ant_params, throw_exceptions=True, sefd=False, logger=None
):
    r"""Calibrate visibilities from 3 antennas.

    Assumes visbilities are stored using dsa-10 or dsa-110 fits format.
    The caltable\_to\_etcd function should be able to handle this, but I haven't
    tested that yet.

    Parameters
    ----------
    obs_params : dict
        Observing parameters.  Read keys: 'fname' (the psrfits file to
        calibrate), 'msname' (base name for the measurement set to write),
        'cal' (the calibrator source instance) and 'utc_start'.
    ant_params : dict
        Antenna configuration.  Read keys: 'pt_dec' (pointing declination),
        'antenna_order', 'refant' (the reference antenna) and 'antpos'
        (antenna position file).
    throw_exceptions : Boolean
        If set to ``True``, exceptions will be thrown after being logged in
        syslog. If set to ``False``, the exceptions will not be thrown, but
        will still be logged in syslog. Defaults ``True``.
    sefd : Boolean
        If set to ``True``, enough data (60 minutes) will be included in the
        measurement set to calculate the off-source power (60 minutes) and the
        calibration solutions will be solved against a model of ones. If set to
        ``False``, only 10 minutes will be included in the measurement set and
        the calibration solutison will be solved against a sky model.
    logger : dsautils.dsa_syslog.DsaSyslogger() instance
        Logger to write messages too. If None, messages are printed.

    Returns
    -------
    status : int
        The status code of the pipeline. Decode with dsautils.calstatus.
    caltime : float
        The meridian crossing time of the source in MJD. If the input file
        could not be opened, ``None`` will be returned.
    """
    # TODO: Only keep one of the gain tables in the end, on a fine timescale.
    # `calstring` and `current_error` track the pipeline stage currently
    # executing so the except clause at the bottom can report where (and with
    # which status bits) a failure happened.
    status = 0
    current_error = cs.UNKNOWN_ERR
    calstring = 'initialization'
    try:
        fname = obs_params['fname']
        msname = obs_params['msname']
        cal = obs_params['cal']
        utc_start = obs_params['utc_start']
        pt_dec = ant_params['pt_dec']
        antenna_order = ant_params['antenna_order']
        refant = ant_params['refant']
        antpos = ant_params['antpos']
        # Remove files that we will create so that things will fail if casa
        # doesn't write a table.
        casa_dirnames = [
            '{0}.ms'.format(msname),
            '{0}_{1}_kcal'.format(msname, cal.name),
            '{0}_{1}_2kcal'.format(msname, cal.name),
            '{0}_{1}_bcal'.format(msname, cal.name),
            '{0}_{1}_gpcal'.format(msname, cal.name),
            '{0}_{1}_gacal'.format(msname, cal.name),
            '{0}_{1}_gcal_ant'.format(msname, cal.name)
        ]
        for dirname in casa_dirnames:
            if os.path.exists(dirname):
                shutil.rmtree(dirname)
        calstring = 'opening visibility file'
        current_error = (
            cs.INFILE_ERR |
            cs.INV_ANTNUM |
            cs.INV_GAINAMP_P1 |
            cs.INV_GAINAMP_P2 |
            cs.INV_GAINPHASE_P1 |
            cs.INV_GAINPHASE_P2 |
            cs.INV_DELAY_P1 |
            cs.INV_DELAY_P2 |
            cs.INV_GAINCALTIME |
            cs.INV_DELAYCALTIME
        )
        # For an SEFD measurement the off-source power is needed, so a full
        # hour is read; otherwise 10 minutes around transit suffices.
        caldur = 60*u.min if sefd else 10*u.min
        fobs, blen, bname, _tstart, _tstop, tsamp, vis, mjd, lst, \
            transit_idx, antenna_order = dfio.read_psrfits_file(
                fname,
                cal,
                antenna_order=antenna_order,
                autocorrs=True,
                dur=caldur,
                utc_start=utc_start,
                dsa10=False,
                antpos=antpos
            )
        caltime = mjd[transit_idx]
        calstring = 'read and verification of visibility file'
        current_error = (
            cs.CAL_MISSING_ERR |
            cs.INV_GAINAMP_P1 |
            cs.INV_GAINAMP_P2 |
            cs.INV_GAINPHASE_P1 |
            cs.INV_GAINPHASE_P2 |
            cs.INV_DELAY_P1 |
            cs.INV_DELAY_P2 |
            cs.INV_GAINCALTIME |
            cs.INV_DELAYCALTIME
        )
        nt = vis.shape[1]
        assert nt > 0, "calibrator not in file"
        current_error = (
            cs.INFILE_FORMAT_ERR |
            cs.INV_GAINAMP_P1 |
            cs.INV_GAINAMP_P2 |
            cs.INV_GAINPHASE_P1 |
            cs.INV_GAINPHASE_P2 |
            cs.INV_DELAY_P1 |
            cs.INV_DELAY_P2 |
            cs.INV_GAINCALTIME |
            cs.INV_DELAYCALTIME
        )
        nant = len(antenna_order)
        assert nant == 3, ("triple_antenna_cal only works with a triplet of "
                           "antennas")
        assert int(refant) in antenna_order, ("refant {0} not in "
                                              "visibilities".format(refant))
        calstring = "flagging of ms data"
        current_error = (
            cs.FLAGGING_ERR |
            cs.INV_GAINAMP_P1 |
            cs.INV_GAINAMP_P2 |
            cs.INV_GAINPHASE_P1 |
            cs.INV_GAINPHASE_P2 |
            cs.INV_DELAY_P1 |
            cs.INV_DELAY_P2 |
            cs.INV_GAINCALTIME |
            cs.INV_DELAYCALTIME
        )
        # maskf, _fraction_flagged = du.mask_bad_bins(
        #     vis,
        #     axis=2,
        #     thresh=2.0,
        #     # medfilt=True, # currently not supported
        #     nmed=129
        # )
        # maskt, _fraction_flagged = du.mask_bad_bins(
        #     vis,
        #     axis=1,
        #     thresh=2.0,
        #     # medfilt=True, # currently not supported
        #     nmed=129
        # )
        # Only per-pixel RFI flagging is active; the per-bin masks above are
        # disabled.  Flagged samples are zeroed in place.
        maskp, _fraction_flagged = du.mask_bad_pixels(
            vis,
            thresh=6.0,
            #mask=maskt*maskf
        )
        # mask = maskt*maskf*maskp
        # vis *= mask
        vis *= maskp
        calstring = 'fringestopping'
        current_error = (
            cs.FRINGES_ERR |
            cs.INV_GAINAMP_P1 |
            cs.INV_GAINAMP_P2 |
            cs.INV_GAINPHASE_P1 |
            cs.INV_GAINPHASE_P2 |
            cs.INV_DELAY_P1 |
            cs.INV_DELAY_P2 |
            cs.INV_GAINCALTIME |
            cs.INV_DELAYCALTIME
        )
        df.fringestop(vis, blen, cal, mjd, fobs, pt_dec)
        calstring = 'writing to ms'
        current_error = (
            cs.MS_WRITE_ERR |
            cs.INV_GAINAMP_P1 |
            cs.INV_GAINAMP_P2 |
            cs.INV_GAINPHASE_P1 |
            cs.INV_GAINPHASE_P2 |
            cs.INV_DELAY_P1 |
            cs.INV_DELAY_P2 |
            cs.INV_GAINCALTIME |
            cs.INV_DELAYCALTIME
        )
        # Broadcast the (time, freq) sky model over baseline and polarization
        # axes.  For SEFD runs no model is attached, so solutions are solved
        # against ones.
        amp_model = df.amplitude_sky_model(cal, lst, pt_dec, fobs)
        amp_model = np.tile(
            amp_model[np.newaxis, :, :, np.newaxis],
            (vis.shape[0], 1, 1, vis.shape[-1])
        )
        dmsio.convert_to_ms(
            cal,
            vis,
            mjd[0],
            '{0}'.format(msname),
            bname,
            antenna_order,
            tsamp,
            nint=25,
            antpos=antpos,
            dsa10=False,
            model=None if sefd else amp_model
        )
        _check_path('{0}.ms'.format(msname))
        calstring = 'flagging of ms data'
        current_error = (
            cs.FLAGGING_ERR |
            cs.INV_GAINAMP_P1 |
            cs.INV_GAINAMP_P2 |
            cs.INV_GAINPHASE_P1 |
            cs.INV_GAINPHASE_P2 |
            cs.INV_DELAY_P1 |
            cs.INV_DELAY_P2 |
            cs.INV_GAINCALTIME |
            cs.INV_DELAYCALTIME
        )
        error = dc.flag_zeros(msname)
        if error > 0:
            status = cs.update(status, ['flagging_err'])
            message = "Non-fatal error in zero flagging"
            if logger is not None:
                logger.info(message)
            else:
                print(message)
        # Antenna 8 polarization A is known-bad; flag it when present.
        if 8 in antenna_order:
            error = dc.flag_antenna(msname, '8', pol='A')
            if error > 0:
                status = cs.update(status, ['flagging_err'])
                message = "Non-fatal error in antenna 8 flagging"
                if logger is not None:
                    logger.info(message)
                else:
                    print(message)
        # Antenna-based delay calibration
        calstring = 'delay calibration'
        current_error = (
            cs.DELAY_CAL_ERR |
            cs.INV_GAINAMP_P1 |
            cs.INV_GAINAMP_P2 |
            cs.INV_GAINPHASE_P1 |
            cs.INV_GAINPHASE_P2 |
            cs.INV_DELAY_P1 |
            cs.INV_DELAY_P2 |
            cs.INV_GAINCALTIME |
            cs.INV_DELAYCALTIME
        )
        error = dc.delay_calibration(msname, cal.name, refants=[refant])
        if error > 0:
            status = cs.update(status, ['delay_cal_err'])
            message = 'Non-fatal error occured in delay calibration.'
            if logger is not None:
                logger.info(message)
            else:
                print(message)
        _check_path('{0}_{1}_kcal'.format(msname, cal.name))
        calstring = 'flagging of ms data'
        current_error = (
            cs.FLAGGING_ERR |
            cs.INV_GAINAMP_P1 |
            cs.INV_GAINAMP_P2 |
            cs.INV_GAINPHASE_P1 |
            cs.INV_GAINPHASE_P2 |
            cs.INV_GAINCALTIME
        )
        # NOTE(review): no operation sets `error` between the delay
        # calibration above and the two checks below -- both re-test the
        # stale value from dc.delay_calibration.  It looks like a
        # short-timescale delay-cal call and a timebin-flagging call were
        # removed here (the `_2kcal` table checked below is their product).
        # Confirm against version history.
        if error > 0:
            status = cs.update(status, ['flagging_err'])
            message = 'Non-fatal error occured in calculation of delays on short timescales.'
            if logger is not None:
                logger.info(message)
            else:
                print(message)
        if error > 0:
            status = cs.update(status, ['flagging_err'])
            message = 'Non-fatal error occured in flagging of bad timebins'
            if logger is not None:
                logger.info(message)
            else:
                print(message)
        _check_path('{0}_{1}_2kcal'.format(msname, cal.name))
        calstring = 'baseline-based bandpass and gain calibration'
        current_error = (
            cs.GAIN_BP_CAL_ERR |
            cs.INV_GAINAMP_P1 |
            cs.INV_GAINAMP_P2 |
            cs.INV_GAINPHASE_P1 |
            cs.INV_GAINPHASE_P2 |
            cs.INV_GAINCALTIME
        )
        error = dc.calibrate_gain(
            msname,
            cal.name,
            '{0}_{1}'.format(msname, cal.name),
            refant,
            tga='inf',
            tgp='inf',
            blbased=True,
            combined=False
        )
        if error > 0:
            status = cs.update(status, ['gain_bp_cal_err'])
            message = 'Non-fatal error occured in gain/bandpass calibration.'
            if logger is not None:
                logger.info(message)
            else:
                print(message)
        for fname in [
                '{0}_{1}_bcal'.format(msname, cal.name),
                '{0}_{1}_gpcal'.format(msname, cal.name),
                '{0}_{1}_gacal'.format(msname, cal.name)
        ]:
            _check_path(fname)
        calstring = 'calculation of antenna gains'
        # Combine amplitude and phase tables into complex antenna gains.
        gamp, _tamp, famp, _ant1, _ant2 = dmsio.read_caltable(
            '{0}_{1}_gacal'.format(msname, cal.name),
            cparam=True
        )
        gphase, _tphase, fphase, _ant1, _ant2 = dmsio.read_caltable(
            '{0}_{1}_gpcal'.format(msname, cal.name),
            cparam=True
        )
        gains = (gamp*gphase).squeeze(axis=2)
        flags = (famp*fphase).squeeze(axis=2)
        # This makes some assumptions about the bl order! Should add some
        # statements to make sure it's true
        gains, flags = dc.fill_antenna_gains(gains, flags)
        # These tables will contain the results on fine time-scales.
        # Write amplitude-only values back into the gacal table and
        # phase-only values back into the gpcal table.
        gamp = np.abs(gains).astype(np.complex128)
        gamp = gamp.reshape(gamp.shape[0], -1)
        # tb = cc.table()
        with table(
            '{0}_{1}_gacal'.format(msname, cal.name),
            readonly=False
        ) as tb:
            shape = np.array(tb.CPARAM[:]).shape
            tb.putcol('CPARAM', gamp.reshape(shape))
        gphase = np.exp(1.j*np.angle(gains))
        with table(
            '{0}_{1}_gpcal'.format(msname, cal.name),
            readonly=False
        ) as tb:
            shape = np.array(tb.CPARAM[:]).shape
            tb.putcol('CPARAM', gphase.reshape(shape))
        if not sefd:
            # reduce to a single value to use
            mask = np.ones(flags.shape)
            mask[flags == 1] = np.nan
            gains = np.nanmedian(gains*mask, axis=1, keepdims=True)
            flags = np.min(flags, axis=1, keepdims=True)
            if 8 in antenna_order:
                flags[..., 0] = 1
            # The template table provides the correct structure; only its
            # TIME/FLAG/CPARAM columns are overwritten below.
            shutil.copytree(
                '{0}/template_gcal_ant'.format(ct.PKG_DATA_PATH),
                '{0}_{1}_gcal_ant'.format(msname, cal.name)
            )
            # Write out a new gains that is a single value.
            with table(
                '{0}_{1}_gcal_ant'.format(msname, cal.name),
                readonly=False
            ) as tb:
                tb.putcol('TIME', np.ones(6)*np.median(mjd)*ct.SECONDS_PER_DAY)
                tb.putcol('FLAG', flags.squeeze(axis=1))
                tb.putcol('CPARAM', gains.squeeze(axis=1))
            _check_path('{0}_{1}_gcal_ant'.format(msname, cal.name))
    except Exception as exc:
        status = cs.update(status, current_error)
        du.exception_logger(logger, calstring, exc, throw_exceptions)
    # `caltime` is only bound if the input file was read successfully; fall
    # back to the current time so the caller always gets a float.
    try:
        caltime
    except NameError:
        caltime = Time.now().mjd
    return status, caltime
def plot_solutions(
    msname, calname, figure_path, show_plots=False, logger=None
):
    r"""Plots the antenna delay, gain and bandpass calibration solutions.

    Creates separate files for all solutions. To create one plot with all
    solutions, use plotting.summary_plot.

    Parameters
    ----------
    msname : str
        The name of the measurement set. Used to identify the calibration
        tables.
    calname : str
        The name of the calibrator. Used to identify the calibration tables.
    figure_path : str
        The location (path prefix) at which to save the figures.
    show_plots : boolean
        If False, plots are closed after being saved. Defaults False.
    logger : dsautils.dsa_syslog.DsaSyslogger() instance
        Logger to write messages too. If None, messages are printed.
    """
    # The three plotters share an identical call signature and failure
    # handling; drive them from a table instead of repeating the try/except
    # three times.  The description strings reproduce the original log
    # messages exactly.
    plotters = [
        (dp.plot_antenna_delays, 'antenna delays'),
        (dp.plot_gain_calibration, 'gain calibration solutions'),
        (dp.plot_bandpass, 'bandpass calibration solutions'),
    ]
    for plotter, description in plotters:
        try:
            _ = plotter(
                msname,
                calname,
                outname=figure_path,
                show=show_plots
            )
        except RuntimeError:
            # Plotting failures are non-fatal: report and move on to the
            # next solution type.
            message = 'Plotting {0} failed for {1}'.format(
                description, msname
            )
            if logger is not None:
                logger.info(message)
            else:
                print(message)
def calibration_head(obs_params, ant_params, write_to_etcd=False,
                     throw_exceptions=None, sefd=False, logger=None):
    """Controls calibration of a dsa10 or dsa110 dataset.

    After calibration, results are written to etcd.

    Parameters
    ----------
    obs_params : list
        The observing parameters.
    ant_params : list
        The antenna configuration.
    write_to_etcd : boolean
        If set to ``True``, the results of the calibration are pushed to etcd.
        Defaults ``False``.
    throw_exceptions : boolean
        If set to ``False``, exceptions are not raised after being logged to
        syslog. Instead, `calibration_head` and `triple_antenna_cal` return the
        status value. If set to ``None``, `throw_exceptions` will be set to
        ``not write_to_etcd``.
    sefd : boolean
        If set to ``True``, the solutions will be solved against a model of
        ones in order to allow fitting of the source pass to the antenna gains
        and 60 minutes will be saved to the measurement set. If set to
        ``False``, a sky model will be used in calibration and only 10 minutes
        of data is saved to the measurement set.
    logger : dsautils.dsa_syslog.DsaSyslogger() instance
        Logger to write messages too. If None, messages are printed.

    Returns
    -------
    int
        The status code. Decode with dsautils.calstatus.
    """
    def _emit(message):
        # Route a status message to the syslog logger when available,
        # falling back to stdout otherwise.
        if logger is not None:
            logger.info(message)
        else:
            print(message)

    if throw_exceptions is None:
        throw_exceptions = not write_to_etcd
    _emit(
        'Beginning calibration of ms {0}.ms (start time {1}) using source {2}'.format(
            obs_params['msname'],
            obs_params['utc_start'].isot,
            obs_params['cal'].name
        )
    )
    status, caltime = triple_antenna_cal(obs_params, ant_params,
                                         throw_exceptions, sefd, logger=logger)
    _emit(
        'Ending calibration of ms {0}.ms (start time {1}) using source {2} with status {3}'.format(
            obs_params['msname'], obs_params['utc_start'].isot,
            obs_params['cal'].name, status
        )
    )
    print('Status: {0}'.format(cs.decode(status)))
    print('')
    if write_to_etcd:
        dmsio.caltable_to_etcd(
            obs_params['msname'], obs_params['cal'].name,
            ant_params['antenna_order'], caltime, status, logger=logger
        )
    return status
def _gauss_offset(xvals, amp, mean, sigma, offset):
"""Calculates the value of a Gaussian at the locations `x`.
Parameters
----------
xvals : array
The x values at which to evaluate the Gaussian.
amp, mean, sigma, offset : float
Define the Gaussian: amp * exp(-(x-mean)**2/(2 sigma**2)) + offset
Returns
-------
array
The values of the Gaussian function defined evaluated at xvals.
"""
return amp*np.exp(-(xvals-mean)**2/(2*sigma**2))+offset
def _gauss(xvals, amp, mean, sigma):
"""Calculates the value of a Gaussian at the locations `x`.
Parameters
----------
xvals : array
The x values at which to evaluate the Gaussian.
amp, mean, sigma : float
Define the Gaussian: amp * exp(-(x-mean)**2/(2 sigma**2))
Returns
-------
array
The values of the Gaussian function defined evaluated at xvals.
"""
return _gauss_offset(xvals, amp, mean, sigma, 0.)
def calculate_sefd(
    msname, cal, fmin=None, fmax=None, baseline_cal=False, showplots=False,
    msname_delaycal=None, calname_delaycal=None, halfpower=False, pols=None
):
    r"""Calculates the SEFD from a measurement set.

    The measurement set must have been calibrated against a model of ones and
    must include autocorrelations.

    Parameters
    ----------
    msname : str
        The measurement set name. The measurement set `msname`.ms will
        be opened.
    cal : src class instance
        The calibrator source. Will be used to identify the correct
        calibration tables. The table `msname`\_`cal.name`\_2gcal will
        be opened.
    fmin : float
        The lowest frequency to consider when calculating the off-source power
        to use in the SEFD calculation, in GHz. Channels below this frequency
        will be excluded. Defaults ``None`` (no lower cut).
    fmax : float
        The greatest frequency to consider when calculating the off-source
        power to use in the SEFD calculation, in GHz. Channels above this
        frequency will be excluded. Defaults ``None`` (no upper cut).
    baseline_cal : Boolean
        Set to ``True`` if the gain tables were derived using baseline-based
        calibration. Set to ``False`` if the gain tables were derived using
        antenna-based calibration. Defaults ``False``.
    showplots : Boolean
        If set to ``True``, plots will be generated that show the Gaussian fits
        to the gains. Defaults ``False``.
    msname_delaycal : str
        The name of the measurement set from which delay solutions should be
        applied. Defaults to `msname`.
    calname_delaycal : str
        The name of the calibrator source from which delay solutions should be
        applied. Defaults to `cal.name`.
    halfpower : Boolean
        If True, will calculate the sefd using the half-power point instead of
        using the off-source power. Defaults False.
    pols : list
        The labels of the polarization axes. Defaults ['B', 'A'].

    Returns
    -------
    antenna_names : list
        The names of the antennas in their order in `sefds`.
    sefds : ndarray
        The SEFD of each antenna/polarization pair, in Jy. Dimensions (antenna,
        polarization).
    ant_gains : ndarray
        The antenna gains in 1/Jy. Dimensions (antenna, polarization).
    ant_transit_time : ndarray
        The meridian transit time of the source as seen by each antenna/
        polarization pair, in MJD. Dimensions (antenna, polarization).
    fref : float
        The reference frequency of the SEFD measurements in GHz.
    hwhms : float
        The hwhms of the calibrator transits in days.
    """
    # Change so figures saved if showplots is False
    if pols is None:
        pols = ['B', 'A']
    if msname_delaycal is None:
        msname_delaycal = msname
    if calname_delaycal is None:
        calname_delaycal = cal.name
    npol = 2
    # Get the visibilities (for autocorrs)
    dc.apply_delay_bp_cal(msname, calname_delaycal, msnamecal=msname_delaycal,
                          blbased=baseline_cal)
    vis, tvis, fvis, flag, ant1, ant2, pt_dec, _, _ = dmsio.extract_vis_from_ms(
        msname, 'CORRECTED_DATA')
    # Convert flags to a NaN mask, then keep only autocorrelations
    # (baselines where ant1 == ant2).
    mask = (1-flag).astype(float)
    mask[mask < 0.5] = np.nan
    vis = vis*mask
    vis = vis[ant1 == ant2, ...]
    antenna_order = ant1[ant1 == ant2]
    nant = len(antenna_order)
    # Note that these are antenna idxs, not names
    # Open the gain files and read in the gains
    gain, time, flag, ant1, ant2 = dmsio.read_caltable(
        '{0}_{1}_2gcal'.format(msname, cal.name), cparam=True)
    gain[flag] = np.nan
    antenna, gain = dmsio.get_antenna_gains(gain, ant1, ant2)
    gain = 1/gain
    antenna = list(antenna)
    # Re-order the gains to match the autocorrelation antenna ordering.
    idxs = [antenna.index(ant) for ant in antenna_order]
    gain = gain[idxs, ...]
    assert gain.shape[0] == nant
    # Power gain (|g|^2), averaged over frequency.
    gain = np.abs(gain*np.conjugate(gain))
    gain = np.abs(np.nanmean(gain, axis=2)).squeeze(axis=2)
    # Restrict the visibilities to the requested frequency window.
    idxl = np.searchsorted(fvis, fmin) if fmin is not None else 0
    idxr = np.searchsorted(fvis, fmax) if fmax is not None else vis.shape[-2]
    fref = np.median(fvis[idxl:idxr])
    if idxl < idxr:
        vis = vis[..., idxl:idxr, :]
    else:
        vis = vis[..., idxr:idxl, :]
    # imag_fraction = np.nanmean((vis.imag/vis.real).reshape(nant, -1),
    #                           axis=-1)
    # assert np.nanmax(np.abs(imag_fraction) < 1e-4), ("Autocorrelations have "
    #                                                 "non-negligable imaginary "
    #                                                 "components.")
    vis = np.abs(vis)
    # Complex gain includes an extra relative delay term
    # in the phase, but we really only want the amplitude
    # We will ignore the phase for now
    ant_gains_on = np.zeros((nant, npol))
    eant_gains_on = np.zeros((nant, npol))
    ant_transit_time = np.zeros((nant, npol))
    eant_transit_time = np.zeros((nant, npol))
    ant_transit_width = np.zeros((nant, npol))
    eant_transit_width = np.zeros((nant, npol))
    offbins_before = np.zeros((nant, npol), dtype=int)
    offbins_after = np.zeros((nant, npol), dtype=int)
    autocorr_gains_off = np.zeros((nant, npol))
    ant_gains = np.zeros((nant, npol))
    sefds = np.zeros((nant, npol))
    hwhms = np.zeros((nant, npol))
    # Predicted transit time (offset from time[0], in days) from the source
    # hour angle at the start of the observation.
    expected_transit_time = (
        Time(time[0], format='mjd')
        -cal.direction.hadec(
            obstime=time[0]
        )[0]*ct.SECONDS_PER_SIDEREAL_DAY*u.s/(2*np.pi)
    ).mjd-time[0]
    max_flux = df.amplitude_sky_model(
        cal,
        cal.ra.to_value(u.rad),
        pt_dec,
        fref
    )
    if showplots:
        nx = 3
        ny = nant//nx
        if nant%nx != 0:
            ny += 1
        _fig, ax = plt.subplots(
            ny, nx, figsize=(8*nx, 8*ny), sharey=True
        )
        ccyc = plt.rcParams['axes.prop_cycle'].by_key()['color']
        ax = ax.flatten()
    # Fit a Gaussian to the gains
    for i in range(nant):
        for j in range(npol):
            if showplots:
                ax[i].plot(time-time[0], gain[i, :, j], '.', color=ccyc[j])
            initial_params = [np.max(gain[i, :, j]), expected_transit_time,
                              0.0035] #, 0]
            try:
                x = time-time[0]
                y = gain[i, :, j]
                idx = ~np.isnan(y)
                # NOTE(review): len(idx) is the length of the boolean mask
                # (always nt), not the count of valid points -- this was
                # presumably meant to be idx.sum() >= 4.  Confirm.
                assert len(idx) >= 4
                params, cov = curve_fit(_gauss, x[idx], y[idx],
                                        p0=initial_params)
            except (RuntimeError, ValueError, AssertionError):
                # Fall back to the initial guess (with zero covariance) when
                # the fit fails.
                params = initial_params.copy()
                cov = np.zeros((len(params), len(params)))
            ant_gains_on[i, j] = params[0]#+params[3]
            ant_gains[i, j] = ant_gains_on[i, j]/max_flux
            eant_gains_on[i, j] = np.sqrt(cov[0, 0])#+np.sqrt(cov[3, 3])
            ant_transit_time[i, j] = time[0]+params[1]
            eant_transit_time[i, j] = np.sqrt(cov[1, 1])
            ant_transit_width[i, j] = params[2]
            eant_transit_width[i, j] = np.sqrt(cov[2, 2])
            if not halfpower:
                # Off-source power: median autocorrelation more than 3 sigma
                # away from the fitted transit.
                offbins_before[i, j] = np.searchsorted(
                    time, ant_transit_time[i, j]-ant_transit_width[i, j]*3)
                offbins_after[i, j] = len(time)-np.searchsorted(
                    time, ant_transit_time[i, j]+ant_transit_width[i, j]*3)
                idxl = np.searchsorted(
                    tvis, ant_transit_time[i, j]-ant_transit_width[i, j]*3)
                idxr = np.searchsorted(
                    tvis, ant_transit_time[i, j]+ant_transit_width[i, j]*3)
                autocorr_gains_off[i, j] = np.nanmedian(
                    np.concatenate(
                        (vis[i, :idxl, :, j], vis[i, idxr:, :, j]), axis=0))
                sefds[i, j] = autocorr_gains_off[i, j]/ant_gains[i, j]
            else:
                # Half-power method: sample the autocorrelations around the
                # half-width-half-max points and subtract half the source flux.
                hwhm = np.sqrt(2*np.log(2))*ant_transit_width[i, j]
                idxl = np.searchsorted(tvis, ant_transit_time[i, j]-hwhm)
                idxr = np.searchsorted(tvis, ant_transit_time[i, j]+hwhm)
                autocorr_gains_off[i, j] = np.nanmedian(
                    np.concatenate(
                        (vis[i, idxl-10:idxl+10, :, j],
                         vis[i, idxr-10:idxr+10, :, j]), axis=0))
                sefds[i, j] = (
                    autocorr_gains_off[i, j]/ant_gains[i, j]- max_flux/2
                )
                hwhms[i, j] = hwhm
            if showplots:
                ax[i].plot(
                    time-time[0],
                    _gauss(time-time[0], *params),
                    '-',
                    color=ccyc[j],
                    label='{0} {1}: {2:.0f} Jy; {3:.03f} min'.format(
                        antenna_order[i]+1,
                        pols[j],
                        sefds[i, j],
                        (
                            ant_transit_time[i, j]
                            -time[0]
                            -expected_transit_time
                        )*ct.SECONDS_PER_DAY/60
                    )
                )
                ax[i].legend()
                # ax[i].axvline(expected_transit_time, color='k')
                ax[i].set_xlabel("Time (d)")
                ax[i].set_ylabel("Unnormalized power")
    if showplots:
        max_gain = np.nanmax(ant_gains_on)
        ax[0].set_ylim(-0.1*max_gain, 1.1*max_gain)
    # antenna_order holds 0-based indices; +1 converts to antenna names.
    return antenna_order+1, sefds, ant_gains, ant_transit_time, fref, hwhms
def dsa10_cal(fname, msname, cal, pt_dec, antpos, refant, badants=None):
    """Calibrate dsa10 data.

    Reads the psrfits file, fringestops, writes a measurement set with a sky
    model attached, then runs delay and gain/bandpass calibration.

    Parameters
    ----------
    fname : str
        The fits file containing the correlated dsa10 data.
    msname : str
        The measurement set containing the correlated dsa10 data.
    cal : dsautils.src instance
        The calibrator source.
    pt_dec : float
        The pointing declination of the array in radians.
    antpos : str
        The path to the ITRF file containing the antenna positions.
    refant : str or int
        The reference antenna name (if str) or index (if int).
    badants : list(str)
        The names of antennas that should be flagged before calibration.
    """
    # TODO: get header information from the ms instead of the fits file.
    if badants is None:
        badants = []
    # Remove anything a previous run created so a missing table means casa
    # failed to write it.
    for file_path in ['{0}.ms'.format(msname),
                      '{0}_{1}_kcal'.format(msname, cal.name),
                      '{0}_{1}_gacal'.format(msname, cal.name),
                      '{0}_{1}_gpcal'.format(msname, cal.name),
                      '{0}_{1}_bcal'.format(msname, cal.name),
                      '{0}_{1}_2kcal'.format(msname, cal.name)]:
        if os.path.exists(file_path):
            shutil.rmtree(file_path)
    fobs, blen, bname, tstart, _tstop, tsamp, vis, mjd, lst, _transit_idx, \
        antenna_order = dfio.read_psrfits_file(
            fname, cal, dur=10*u.min, antpos=antpos, badants=badants)
    df.fringestop(vis, blen, cal, mjd, fobs, pt_dec)
    # Broadcast the (time, freq) sky model over baseline and polarization.
    amp_model = df.amplitude_sky_model(cal, lst, pt_dec, fobs)
    amp_model = np.tile(amp_model[np.newaxis, :, :, np.newaxis],
                        (vis.shape[0], 1, 1, vis.shape[-1]))
    dmsio.convert_to_ms(cal, vis, tstart, msname, bname, antenna_order,
                        tsamp=tsamp, nint=25, antpos=antpos,
                        model=amp_model)
    _check_path('{0}.ms'.format(msname))
    dc.flag_zeros(msname)
    # Antenna 8 polarization A is known-bad; flag it when present.
    if '8' in antenna_order:
        dc.flag_antenna(msname, '8', pol='A')
    dc.delay_calibration(msname, cal.name, [refant])
    _check_path('{0}_{1}_kcal'.format(msname, cal.name))
    dc.gain_calibration(
        msname,
        cal.name,
        refant=refant,
        forsystemhealth=True
    )
    for tbl in ['gacal', 'gpcal', 'bcal']:
        _check_path('{0}_{1}_{2}'.format(msname, cal.name, tbl))
def flag_pixels(msname, thresh=6.0, logger=None):
    """Flags pixels using dsautils.mask_bad_pixels.

    The new flags are OR'ed with the existing flags and written back to the
    FLAG column of the measurement set.

    Parameters
    ----------
    msname : str
        The path to the measurement set. Opens `msname`.ms
    thresh : float
        The RFI threshold in units of standard deviation. Anything above
        thresh*stddev + mean will be flagged.
    logger : dsautils.dsa_syslog.DsaSyslogger() instance
        Logger to write messages too. If None, messages are printed.
        Currently only referenced by the commented-out reporting block below.
    """
    # Flag RFI - only for single spw
    vis, _, _, flags, ant1, ant2, _, _, orig_shape = extract_vis_from_ms(
        msname,
    )
    good_pixels, fraction_flagged = du.mask_bad_pixels(
        vis.squeeze(2),
        mask=~flags.squeeze(2),
        thresh=thresh
    )
    # # Not properly account for shape - getting repeat messages
    # (idx1s, idx2s) = np.where(fraction_flagged > 0.3)
    # for idx1 in idx1s:
    #     for idx2 in idx2s:
    #         message = \
    #             'Baseline {0}-{1} {2}: {3} percent of data flagged'.format(
    #                 ant1[idx1],
    #                 ant2[idx1],
    #                 'A' if idx2==1 else 'B',
    #                 fraction_flagged[idx1, idx2]*100
    #             )
    #         if logger is not None:
    #             logger.info(message)
    #         else:
    #             print(message)
    # Combine the new per-pixel flags with the existing ones (boolean OR via
    # addition) and restore the spw axis dropped by squeeze above.
    flags = flags + ~good_pixels[:, :, np.newaxis, :, :]
    if orig_shape[0] == 'time':
        flags = flags.swapaxes(0, 1)
    with table('{0}.ms'.format(msname), readonly=False) as tb:
        shape = np.array(tb.getcol('FLAG')[:]).shape
        tb.putcol('FLAG', flags.reshape(shape))
def flag_antennas_using_delays(
    antenna_delays, kcorr, msname, kcorr_thresh=0.3, logger=None
):
    """Flags antennas by comparing the delay on short times to the delay cal.

    An antenna/polarization is flagged when the fraction of time bins whose
    short-timescale delay differs from the delay-calibration solution by more
    than 1 ns exceeds `kcorr_thresh`.

    Parameters
    ----------
    antenna_delays : ndarray
        The antenna delays from the 2kcal calibration file, calculated on short
        timescales.
    kcorr : ndarray
        The antenna delays from the kcal calibration file, calculated over the
        entire calibration pass.
    msname : str
        The path to the measurement set. Will open `msname`.ms
    kcorr_thresh : float
        The maximum tolerated fraction of time bins that are discrepant
        (by more than the hard-coded 1 ns) before the antenna/polarization
        is flagged. Defaults 0.3.
    logger : dsautils.dsa_syslog.DsaSyslogger() instance
        Logger to write messages too. If None, messages are printed.
    """
    error = 0
    # Fraction of time bins per (antenna, polarization) that deviate from the
    # delay-cal solution by more than 1 ns.
    percent_bad = (
        np.abs(antenna_delays-kcorr) > 1
    ).sum(1).squeeze(1).squeeze(1)/antenna_delays.shape[1]
    for i in range(percent_bad.shape[0]):
        for j in range(percent_bad.shape[1]):
            if percent_bad[i, j] > kcorr_thresh:
                # NOTE(review): elsewhere in this module dc.flag_antenna
                # returns a code that is > 0 on failure, so `not result`
                # increments `error` on *success* -- this looks inverted.
                # Confirm the intended return convention of dc.flag_antenna.
                error += not dc.flag_antenna(msname, '{0}'.format(i+1),
                                             pol='A' if j==0 else 'B')
                message = 'Flagged antenna {0}{1} in {2}'.format(
                    i+1, 'A' if j==0 else 'B', msname
                )
                if logger is not None:
                    logger.info(message)
                else:
                    print(message)
    return error
def calibrate_measurement_set(
    msname, cal, refants, throw_exceptions=True, bad_antennas=None,
    bad_uvrange='2~27m', keepdelays=False, forsystemhealth=False,
    interp_thresh=1.5, interp_polyorder=7, blbased=False, manual_flags=None,
    logger=None
):
    r"""Calibrates the measurement set.

    Calibration can be done with the aim of monitoring system health (set
    `forsystemhealth=True`), obtaining beamformer weights (set
    `forsystemhealth=False` and `keepdelays=False`), or obtaining delays (set
    `forsystemhealth=False` and `keepdelays=True`, new beamformer weights will
    be generated as well).

    Parameters
    ----------
    msname : str
        The name of the measurement set. Will open `msname`.ms
    cal : dsacalib.utils.src instance
        The calibration source. Calibration tables will begin with
        `msname`\_`cal.name`
    refants : str, int or list
        The reference antenna name (if str) or index (if int) for calibration.
        A list is also accepted, in which case its first entry is used as the
        primary reference antenna.
    throw_exceptions : bool
        If set to False, exceptions will not be thrown, although they will be
        logged to syslog. Defaults True.
    bad_antennas : list(str)
        Antennas (names) to be flagged before calibration.
    bad_uvrange : str
        Baselines with lengths within bad_uvrange will be flagged before
        calibration. Must be a casa-understood string with units.
    keepdelays : bool
        Only used if `forsystemhealth` is False. If `keepdelays` is set to
        False and `forsystemhealth` is set to False, then delays are integrated
        into the bandpass solutions and the kcal table is set to all zeros. If
        `keepdelays` is set to True and `forsystemhealth` is set to False, then
        delays are kept at 2 nanosecond resolution. If `forsystemhealth` is
        set to True, delays are kept at full resolution regardless of the
        keepdelays parameter. Defaults False.
    forsystemhealth : bool
        Set to True for full-resolution delay and bandpass solutions to use to
        monitor system health, or to False to generate beamformer weights and
        delays. Defaults False.
    interp_thresh: float
        Used if `forsystemhealth` is False, when smoothing bandpass gains.
        The gain amplitudes and phases are fit using a polynomial after any
        points more than interp_thresh*std away from the median-filtered trend
        are flagged.
    interp_polyorder : int
        Used if `forsystemhealth` is False, when smoothing bandpass gains.
        The gain amplitudes and phases are fit using a polynomial of order
        interp_polyorder.
    blbased : boolean
        Set to True for baseline-based calibration, False for antenna-based
        calibration.
    manual_flags : list(str)
        Include any additional flags to be done prior to calibration, as
        CASA-understood strings.
    logger : dsautils.dsa_syslog.DsaSyslogger() instance
        Logger to write messages too. If None, messages are printed.

    Returns
    -------
    int
        A status code. Decode with dsautils.calstatus
    """
    # Normalize `refants` to a list, keeping the first entry as the primary
    # reference antenna used by the gain calibration.
    if isinstance(refants, (int,str)):
        refant = refants
        refants = [refant]
    else:
        refant = refants[0]
    print('entered calibration')
    # `calstring` and `current_error` track the stage currently executing so
    # the except clause at the bottom can report where a failure happened.
    status = 0
    current_error = cs.UNKNOWN_ERR
    calstring = 'initialization'
    try:
        # Remove files that we will create so that things will fail if casa
        # doesn't write a table.
        print('removing files')
        tables_to_remove = [
            '{0}_{1}_2kcal'.format(msname, cal.name),
            '{0}_{1}_kcal'.format(msname, cal.name),
            '{0}_{1}_bkcal'.format(msname, cal.name),
            '{0}_{1}_gacal'.format(msname, cal.name),
            '{0}_{1}_gpcal'.format(msname, cal.name),
            '{0}_{1}_bcal'.format(msname, cal.name)
        ]
        if forsystemhealth:
            tables_to_remove += [
                '{0}_{1}_2gcal'.format(msname, cal.name)
            ]
        for path in tables_to_remove:
            if os.path.exists(path):
                shutil.rmtree(path)
        print('flagging of ms data')
        calstring = "flagging of ms data"
        current_error = (
            cs.FLAGGING_ERR |
            cs.INV_GAINAMP_P1 |
            cs.INV_GAINAMP_P2 |
            cs.INV_GAINPHASE_P1 |
            cs.INV_GAINPHASE_P2 |
            cs.INV_DELAY_P1 |
            cs.INV_DELAY_P2 |
            cs.INV_GAINCALTIME |
            cs.INV_DELAYCALTIME
        )
        print('resetting flags')
        # Reset flags in the measurement set
        dc.reset_flags(msname, datacolumn='data')
        dc.reset_flags(msname, datacolumn='model')
        dc.reset_flags(msname, datacolumn='corrected')
        print('flagging baselines')
        current_error = (
            cs.FLAGGING_ERR
        )
        error = dc.flag_baselines(msname, uvrange=bad_uvrange)
        if error > 0:
            message = 'Non-fatal error occured in flagging short baselines of {0}.'.format(msname)
            if logger is not None:
                logger.warning(message)
            else:
                print(message)
        print('flagging zeros')
        error = dc.flag_zeros(msname)
        if error > 0:
            message = 'Non-fatal error occured in flagging zeros of {0}.'.format(msname)
            if logger is not None:
                logger.warning(message)
            else:
                print(message)
        print('flagging antennas')
        if bad_antennas is not None:
            for ant in bad_antennas:
                error = dc.flag_antenna(msname, ant)
                if error > 0:
                    message = 'Non-fatal error occured in flagging ant {0} of {1}.'.format(ant, msname)
                    if logger is not None:
                        logger.warning(message)
                    else:
                        print(message)
        if manual_flags is not None:
            for entry in manual_flags:
                dc.flag_manual(msname, entry[0], entry[1])
        print('flagging rfi')
        flag_pixels(msname)
        # NOTE(review): flag_pixels returns None, so `error` here is the
        # stale value from the antenna/zero flagging above -- this check can
        # re-report an earlier, already-reported error and never reflects
        # flag_pixels itself.  Confirm intent.
        if error > 0:
            message = 'Non-fatal error occured in flagging bad pixels of {0}.'.format(msname)
            if logger is not None:
                logger.warning(message)
            else:
                print(message)
        print('delay cal')
        # Antenna-based delay calibration
        calstring = 'delay calibration'
        current_error = (
            cs.DELAY_CAL_ERR |
            cs.INV_GAINAMP_P1 |
            cs.INV_GAINAMP_P2 |
            cs.INV_GAINPHASE_P1 |
            cs.INV_GAINPHASE_P2 |
            cs.INV_DELAY_P1 |
            cs.INV_DELAY_P2 |
            cs.INV_GAINCALTIME |
            cs.INV_DELAYCALTIME
        )
        error = dc.delay_calibration(
            msname,
            cal.name,
            refants=refants
        )
        if error > 0:
            status = cs.update(status, cs.DELAY_CAL_ERR )
            message = 'Non-fatal error occured in delay calibration of {0}.'.format(msname)
            if logger is not None:
                logger.warning(message)
            else:
                print(message)
        _check_path('{0}_{1}_kcal'.format(msname, cal.name))
        print('flagging based on delay cal')
        calstring = 'flagging of ms data'
        current_error = (
            cs.FLAGGING_ERR |
            cs.INV_GAINAMP_P1 |
            cs.INV_GAINAMP_P2 |
            cs.INV_GAINPHASE_P1 |
            cs.INV_GAINPHASE_P2 |
            cs.INV_GAINCALTIME
        )
        # Compare short-timescale delays against the delay-cal solution and
        # flag discrepant antennas.
        _times, antenna_delays, kcorr, _ant_nos = dp.plot_antenna_delays(
            msname, cal.name, show=False)
        error += flag_antennas_using_delays(antenna_delays, kcorr, msname)
        if error > 0:
            status = cs.update(status, cs.FLAGGING_ERR)
            message = 'Non-fatal error occured in flagging of bad timebins on {0}'.format(msname)
            if logger is not None:
                logger.warning(message)
            else:
                print(message)
        try:
            _check_path('{0}_{1}_2kcal'.format(msname, cal.name))
        except AssertionError:
            status = cs.update(status, cs.FLAGGING_ERR)
            message = 'Non-fatal error occured in flagging of bad timebins on {0}'.format(msname)
            if logger is not None:
                logger.warning(message)
            else:
                print(message)
        print('delay cal again')
        # Antenna-based delay calibration
        # Re-derive the delays now that bad antennas have been flagged; the
        # previous delay tables are removed first.
        calstring = 'delay calibration'
        current_error = (
            cs.DELAY_CAL_ERR |
            cs.INV_GAINAMP_P1 |
            cs.INV_GAINAMP_P2 |
            cs.INV_GAINPHASE_P1 |
            cs.INV_GAINPHASE_P2 |
            cs.INV_DELAY_P1 |
            cs.INV_DELAY_P2 |
            cs.INV_GAINCALTIME |
            cs.INV_DELAYCALTIME
        )
        shutil.rmtree('{0}_{1}_kcal'.format(msname, cal.name))
        shutil.rmtree('{0}_{1}_2kcal'.format(msname, cal.name))
        error = dc.delay_calibration(msname, cal.name, refants=refants)
        if error > 0:
            status = cs.update(status, cs.DELAY_CAL_ERR )
            message = 'Non-fatal error occured in delay calibration ' + \
                'of {0}.'.format(msname)
            if logger is not None:
                logger.warning(message)
            else:
                print(message)
        _check_path('{0}_{1}_kcal'.format(msname, cal.name))
        print('bandpass and gain cal')
        calstring = 'bandpass and gain calibration'
        current_error = (
            cs.GAIN_BP_CAL_ERR |
            cs.INV_GAINAMP_P1 |
            cs.INV_GAINAMP_P2 |
            cs.INV_GAINPHASE_P1 |
            cs.INV_GAINPHASE_P2 |
            cs.INV_GAINCALTIME
        )
        error = dc.gain_calibration(
            msname,
            cal.name,
            refant,
            blbased=blbased,
            forsystemhealth=forsystemhealth,
            keepdelays=keepdelays,
            interp_thresh=interp_thresh,
            interp_polyorder=interp_polyorder
        )
        if error > 0:
            status = cs.update(status, cs.GAIN_BP_CAL_ERR)
            message = 'Non-fatal error occured in gain/bandpass calibration of {0}.'.format(msname)
            if logger is not None:
                logger.warning(message)
            else:
                print(message)
        fnames = [
            '{0}_{1}_bcal'.format(msname, cal.name),
            '{0}_{1}_bacal'.format(msname, cal.name),
            '{0}_{1}_bpcal'.format(msname, cal.name),
            '{0}_{1}_gpcal'.format(msname, cal.name),
            '{0}_{1}_gacal'.format(msname, cal.name)
        ]
        if forsystemhealth:
            fnames += [
                '{0}_{1}_2gcal'.format(msname, cal.name)
            ]
        if not keepdelays and not forsystemhealth:
            fnames += [
                '{0}_{1}_bkcal'.format(msname, cal.name)
            ]
        for fname in fnames:
            _check_path(fname)
        print('combining bandpass and delay solns')
        # Combine bandpass solutions and delay solutions
        with table('{0}_{1}_bacal'.format(msname, cal.name)) as tb:
            bpass = np.array(tb.CPARAM[:])
        with table('{0}_{1}_bpcal'.format(msname, cal.name)) as tb:
            bpass *= np.array(tb.CPARAM[:])
        if not forsystemhealth:
            # NOTE(review): this assignment discards the bacal*bpcal product
            # accumulated above and keeps only the bkcal values -- given the
            # `*=` pattern two lines up, `bpass *= ...` may have been
            # intended.  Confirm against the beamformer-weight pipeline.
            with table('{0}_{1}_bkcal'.format(msname, cal.name)) as tb:
                bpass = np.array(tb.CPARAM[:])
        with table(
            '{0}_{1}_bcal'.format(msname, cal.name),
            readonly=False
        ) as tb:
            tb.putcol('CPARAM', bpass)
            if not forsystemhealth:
                tbflag = np.array(tb.FLAG[:])
                tb.putcol('FLAG', np.zeros(tbflag.shape, tbflag.dtype))
    except Exception as exc:
        status = cs.update(status, current_error)
        du.exception_logger(logger, calstring, exc, throw_exceptions)
    print('end of cal routine')
    return status
def cal_in_datetime(dt, transit_time, duration=5*u.min, filelength=15*u.min):
    """Check to see if a transit is in a given file.
    Parameters
    ----------
    dt : str
        The start time of the file, given as a string.
        E.g. '2020-10-06T23:19:02'
    transit_time : astropy.time.Time instance
        The transit time of the source.
    duration : astropy quantity
        The amount of time around transit you are interested in, in minutes or
        seconds.
    filelength : astropy quantity
        The length of the hdf5 file, in minutes or seconds.
    Returns
    -------
    bool
        True if at least part of the transit is within the file, else False.
    """
    file_start = Time(dt)
    file_end = file_start + filelength
    window_start = transit_time - duration/2
    window_end = transit_time + duration/2
    # The file is relevant when it fully contains the transit window, is
    # fully contained inside it, or overlaps either edge of the window.
    if file_start < window_start and file_end > window_end:
        return True
    if file_start > window_start and file_end < window_end:
        return True
    if file_end > window_start and (file_end - window_start) < duration:
        return True
    if file_start < window_end and (window_end - file_start) < duration:
        return True
    return False
def get_files_for_cal(
        caltable, refcorr='01', duration=5*u.min, filelength=15*u.min,
        hdf5dir='/mnt/data/dsa110/correlator/', date_specifier='*'):
    """Returns a dictionary containing the filenames for each calibrator pass.
    Parameters
    ----------
    caltable : str
        The path to the csv file containing calibrators of interest.
    refcorr : str
        The reference correlator to search for recent hdf5 files from. Searches
        the directory `hdf5dir`/corr`refcorr`/
    duration : astropy quantity
        The duration around transit which you are interested in extracting, in
        minutes or seconds.
    filelength : astropy quantity
        The length of the hdf5 files, in minutes or seconds.
    hdf5dir : str
        The path to the hdf5 files.
    date_specifier : str
        A specifier to include to limit the dates for which you are interested
        in. Should be something interpretable by glob and should be to the
        second precision. E.g. `2020-10-06*`, `2020-10-0[678]*` and
        `2020-10-06T01:03:??` are all valid.
    Returns
    -------
    dict
        A dictionary specifying the hdf5 filenames that correspond to the
        requested dates and calibrators.
    """
    calsources = pandas.read_csv(caltable, header=0)
    files = sorted(
        glob.glob(
            '{0}/corr{1}/{2}.hdf5'.format(
                hdf5dir,
                refcorr,
                date_specifier
            )
        )
    )
    # The first 19 characters of the basename encode the start timestamp,
    # e.g. '2020-10-06T23:19:02' — assumed naming convention, TODO confirm.
    datetimes = [f.split('/')[-1][:19] for f in files]
    if len(np.unique(datetimes)) != len(datetimes):
        print('Multiple files exist for the same time.')
    # One entry per observing date (first 10 chars = 'YYYY-MM-DD').
    dates = np.unique([dt[:10] for dt in datetimes])
    filenames = dict()
    for date in dates:
        filenames[date] = dict()
        for _index, row in calsources.iterrows():
            # 'ra'/'dec' columns may be sexagesimal strings or bare numbers;
            # bare numbers are presumed to be degrees.
            if isinstance(row['ra'], str):
                rowra = row['ra']
            else:
                rowra = row['ra']*u.deg
            if isinstance(row['dec'], str):
                rowdec = row['dec']
            else:
                rowdec = row['dec']*u.deg
            cal = du.src(
                row['source'],
                ra=Angle(rowra),
                dec=Angle(rowdec),
                I=row['flux (Jy)']
            )
            midnight = Time('{0}T00:00:00'.format(date))
            # Negated hour angle at midnight, wrapped to [0, 2pi): the LST
            # still to elapse before the source transits.
            delta_lst = -1*(
                cal.direction.hadec(midnight.mjd)[0]
            )%(2*np.pi)
            transit_time = (
                midnight + delta_lst/(2*np.pi)*ct.SECONDS_PER_SIDEREAL_DAY*u.s
            )
            assert transit_time.isot[:10]==date
            # Get the filenames for each calibrator transit
            transit_files = []
            for dt in datetimes:
                if cal_in_datetime(dt, transit_time, duration, filelength):
                    transit_files += [dt]
            filenames[date][cal.name] = {
                'cal': cal,
                'transit_time': transit_time,
                'files': transit_files
            }
    return filenames
| [
37811,
9771,
571,
1358,
8027,
329,
360,
4090,
12,
11442,
36537,
351,
35106,
32,
13,
198,
198,
13838,
25,
22937,
3184,
446,
11,
288,
2271,
13,
14323,
446,
31,
459,
305,
13,
9948,
13670,
13,
15532,
11,
12131,
14,
3312,
198,
37811,
198... | 1.981748 | 26,024 |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from trackpy.static import *
import pandas as pd
import numpy as np
from numpy.testing import assert_equal, assert_almost_equal, assert_array_less
from trackpy.static import cluster
from trackpy.tests.common import StrictTestCase
def _points_ring3D(r_edges, dr, n):
"""Returns x, y, z array of points comprising shells extending from r to
r_dr. n determines the number of points in the ring. Rings are generated by
constructing a unit sphere and projecting every point onto a shell of
thickness dr"""
refx_all, refy_all, refz_all = [], [], []
for r in r_edges:
ref = 2*np.random.random(size=(n, 3)) - 1
ref /= np.linalg.norm(ref, axis=1).repeat(3).reshape((len(ref), 3))
ref *= dr*np.random.random(size=(len(ref), 3)) + r
x, y, z = ref[:, 0], ref[:, 1], ref[:, 2]
refx_all.append(x)
refy_all.append(y)
refz_all.append(z)
return np.array(refx_all), np.array(refy_all), np.array(refz_all)
if __name__ == '__main__':
import unittest
unittest.main()
| [
6738,
11593,
37443,
834,
1330,
357,
48546,
62,
11748,
11,
7297,
11,
3601,
62,
8818,
11,
198,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
28000,
1098,
62,
17201... | 2.473795 | 477 |
""" Provide a container for building code segments. """
__author__ = "Brian Allen Vanderburg II"
__copyright__ = "Copyright 2016"
__license__ = "Apache License 2.0"
class CodeBuilder(object):
    """Accumulate lines of source code at varying indentation levels."""

    # Number of spaces added per indentation level.
    INDENT = 4

    def __init__(self, indent=0):
        """Create a builder starting at the given indentation level."""
        self._indent = indent
        self._blocks = []

    def add_line(self, line):
        """Append one line of code at the current indentation."""
        self._blocks += [" " * self._indent, line, "\n"]

    def indent(self):
        """Step one indentation level deeper."""
        self._indent += self.INDENT

    def dedent(self):
        """Step one indentation level back out."""
        self._indent -= self.INDENT

    def add_section(self):
        """Reserve a nested builder at this position, to be filled in later."""
        nested = CodeBuilder(self._indent)
        self._blocks.append(nested)
        return nested

    def __str__(self):
        """Render all accumulated pieces, recursing into nested sections."""
        return "".join(str(piece) for piece in self._blocks)

    def execute(self):
        """Exec the accumulated code and return the resulting globals dict."""
        namespace = {}
        exec(str(self), namespace)
        return namespace
| [
37811,
44290,
257,
9290,
329,
2615,
2438,
17894,
13,
37227,
198,
198,
834,
9800,
834,
220,
220,
220,
220,
220,
796,
366,
24761,
9659,
26669,
7423,
2873,
1,
198,
834,
22163,
4766,
834,
220,
220,
796,
366,
15269,
1584,
1,
198,
834,
43... | 2.523232 | 495 |
import os
import urllib
import urllib2
import urlparse
import cookielib
import time
from bs4 import BeautifulSoup | [
11748,
28686,
198,
11748,
2956,
297,
571,
198,
11748,
2956,
297,
571,
17,
198,
11748,
19016,
29572,
198,
11748,
4255,
8207,
571,
198,
11748,
640,
198,
6738,
275,
82,
19,
1330,
23762,
50,
10486
] | 3.323529 | 34 |
from xml.etree import ElementTree as ET
from explorecourses import *
| [
6738,
35555,
13,
316,
631,
1330,
11703,
27660,
355,
12152,
198,
198,
6738,
7301,
66,
39975,
1330,
1635,
198
] | 3.684211 | 19 |
import numpy as np
"""
Stochastic bisection algorithm,
as described in
"Probabilistic bisection converges almost
as quickly as stochastic approximation,
Peter I. Frazier, Shane G. Henderson, Rolf Waeber"
"""
TEST_VARIABLE = "TESTVARIABLE"
def stochastic_bisection(measure,gamma=0.9,maxiter=100,maxdrift=500,tol=1e-3,
                         verbose=0):
    """
    Locate the root of a noisy 1d function on [0,1] via probabilistic
    bisection (Frazier, Henderson & Waeber).

    measure : function that takes a scalar as value and returns
              a noisy measurement of some 1d function f:[0,1] -> R
    gamma : gamma factor for drift test, as described in the article
    maxiter : maximum number of iterations of algorithm
    maxdrift : maximum number of iterations for each drift test
    verbose : frequency of printings of x_m
    tol : tolerance on the running estimate's change between iterations
    """
    # Posterior update probabilities: pc is the "confidence" threshold and
    # p0 slightly below it is used for the asymmetric CDF update.
    pc = 1.0 - gamma/2
    p0 = pc-1e-2
    # Piecewise-linear CDF over [0,1]: knot locations and CDF values.
    points = [0.0,1.0]
    values = [0.0,1.0]
    x_m = 0.5
    x_r0 = x_m
    # Exponential-smoothing factor for the running estimate x_r.
    running_alpha = 0.1
    if verbose == 0:
        verbose = maxiter+1
    for n in range(maxiter):
        sign_func = lambda : np.sign(measure(x_m))
        # _drift_test, _update_cdf and _get_median are module-level helpers
        # defined elsewhere in this file -- presumably z_m in {-1, 0, +1},
        # with 0 meaning "inconclusive"; TODO confirm.
        z_m = _drift_test(sign_func,gamma,maxdrift)
        if z_m == -1:
            p_update = p0
        elif z_m == 1:
            p_update = 1-p0
        else:
            continue
        points,values = _update_cdf(x_m,p_update,points,values)
        x_m = _get_median(points,values)
        x_r = x_r0 + running_alpha*(x_m-x_r0)
        # NOTE(review): if the very first iterations all yield z_m == 0,
        # x_r is never bound and the final return raises NameError -- confirm
        # whether callers guarantee at least one conclusive drift test.
        if n >= 10 and np.abs(x_r-x_r0) <= tol:
            break
        else:
            x_r0 = x_r
        if (n+1)%verbose == 0:
            print(x_r,x_m)
    print("Finished")
    return x_r
| [
11748,
299,
32152,
355,
45941,
198,
37811,
198,
220,
220,
220,
520,
5374,
3477,
47457,
3213,
11862,
11,
220,
198,
220,
220,
220,
355,
3417,
287,
198,
220,
220,
220,
366,
2964,
65,
14991,
2569,
47457,
3213,
6718,
3212,
2048,
220,
198,
... | 2.003686 | 814 |
# -*- coding: utf-8 -*-
"""Setup module."""
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
def get_requires():
    """Read requirements.txt and return its entries as a list.

    Returns
    -------
    list of str
        Whitespace-separated requirement specifiers. Empty entries cannot
        occur because ``str.split()`` without arguments discards them, so
        the original ``filter`` pass was redundant and has been dropped.
    """
    # Use a context manager so the handle is closed even on error; the
    # original left the file object open.
    with open("requirements.txt", "r") as requirements_file:
        return requirements_file.read().split()
def read_description():
    """Build the long description from README.md and CHANGELOG.md.

    Falls back to a short fixed description if either file is unreadable.
    """
    try:
        parts = [""]
        with open("README.md") as readme:
            parts.append(readme.read())
        with open("CHANGELOG.md") as changelog:
            parts.append(changelog.read())
        # Joining with newlines reproduces "\n" + README + "\n" + CHANGELOG.
        return "\n".join(parts)
    except Exception:
        return '''Breathing gymnastics application'''
setup(
name='nafas',
packages=['nafas'],
version='0.1',
description='Breathing gymnastics application',
long_description=read_description(),
long_description_content_type='text/markdown',
author='Sepand Haghighi',
author_email='info@pycm.ir',
url='https://github.com/sepandhaghighi/nafas',
download_url='https://github.com/sepandhaghighi/nafas/tarball/v0.1',
keywords="python3 python breath breathing meditation",
project_urls={
'Source': 'https://github.com/sepandhaghighi/nafas',
},
install_requires=get_requires(),
python_requires='>=3.5',
classifiers=[
'Development Status :: 1 - Planning',
'Intended Audience :: Developers',
'Natural Language :: English',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: End Users/Desktop',
'Intended Audience :: Other Audience',
'Topic :: Games/Entertainment',
'Topic :: Utilities',
],
license='MIT',
)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
40786,
8265,
526,
15931,
198,
28311,
25,
198,
220,
220,
220,
422,
900,
37623,
10141,
1330,
9058,
198,
16341,
17267,
12331,
25,
198,
220,
220,
220,
422,
1233,
2679... | 2.526642 | 807 |
# -*- coding: utf-8 -*-
# Benchmark a hangman ("forca") solver over every word of a reduced
# dictionary, logging per-word timing and wrong-guess counts to a file.
import random, time
from collections import Counter
# NOTE(review): random and Counter are not used in this snippet; presumably
# they are consumed by testa_letra/seleciona_letra, defined elsewhere.
with open('dicionario_reduzido.txt') as arquivo:
    dicionario = arquivo.read().splitlines()
tp_result = open('tp_v02.txt', 'w+')
p = 0
for palavra in dicionario:
    num_caracteres = len(palavra)
    t1 = time.perf_counter()
    # Candidate pool: all dictionary words of the same length as the target.
    dic_filtrado = [i for i in dicionario if len(i) == num_caracteres]
    testadas = []
    erradas = []
    resultado = list('_' * num_caracteres)
    # Guess letters until no blanks remain; testa_letra is expected to
    # mutate resultado/testadas/erradas in place -- defined elsewhere, confirm.
    while resultado.count('_') > 0:
        testa_letra(seleciona_letra(dic_filtrado))
    resultado = ''.join(resultado)
    t2 = time.perf_counter()
    p += 1
    print(p)
    # Record: word, word length, elapsed seconds, number of wrong guesses.
    tp_result.write(f'{palavra},{num_caracteres},{t2-t1},{len(erradas)}\n')
tp_result.close()
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
201,
198,
201,
198,
11748,
4738,
11,
640,
201,
198,
6738,
17268,
1330,
15034,
201,
198,
201,
198,
4480,
1280,
10786,
67,
47430,
4982,
62,
445,
10277,
17305,
13,
14116,
11537,... | 2.070845 | 367 |
from Model.Layer import Layer
| [
6738,
9104,
13,
49925,
1330,
34398,
201,
198,
201,
198
] | 3.3 | 10 |
import os
from os import path
import tempfile
from hashlib import sha1
from contextlib import contextmanager
from checksum import CHUNK_SIZE
| [
11748,
28686,
198,
6738,
28686,
1330,
3108,
198,
11748,
20218,
7753,
198,
6738,
12234,
8019,
1330,
427,
64,
16,
198,
6738,
4732,
8019,
1330,
4732,
37153,
198,
198,
6738,
8794,
388,
1330,
5870,
4944,
42,
62,
33489,
198
] | 3.736842 | 38 |
import paho.mqtt.client as mqtt
import threading
import json
import uuid
import time
import sys
import logging
from runtimemngr.runtime import RuntimeView
from runtimemngr.msgdefs import Action, Result, ARTSResponse
DEBUG=False
# TODO: do not start a new thread for each timeout
| [
198,
11748,
279,
17108,
13,
76,
80,
926,
13,
16366,
355,
285,
80,
926,
198,
11748,
4704,
278,
198,
11748,
33918,
198,
11748,
334,
27112,
198,
11748,
640,
198,
11748,
25064,
198,
11748,
18931,
198,
198,
6738,
1057,
16514,
368,
782,
81,... | 3.224719 | 89 |
#!/usr/bin/env python
from ibidem.advent_of_code.util import get_input_name
if __name__ == "__main__":
part1()
part2()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
6738,
24283,
28913,
13,
324,
1151,
62,
1659,
62,
8189,
13,
22602,
1330,
651,
62,
15414,
62,
3672,
628,
628,
628,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
19... | 2.350877 | 57 |
import forms_builder.forms.urls  # add this import
import dimension.urls
from django.conf import settings
from django.conf.urls import patterns, include, url
from django.conf.urls.static import static
from django.contrib import admin

admin.autodiscover()

# URL routing table (legacy Django ``patterns()`` style).
urlpatterns = patterns(
    '',
    # Examples:
    # url(r'^$', 'orchid.views.home', name='home'),
    # url(r'^blog/', include('blog.urls')),
    url(r'^admin/', include(admin.site.urls)),
    (r'', include('core.urls')),
    # The forms include appeared twice in the original file; the duplicate
    # was removed -- the first matching pattern always wins, so behavior is
    # unchanged.
    url(r'^forms/', include(forms_builder.forms.urls)),
    url(r'^dimension/', include(dimension.urls, namespace="dimension")),
    url(r'^jsreverse/$', 'django_js_reverse.views.urls_js', name='js_reverse'),
) + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)

if settings.DEBUG:
    # Debug toolbar routes are only mounted in development.
    import debug_toolbar
    urlpatterns += patterns(
        '',
        url(r'^__debug__/', include(debug_toolbar.urls)),
    )
| [
11748,
5107,
62,
38272,
13,
23914,
13,
6371,
82,
220,
1303,
751,
428,
1330,
198,
11748,
15793,
13,
6371,
82,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
6738,
42625,
14208,
13,
10414,
13,
6371,
82,
1330,
7572,
11,
2291,
11,
... | 2.582888 | 374 |
import click
from strava.utils import input_tuple_to_secs
from strava.commands.activity_default import get_activity_from_ids
from strava.decorators import output_option, login_required
_ACTIVITY_COLUMNS = ('key', 'value')
@click.command(name='constrain',
help='Constrain the time of the activity on both side for deeper analysis. Currently only for bike, run and workout.'
)
@click.argument('activity_id', required=True, nargs=1)
@click.option('--from', '-f', 'from_', nargs=3, type=int, default=None,
help='Select the start time to narrow the computation to a specific part of the activity.\n If not select the start of the activity is used.\n Need to be entered as 3 numbers, first is the hours, second the minutes ans last the seconds.')
@click.option('--to', '-t', 'to', nargs=3, type=int, default=None,
help='Select the end time to narrow the computation to a specific part of the activity.\n If not select the end of the activity is used.\n Need to be entered as 3 numbers, first is the hours, second the minutes ans last the seconds.')
@click.option('--ftp', type=int,
help='Specify an FTP to overwrite strava FTP.')
@output_option()
@login_required
| [
11748,
3904,
198,
6738,
3534,
6862,
13,
26791,
1330,
5128,
62,
83,
29291,
62,
1462,
62,
2363,
82,
198,
198,
6738,
3534,
6862,
13,
9503,
1746,
13,
21797,
62,
12286,
1330,
651,
62,
21797,
62,
6738,
62,
2340,
198,
6738,
3534,
6862,
13,... | 3.007317 | 410 |
import torch
import unittest
import numpy as np
from time import time
try:
import apex.amp as amp
from apex.amp import half_function
except (ModuleNotFoundError, ImportError) as e:
amp = None
from .compat import half_function
try:
from torch.cuda.amp import custom_fwd, custom_bwd
except (ModuleNotFoundError, ImportError) as e:
from .compat import custom_fwd, custom_bwd
try:
import fused_dropout_add_cuda
except (ModuleNotFoundError, ImportError) as e:
fused_dropout_add_cuda = None
@half_function
if __name__ == '__main__':
batch_size = 512
seq_len = 64
hidden_size = 1024
num_iters = 100
dropout = 0.0
#
unittest.main()
| [
11748,
28034,
198,
11748,
555,
715,
395,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
640,
1330,
640,
198,
198,
28311,
25,
198,
220,
220,
220,
1330,
40167,
13,
696,
355,
20766,
198,
220,
220,
220,
422,
40167,
13,
696,
1330,
2063,
... | 2.653846 | 260 |
import sys
import io
sys.stdin = open("ALDS1_9_C_in4.txt", 'r')
#tmp = input()
# copy the below part and paste to the submission form.
# ---------function------------
import sys
import heapq
nodes = []
outputs = [None] * 2000000
_num_outputs = 0
calc_time = True
if calc_time:import time
main()
# -----------------------------
sys.stdin = sys.__stdin__
| [
11748,
25064,
201,
198,
11748,
33245,
201,
198,
201,
198,
201,
198,
17597,
13,
19282,
259,
796,
1280,
7203,
1847,
5258,
16,
62,
24,
62,
34,
62,
259,
19,
13,
14116,
1600,
705,
81,
11537,
201,
198,
2,
22065,
796,
5128,
3419,
201,
19... | 2.587838 | 148 |
import pathlib
| [
11748,
3108,
8019,
628
] | 4 | 4 |
import pytest
from sitri.providers.contrib.ini import IniConfigProvider
| [
11748,
12972,
9288,
198,
198,
6738,
1650,
380,
13,
15234,
4157,
13,
3642,
822,
13,
5362,
1330,
554,
72,
16934,
29495,
628,
628,
628
] | 3.25 | 24 |
import numpy as np
import matplotlib.pyplot as plt
import pickle
from handyTools import stats
from sklearn.ensemble import RandomForestClassifier
from sklearn.decomposition import PCA
# import load_xmm_data as xmm
# Train and evaluate a random-forest classifier of X-ray source types from
# pre-extracted features. Expects features.pkl to hold a tuple of
# (feature_names, features, labels); labels are presumably one-hot with
# column 3 = 'SSS' (dropped below) -- TODO confirm against the pickler.
with open('features.pkl', 'rb') as f:
    feature_names, features, labels = pickle.load(f)
# features = features[:,:-4] # Disregard the hardness ratios
# feature_names = feature_names[:-4] # Disregard the hardness ratios
# Clean up the NaNs & Infs mess...
features[(np.isnan(features) | np.isinf(features)).nonzero()] = 0
# Normalize the data
# Clip extreme magnitudes, center each column, then scale to [-1, 1].
features[np.abs(features) > 1e4] = 1e4 * np.sign(features[np.abs(features) > 1e4])
features = features - np.mean(features, axis=0)
features = features / np.max(np.abs(features), axis=0)
# features = features / np.std(features, axis=0)
# features[:,19] = features[:,19] / np.max(np.abs(features[:,19]))
# Drop all rows labelled in column 3, remove that class column, and convert
# the remaining one-hot labels to integer class indices.
features = np.delete(features, (labels[:,3] == 1).nonzero(), axis=0)
labels = np.delete(labels, (labels[:,3] == 1).nonzero(), axis=0)
labels = np.delete(labels, [3], axis=1)
labels = np.argmax(labels, axis=1)
# Dimensionality reduction: PCA
pca = PCA(
    n_components=10,
    copy=True,
    whiten=False,
    svd_solver='auto'
)
features_pca = pca.fit_transform(features)
# Split data into training and test sets
# 1024 randomly chosen rows are held out for validation.
ind = np.random.choice(range(features.shape[0]), 1024, replace=False)
v_data = features_pca[ind,:] # Validation
v_labels = labels[ind]
t_data = np.delete(features_pca, ind, axis=0) # Training
t_labels = np.delete(labels, ind, axis=0)
# Create the Random Forest Classifier
rf = RandomForestClassifier(
    max_features='auto',
    class_weight='balanced',
    n_jobs=-1,
    n_estimators=100,
    criterion='gini'
)
# Train the classifier
rf.fit(t_data, t_labels)
# Test on validation data
p_cls = rf.predict(v_data)
# Class names in label-index order (after the 'SSS' column was removed).
cls = [
    'XRB',
    'CV',
    'GRB',
    # 'SSS',
    'Star',
    'Galaxy',
    'AGN',
    'ULX'
]
cm, fig = stats.plot_confusion_matrix(v_labels, p_cls, class_names=cls, normalize=False)
plt.title('Random Forest - validation data')
# Calculate accuracy
# Accuracy = trace of the confusion matrix over its total count.
n = cm.shape[0]
acc = np.sum(cm[range(n), range(n)]) / np.sum(cm)
print('Accuracy on validation data: {:.3%}'.format(acc))
# Plot feature importance
i = np.argsort(rf.feature_importances_)
fig2 = plt.figure()
# plt.barh(range(len(feature_names)), rf.feature_importances_[i], color='k', alpha=0.5)
plt.barh(range(i.size), rf.feature_importances_[i], color='k', alpha=0.5)
plt.xlim(xmin=-0.5 * max(rf.feature_importances_))
plt.axis('off')
# Annotate each bar with its importance percentage (right) and the PCA
# component index (left).
for x, t, name in zip(range(features_pca.shape[1]), rf.feature_importances_[i], i):
    plt.text(t + 0.01 * max(rf.feature_importances_), x,
             '{:.1%}'.format(t), verticalalignment='center')
    plt.text(-0.01 * max(rf.feature_importances_), x,
             name, verticalalignment='center', horizontalalignment='right')
plt.title('Feature importance')
# plt.tight_layout()
| [
11748,
299,
32152,
355,
45941,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
2298,
293,
198,
198,
6738,
15728,
33637,
1330,
9756,
198,
6738,
1341,
35720,
13,
1072,
11306,
1330,
14534,
34605,
9487,
7483,
198,
... | 2.501294 | 1,159 |
import torch
import copy
from tqdm import tqdm
import torch.nn.functional as F
from ogb.nodeproppred import PygNodePropPredDataset, Evaluator
from typing import Optional, List, Union
from torch_geometric.typing import OptPairTensor, Adj, Size, OptTensor
from torch.utils.checkpoint import checkpoint
from torch import Tensor
from torch.nn import Parameter
from torch.nn import Sequential, Linear, ReLU, Dropout
from torch.nn import BatchNorm1d, LayerNorm, InstanceNorm1d
from torch_sparse import SparseTensor
from torch_scatter import scatter, scatter_softmax
from torch_geometric.nn.conv import MessagePassing
from utils import *
| [
11748,
28034,
198,
11748,
4866,
198,
6738,
256,
80,
36020,
1330,
256,
80,
36020,
198,
11748,
28034,
13,
20471,
13,
45124,
355,
376,
198,
6738,
267,
22296,
13,
77,
375,
538,
305,
381,
445,
1330,
9485,
70,
19667,
24331,
39156,
27354,
29... | 3.410811 | 185 |
# -*- coding: utf-8 -*-
import logging
import pytz
from datetime import datetime, timedelta
from dateutil import tz
from django.conf import settings
from django.contrib.auth.models import Group
from django.contrib.auth import authenticate
from django.db.models import Q, Prefetch, Sum
from django.utils.translation import ugettext_lazy as _
from django.utils import timezone
from django.utils.http import urlquote
from rest_framework import exceptions, serializers
from rest_framework.response import Response
from rest_framework.validators import UniqueValidator
from foundation.models import Production, ProductionCrop
from dashboard.serializers.dashboard_production_crop_serializer import DashboardProductionCropListSerializer
logger = logging.getLogger(__name__)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
18931,
198,
11748,
12972,
22877,
198,
6738,
4818,
8079,
1330,
4818,
8079,
11,
28805,
12514,
198,
6738,
3128,
22602,
1330,
256,
89,
198,
6738,
42625,
14208,
13,
1041... | 3.742718 | 206 |
#!/usr/bin/python
# -*- coding: UTF-8 -*-
# import [library]
# from [local_library] import [local_library_var]
if __name__ == '__main__':
pass
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
2,
532,
9,
12,
19617,
25,
41002,
12,
23,
532,
9,
12,
198,
198,
2,
1330,
685,
32016,
60,
198,
2,
422,
685,
12001,
62,
32016,
60,
1330,
685,
12001,
62,
32016,
62,
7785,
60,
198,
361,
11... | 2.466667 | 60 |
#!/usr/bin/env python
i = 0
while True:
i += 1
d1 = digits(i)
d2 = digits(2 * i)
if d1 == d2:
d3 = digits(3 * i)
if d1 == d3:
d4 = digits(4 * i)
if d1 == d4:
d5 = digits(5 * i)
if d1 == d5:
d6 = digits(6 * i)
if d1 == d6:
print(i)
break
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
72,
796,
657,
198,
4514,
6407,
25,
198,
220,
220,
220,
1312,
15853,
352,
198,
220,
220,
220,
288,
16,
796,
19561,
7,
72,
8,
198,
220,
220,
220,
288,
17,
796,
19561,
7,
17,
... | 1.432526 | 289 |
from fbprophet import Prophet
### Data Cleaning
| [
6738,
277,
65,
22930,
3202,
1330,
13583,
628,
198,
220,
220,
220,
220,
198,
220,
220,
220,
220,
198,
21017,
6060,
5985,
278,
198
] | 2.5 | 24 |
import onnx
from onnx import helper
from onnx import TensorProto
# This is to test the operators without "Qlinear" support but still support uint8 input
# These operators need to be internal to a graph/partition
# def GenerateModel(model_name):
if __name__ == "__main__":
GenerateModel('nnapi_internal_uint8_support.onnx')
| [
11748,
319,
77,
87,
198,
6738,
319,
77,
87,
1330,
31904,
198,
6738,
319,
77,
87,
1330,
309,
22854,
2964,
1462,
628,
198,
2,
770,
318,
284,
1332,
262,
12879,
1231,
366,
48,
29127,
1,
1104,
475,
991,
1104,
20398,
23,
5128,
198,
2,
... | 3.245098 | 102 |
#!/home/abcd/anaconda2/bin/python2.7
_description='''
This tool is used to generate pre-loadable(SW defined prototxt format <schema/DlaInterface.proto>) from caffe prototxt (<schema/caffe.proto>)
'''
import os
import inspect
import re
import sys
import argparse
import commands
import math
import logging
import copy
from pprint import pprint
from collections import OrderedDict
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import ctypes
from PIL import Image
from google.protobuf import text_format as proto_text
import caffe_pb2
class dict_of_dict(OrderedDict):
    """Implementation of perl's autovivification feature.

    Missing keys are materialized on first access as empty ``dict_of_dict``
    instances, so arbitrarily deep assignments like ``d['a']['b'] = 1`` work
    without pre-creating the intermediate levels.
    """

    def __missing__(self, key):
        # The docstring promised autovivification but the original class
        # body was empty; ``__missing__`` supplies the documented behaviour.
        value = self[key] = type(self)()
        return value
__version__ = '0.5'
#################### Global Variables ######################
def read_proto(proto, filename):
    """
    Populate *proto* from a text-format protobuf file.

    Parameters
    ----------
    proto : protobuf message instance to merge the file contents into
    filename : path of the text-format (.prototxt) file to read

    Logs an error and leaves *proto* untouched if the file cannot be opened.
    """
    try:
        # Context manager guarantees the handle is closed even if Merge()
        # raises; the original only called close() on the success path.
        with open(filename, "rb") as f:
            proto_text.Merge(f.read(), proto)
    except IOError:
        logging.error(": Could not open file. Creating a new one.")
    return
if __name__ == "__main__":
main(len(sys.argv), sys.argv)
| [
2,
48443,
11195,
14,
397,
10210,
14,
272,
330,
13533,
17,
14,
8800,
14,
29412,
17,
13,
22,
198,
198,
62,
11213,
28,
7061,
6,
198,
1212,
2891,
318,
973,
284,
7716,
662,
12,
2220,
540,
7,
17887,
5447,
1237,
313,
742,
5794,
1279,
1... | 2.846914 | 405 |
from swap_user.to_email_otp.base_managers import BaseEmailOTPUserManager
class EmailOTPUserManager(BaseEmailOTPUserManager):
    """
    Concrete implementation of manager for EmailOTPUser.

    Adds no behaviour of its own; it exists so the concrete user model has a
    dedicated manager type, inheriting everything from
    BaseEmailOTPUserManager.
    """
    pass
| [
6738,
16075,
62,
7220,
13,
1462,
62,
12888,
62,
313,
79,
13,
8692,
62,
805,
10321,
1330,
7308,
15333,
2394,
5105,
2655,
13511,
628,
198,
4871,
9570,
2394,
5105,
2655,
13511,
7,
14881,
15333,
2394,
5105,
2655,
13511,
2599,
198,
220,
22... | 3 | 70 |
''' A tool for generating a runnable script, which runs a python file and deletes itself after it is run. '''

__author__ = 'David Ma'
__version__ = '1.0.0'
7061,
6,
317,
2891,
329,
15453,
257,
1057,
77,
540,
4226,
11,
543,
4539,
257,
21015,
2393,
290,
28128,
274,
2346,
706,
340,
318,
4966,
13,
705,
7061,
628,
198,
834,
9800,
834,
796,
705,
11006,
6669,
6,
198,
834,
9641,
834,
796,
70... | 3.14 | 50 |
## ---------------------------------------------------------------- ##
## SYNTACTIC PRIMING MODEL MANAGEMENT AND INTERFACE
## ---------------------------------------------------------------- ##
## Usage:
##
## import sp
## s = sp.Simulation(n=25)
## s.simulate()
## s.data
## ---------------------------------------------------------------- ##
import actr
import os
import random
CONDITIONS = ['AC', 'AI', 'PC', 'PI']
class SP_Object():
    """Base class shared by all syntactic-priming experiment objects."""

    # The four experimental conditions: Active/Passive x Correct/Incorrect.
    CONDITIONS = ('AC', 'AI', 'PC', 'PI')
class Sentence(SP_Object):
"""A SP experiment stimulus"""
@property
class Picture(SP_Object):
    """A structure to hold a picture"""

    def __init__(self, agent="drbrown",
                 action="yell",
                 patient="martymcfly",
                 id = None):
        """Store the agent/action/patient triple plus an optional id."""
        self.agent = agent
        self.action = action
        self.patient = patient
        self.id = id

    @property
    def chunk_definition(self):
        """Render this picture as an ACT-R chunk definition list."""
        return [
            'isa', 'picture',
            'kind', 'picture',
            'agent', self.agent,
            'action', self.action,
            'patient', self.patient,
        ]

    def __repr__(self):
        """Short human-readable summary: <{id} agent, action, patient>."""
        return "<{%s} %s, %s, %s>" % (
            self.id, self.agent, self.action, self.patient)
class Trial(SP_Object):
    """A single experimental trial: a sentence plus prime/target pictures."""

    @property
    def condition(self):
        """Returns the condition code, e.g. 'AC' or 'PI'."""
        return self._condition

    @condition.setter
    def condition(self, value):
        """Sets the condition (Active/Passive, Correct/Incorrect).

        Also derives ``voice`` ('active'/'passive') from the first letter and
        ``syntax_correct`` ('yes'/'no') from the second. Values outside the
        four valid codes are silently ignored, as in the original.
        """
        if value.upper() in ['AC', 'AI', 'PC', 'PI']:
            self._condition = value
            voice = 'active'
            syntax_correct = 'yes'
            if self.condition.startswith('P'):
                voice = 'passive'
            if self.condition.endswith('I'):
                syntax_correct = 'no'
            self.voice = voice
            # BUG FIX: the original assigned the constant 'no' here,
            # discarding the value computed above and mislabelling every
            # syntactically correct trial.
            self.syntax_correct = syntax_correct

    def __str__(self):
        """A representation of the trial."""
        return "<[%s] S:%s, P:%s, P:%s>" % (self.condition,
                                            self.sentence,
                                            self.ppicture,
                                            self.tpicture
                                            )

    def __repr__(self):
        """A representation of the trial."""
        return self.__str__()
def load_trials(file="stimuli.txt"):
    """Parse a tab-separated stimulus file into a list of Trial objects.

    Parameters
    ----------
    file : str
        Path to the tab-separated stimulus file; the first line is a header
        and is skipped.

    Returns
    -------
    list of Trial
        One trial per data row, each bundling the prime sentence and the
        prime/target pictures.
    """
    # Context manager closes the file; the original leaked the handle.
    with open(file) as f:
        lines = f.readlines()[1:]
    tokenized = [x.split("\t") for x in lines]
    trials = []
    for tokens in tokenized:
        # Column meanings below are inferred from the indices used here --
        # TODO confirm against the stimuli.txt header row.
        trial_type = tokens[3]
        t_verb = tokens[0]
        # Target picture: agent/object drawn from columns 12/13.
        t_image_ID = tokens[1]
        t_image_agent = tokens[12]
        t_image_object = tokens[13]
        tpic = Picture(agent = t_image_agent,
                       patient = t_image_object,
                       action = t_verb,
                       id = t_image_ID)
        # Prime picture: nouns drawn from columns 7/8.
        p_image_ID = tokens[2]
        p_image_n1 = tokens[7]
        p_image_n2 = tokens[8]
        ppic = Picture(agent = p_image_n1,
                       patient = p_image_n2,
                       action = t_verb,
                       id = p_image_ID)
        p_noun1 = tokens[10]
        p_noun2 = tokens[11]
        p_sentence = tokens[5] # correct version[4]. incorrect version is tokens[5]
        sen = Sentence(condition = trial_type,
                       verb = t_verb,
                       sentence = p_sentence)
        trl = Trial(condition = trial_type,
                    sentence = sen,
                    ppicture = ppic,
                    tpicture = tpic)
        trials.append(trl)
    return trials
| [
2235,
16529,
22492,
198,
2235,
19704,
11251,
10659,
2149,
4810,
3955,
2751,
19164,
3698,
17254,
4760,
12529,
5357,
23255,
49836,
198,
2235,
16529,
22492,
198,
2235,
29566,
25,
198,
2235,
198,
2235,
220,
220,
1330,
599,
198,
2235,
220,
220... | 1.963736 | 2,013 |
import os
import sys
import tensorflow as tf
import numpy as np
from nltk.translate.bleu_score import sentence_bleu
from nltk.translate.bleu_score import SmoothingFunction
from rouge import Rouge
import model as ml
import data
from configs import DEFINES
DATA_OUT_PATH = './data_out/'
# Req. 1-5-1. bleu score 계산 함수
# Req. 1-5-2. rouge score 계산 함수
# Serving 기능을 위하여 serving 함수를 구성한다.
if __name__ == '__main__':
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.INFO)
tf.compat.v1.app.run(main)
tf.logging.set_verbosity
| [
11748,
28686,
198,
11748,
25064,
198,
11748,
11192,
273,
11125,
355,
48700,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
299,
2528,
74,
13,
7645,
17660,
13,
903,
84,
62,
26675,
1330,
6827,
62,
903,
84,
198,
6738,
299,
2528,
74,
13,
... | 2.037453 | 267 |
#------------------------------------------------------------------------------------------
# PLOT_FARGO
# A PYTHON PLOTTING CLASS TO VISUALIZE THE OUTPUT OF FARGO3D
# A. D. SCHNEIDER
# 2018
#
# The class Plot_FARGO can be used for the visualisation of FARGO3D Output
#
# Capabilities:
# - simplified 2D ploting
# - creating of 2D plots using ffmpeg
#
# Design philosophy:
# The complexity and the possibilities provided by Matplotlib are great. This code tries to shorten unnecessary things
# without losing the possibilities of Matplotlib. Therefore after applying the routines one can still access
# fig, ax to continue individual plotting.
#
# Dependancies:
# - Python 3
# - matplotlib, numpy
# - ffmpeg (if plot_2D_video is used)
# - the multiprocessing module of python might be used in future
#
# There is no guarantee that these routines work in every case.
#
#
#
import numpy as np
import os
from multiprocessing import Pool
import natconst as n
au = n.au
ms = n.MS
T0 = 20512
###############################
# __init__
###############################
# reads in the setup.par file to initialise the plotting
#
# arguments:
# - (string) path_to_fargo: absolute directory of fargo (without "/" at end)
# - (string) setup: name of setup that is plotted.
# Needs to be identical (case-sensitive) to the setupname used in FARGO3D
# - (string) setup_dir (optional): if a different location for the setup files is used, take this directory
# instead. Nevertheless the directory still needs to be a subfolder of the
# fargo directory
# - (bool) FARGOCA Some parameters in this library have been adapted for the specific use with FARGOCA.
###############################
# set_clim
###############################
# analogue to matplotlib.pyplot.set_clim, mainly needed for plot_2D_video
#
# arguments:
# - (number) cm_min: minimum value of the colormap
# - (number) cm_max: maximum value of the colormap
###############################
# set_units
###############################
# can be used to convert units
# right now: only converts cm to AU
#
# arguments:
# - (string) units: minimum value of the colormap
#
# possible units:
# - "AU": converts Y from cm to AU
###############################
# set_xlim
###############################
# can be used to fix the x_axis
# right now: only used in plot_1D and plot_1D_video()
# works like ax.set_xlim
#
# arguments:
# - (list) xlim: [x_min, x_max]
###############################
# set_ylim
###############################
# can be used to fix the y_axis
# right now: only used in plot_1D and plot_1D_video()
# works like ax.set_ylim
#
# arguments:
# - (list) ylim: [y_min, y_max]
###############################
# plot_2D
###############################
# main Method that takes the output number and output type (gasdens, etc) and creates a 2D plot
# plot_2D only works with cubic or cylindrical coordinates
#
# returns (if filename = ""):
# - fig: returned figure
# - ax: returnes axes
#
# arguments:
# - (number) output_number: number of the output being ploted
# - (list of chars) direct (optional): only needed in 3D! gives the direction in which the plot is done
# - (number) ax (optional): only needed in 3D! gives the indicee in perpendicular directionat which
# the profile is ploted
# - (string) tp (optional): tp specifys the variable that is ploted. by default this is the density
# important: tp needs to be the exact same as the relating filename
# - (string) filename (optional): specifies the filename for saving the plot.
# - (Bool) log10 (optional): set log10 = False to get a linear Plot
# - (Bool) polar (optional): if true the returned figure is ploted in cylindrical coordinates
###############################
# plot_2D_video (needs ffmpeg)
###############################
# Method that creates a video of the output files using ffmpeg
# Please note: this routine needs the subfolders "single_frames" and "videos"
# Warning: needs time, CPU power und storage (for temporary pictures and video)
#
# arguments (see also plot_2D):
# - (string) filename: the filename of the created video (and its temporary files)
# - (number) framesteps (optional): can be set >1 if one doesn't want to plot every picture
# - (number) N (optional): can be set, if the total number of outputs isn't the same as in setup.par
#def plot_1D_video(self, filename, tp = "gasdens", xlog10 = True, ylog10 = True,
# framesteps = 1, N = None, div=True, N_start=0, scale="scalefree"):
#import matplotlib.pyplot as plt
#if N is None:
# N = int(int(self.parameters["NTOT"])/int(self.parameters.get("NINTERM", 1)))
#def plot_and_save(i):
# plot_nr = int((i-N_start)/framesteps)
# self.plot_1D(i, tp=tp, xlog10 = xlog10, ylog10=ylog10,
# filename = "single_frames/"+filename+"{:05d}".format(plot_nr)+".png", div=div, scale=scale)
# if i > 10 and i % round(N / 10) == 0: print(round(i / N * 100), "%")
# plt.close()
#for i in range(N_start, int(N/framesteps) +1, framesteps):
# plot_and_save(i)
#cmd_string = "ffmpeg -framerate 24 -i single_frames/"+filename+"%05d.png -r 24 videos/"+filename+".mp4"
#del_string = "rm -rf single_frames/"+filename+"*.png"
#os.system(cmd_string)
#os.system(del_string)
| [
220,
220,
220,
1303,
10097,
22369,
438,
198,
2,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
... | 2.453608 | 2,522 |
# Copyright (c) 2020, Zhouxing shi <zhouxingshichn@gmail.com>
# Licensed under the BSD 2-Clause License.
import os

# Pin GPU visibility *before* importing torch/tensorflow, so device
# discovery in those libraries honours the restriction.
if "CUDA_VISIBLE_DEVICES" not in os.environ:
    os.environ["CUDA_VISIBLE_DEVICES"] = "1"
import torch
import numpy as np
import sys, random, time, shutil, copy, nltk, json
from multiprocessing import Pool

from Logger import Logger
from Parser import Parser, update_arguments
from data_utils import load_data, get_batches, set_seeds
from Models import Transformer
from Verifiers import VerifierForward, VerifierBackward, VerifierDiscrete
from eval_words import eval_words

# ---- Command-line arguments ------------------------------------------------
argv = sys.argv[1:]
parser = Parser().getParser()
args, _ = parser.parse_known_args(argv)
args = update_arguments(args)

# ---- Data loading ----------------------------------------------------------
set_seeds(args.seed)
data_train, data_valid, data_test, _, _ = load_data(args)
# Re-seed after loading -- presumably so downstream shuffles do not depend
# on how much randomness load_data consumed; confirm.
set_seeds(args.seed)

# TensorFlow imported late, after CUDA_VISIBLE_DEVICES has been set.
import tensorflow as tf
config = tf.ConfigProto(device_count={'GPU': 0})  # keep the TF session on CPU
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
with sess.as_default():
    target = Transformer(args, data_train)

    random.shuffle(data_valid)
    random.shuffle(data_test)
    valid_batches = get_batches(data_valid, args.batch_size)
    test_batches = get_batches(data_test, args.batch_size)
    print("Dataset sizes: %d/%d/%d" % (len(data_train), len(data_valid), len(data_test)))

    # Only the first `summary_num_pre` values returned by target.step()
    # are fed to the logger summaries.
    summary_names = ["loss", "accuracy"]
    summary_num_pre = 2
    logger = Logger(sess, args, summary_names, 1)
    print("\n")

    # ---- Training loop -----------------------------------------------------
    if args.train:
        while logger.epoch.eval() <= args.num_epoches:
            random.shuffle(data_train)
            train_batches = get_batches(data_train, args.batch_size)
            for i, batch in enumerate(train_batches):
                logger.next_step(target.step(batch, is_train=True)[:summary_num_pre])
            target.save(logger.epoch.eval())
            logger.next_epoch()
            # Per-epoch validation and test evaluation.
            for batch in valid_batches:
                logger.add_valid(target.step(batch)[:summary_num_pre])
            logger.save_valid(log=True)
            for batch in test_batches:
                logger.add_test(target.step(batch)[:summary_num_pre])
            logger.save_test(log=True)

    # Verification runs on dev or test split depending on the flag.
    data = data_valid if args.use_dev else data_test

    # ---- Robustness verification -------------------------------------------
    if args.verify:
        print("Verifying robustness...")
        if args.method == "forward" or args.method == "ibp":
            verifier = VerifierForward(args, target, logger)
        elif args.method == "backward" or args.method == "baf":
            verifier = VerifierBackward(args, target, logger)
        elif args.method == "discrete":
            verifier = VerifierDiscrete(args, target, logger)
        else:
            # Bug fix: the message previously lacked a "{}" placeholder,
            # so .format(args.method) silently dropped the method name.
            raise NotImplementedError("Method {} not implemented".format(args.method))
        verifier.run(data)
        exit(0)

    if args.word_label:
        eval_words(args, target, data_test)
        exit(0)

    # ---- Plain test-set accuracy -------------------------------------------
    acc = 0
    for batch in test_batches:
        # step()[1] is the batch accuracy; weight it by the batch size.
        acc += target.step(batch)[1] * len(batch)
    acc = float(acc / len(data_test))
    print("Accuracy: {:.3f}".format(acc))
    with open(args.log, "w") as file:
        file.write("{:.3f}".format(acc))
| [
2,
15069,
357,
66,
8,
12131,
11,
10511,
22193,
278,
427,
72,
1279,
38536,
87,
654,
71,
488,
77,
31,
14816,
13,
785,
29,
198,
2,
10483,
5864,
739,
262,
347,
10305,
362,
12,
2601,
682,
13789,
13,
198,
198,
11748,
28686,
198,
361,
... | 2.332593 | 1,350 |
from jose import jwk, jwt
from jose.jwt import JWTError, JWTClaimsError, ExpiredSignatureError
from jose.utils import base64url_decode
from jose.constants import ALGORITHMS
from social_core.backends.open_id_connect import OpenIdConnectAuth
from social_core.exceptions import AuthTokenError
from tethys_services.backends.multi_tenant_mixin import MultiTenantMixin
class OneLoginOIDC(OpenIdConnectAuth):
    """OneLogin OpenID Connect authentication backend."""
    name = 'onelogin-oidc'

    # Bug fix: this method was decorated with ``@property``. A property
    # getter cannot take extra positional arguments, so any call of
    # ``self.validate_and_return_id_token(id_token, access_token)`` would
    # fail with a TypeError at runtime. The decorator is removed.
    def validate_and_return_id_token(self, id_token, access_token):
        """
        Validates the id_token according to the steps at
        http://openid.net/specs/openid-connect-core-1_0.html#IDTokenValidation.

        :param id_token: raw JWT id_token string from the provider
        :param access_token: access token, passed through for at_hash checks
        :raise AuthTokenError: on any signature or claims validation failure
        """
        # client_secret is unused below but comes from the same helper.
        client_id, client_secret = self.get_key_and_secret()

        key = self.find_valid_key(id_token)

        if not key:
            raise AuthTokenError(self, 'Signature verification failed')

        rsakey = jwk.construct(key, algorithm=ALGORITHMS.RS256)

        try:
            claims = jwt.decode(
                id_token,
                rsakey.to_pem().decode('utf-8'),
                algorithms=[ALGORITHMS.HS256, ALGORITHMS.RS256, ALGORITHMS.ES256],
                audience=client_id,
                issuer=self.id_token_issuer(),
                access_token=access_token,
                options=self.JWT_DECODE_OPTIONS,
            )
        except ExpiredSignatureError:
            raise AuthTokenError(self, 'Signature has expired')
        except JWTClaimsError as error:
            raise AuthTokenError(self, str(error))
        except JWTError:
            raise AuthTokenError(self, 'Invalid signature')

        # NOTE(review): the method name suggests the validated claims should
        # be *returned*; as in the original, nothing is returned here --
        # confirm against the parent OpenIdConnectAuth implementation.
        self.validate_claims(claims)
| [
6738,
474,
577,
1330,
474,
43021,
11,
474,
46569,
198,
6738,
474,
577,
13,
73,
46569,
1330,
449,
39386,
12331,
11,
449,
39386,
44819,
82,
12331,
11,
5518,
1202,
11712,
1300,
12331,
198,
6738,
474,
577,
13,
26791,
1330,
2779,
2414,
637... | 2.266578 | 754 |
# --- Independent practice snippets; variables are freely reused between them.
# Slicing: elements 0..4 of the list.
x = ["p", "y", "t", "h", "o", "n"]
print(x[0:5])
# Label-based row/column selection on a DataFrame loaded from a URL.
import pandas as pd
marketing = pd.read_csv("https://goo.gl/6A6Qe2")
print(marketing.loc[["a", "b"], ["Views", "Clicks"]])
# Mixed ints/bools: numpy coerces the bools to 0/1 in an integer array.
import numpy as np
x = np.array([1, 2, False, True, 3])
print(x)
# Adding a string element forces the whole array to a string dtype.
import numpy as np
x = np.array([1, 2, False, True, "3"])
print(x)
print(type(x))
# Cross-correlation vs. correlation-coefficient matrix.
import numpy as np
x = np.array([7, 7, 5, 4, 5, 5, 5, 7])
y = np.array([4, 2, 9, 0, 5, 1, 6, 8])
print(np.correlate(x, y))
print(np.corrcoef(x, y))
# list.reverse() mutates in place and returns None.
x = [9, 12, 4, 7]
x.reverse()
print(x)
# list.append() returns None, so q is None here.
p = ["D", 2, "E", 5, "F", 4]
q = p.append(['X', 8])
print(q)
# Boolean-mask indexing keeps only the elements below 16.
import numpy as np
x = np.array([11, 12, 17, 15, 18])
x_small = x[x < 16]
print(x_small)
# Find the mean of the first column of costs
import numpy as np
costs = np.column_stack(([2, 2, 3, 1, 3, 3, 3, 2],
                         [4, 4, 4, 7, 7, 7, 4, 7]))
# mean_costs = mean(costs[0:1,0])
# NOTE(review): costs[0] selects the first *row* of the stacked array,
# not the first column (that would be costs[:, 0]) -- confirm intent.
print(np.mean(costs[0]))
# Double indexing: row 0, then column 1.
import numpy as np
np_2d = np.array([[4, 7, 8],
                  [19, 5, 18]])
print(np_2d[0][1])
# str.replace returns a new string with every "u" turned into "+".
x = "cautioned"
print(x.replace("u", "+"))
# Elementwise comparison yields a boolean array.
import numpy as np
x = np.array([3, 2, 9, 5, 7])
bool_x = x >= 7
print(bool_x)
# .loc with list arguments returns a DataFrame rather than a scalar.
import pandas as pd
fruits = pd.read_csv("https://goo.gl/DOw6pe")
print(fruits.loc[[0],["Bananas"]])
# A list can nest values of mixed types.
a = 7
b = [0, 1]
c = [3, 9, "True"]
print([a,b,c])
# y aliases x, so slice-assignment through y also changes x.
x = [11, 12, 13, 14]
y = x
y[2:4] = [15, 16]
print(x)
# Adding a new DataFrame column by assignment.
import pandas as pd
classes = pd.read_csv("https://goo.gl/JvBiH4")
classes["Course"] = ["Math", "Math", "Science"]
print(classes)
# int() truncates toward zero.
x = 4.123412
print(int(x))
# Row selection by index label.
import pandas as pd
stores = pd.read_csv("https://goo.gl/LN5wGF")
print(stores.loc["a"])
# Concatenation needs an explicit str() conversion of the bool.
x = True
y = "x is:"
print(y + str(x))
# 2D slicing: all rows from index 1 on, all columns.
import numpy as np
np_2d = np.array([[1, 2, 3],
                  [17, 18, 19]])
print(np_2d[1:, 0:])
# Print the number of occurrences of the string "b" in list x
x = ["b", "c", "c", "a", "b", "a"]
print(x
      .count("b")
      )
# Elementwise subtraction of equally-shaped arrays.
import numpy as np
x = np.array([[2, 6, 4], [1, 2, 2]])
y = np.array([[6, 2, 4], [2, 2, 1]])
print(x - y)
# del on a slice removes elements at indices 1 and 2.
x = [9, "H", "M", 3, "R", 11]
del(x[1:3])
print(x)
# Nothing has been plotted before show() is called.
import matplotlib.pyplot as plt
plt.show()
# Operator precedence: "not" binds tighter than "or".
print(True or not(True))
# Find the standard deviation of x
import numpy as np
x = np.array([0.4, 1.2, 1.1])
print(np.std(x))
# The printed list follows the order written, not assignment order.
x = 5
y = -2
z = -1
print(
      [y,z,x]
      )
# String concatenation of two list elements.
x = ["A", "B", "C", "D", "E", "F"]
print(x[2] + x[5])
# column_stack pairs the two lists into an (n, 2) array.
import numpy as np
store = [3, 4, 5, 3, 4, 5]
cost = [94, 87, 81, 96, 97, 92]
np_cols = np.column_stack((store, cost))
print(np_cols)
# 2 is truthy and even, so the elif branch runs.
x = 2
if not x:
    print("First attempt")
elif x % 2 == 0:
    print("Second attempt")
else:
    print("Final attempt")
# Indexing: first element.
x = [2, 5, 4, 0, 7, 1]
print(x[0])
# Boolean-mask selection driven by a parallel array.
import numpy as np
store = np.array(["A", "A", "B", "B", "B", "C", "C"])
cost = np.array([27, 22, 26, 30, 24, 25, 21])
select_cost = cost[store == "A"]
print(select_cost)
# Negative indices count from the end of the list.
x = [10, 7, 8, 4, 9, 6]
print(x[-2]+x[-6])
# Negative-start slice: the last five elements.
x = [12, 2, 5, 15, 4, 1]
print(x[-5:])
# Built-in min().
x = [5, 8, 2, 3, 4, 1]
print(min(x))
# Print the number of occurrences of the letter "e" in x
x = "this sentence has no meaning"
print(x
      .count("e")
      )
# Slice assignment replaces elements 1 and 2 with new values.
foo = [True, 3.2, "Apples", 0, "1.2"]
foo[1:3] = [8,1]
print(foo)
# sorted() returns a new ascending list (reverse=False is the default).
x = [6, 15, 19, 8, 18, 1]
print(sorted(x, reverse = False))
# Elementwise division of two arrays (printed on the following line).
import numpy as np
x = np.array([2, 6, 4])
y = np.array([2, 1, 1])
print(x / y) | [
87,
796,
14631,
79,
1600,
366,
88,
1600,
366,
83,
1600,
366,
71,
1600,
366,
78,
1600,
366,
77,
8973,
198,
4798,
7,
87,
58,
15,
25,
20,
12962,
628,
198,
11748,
19798,
292,
355,
279,
67,
198,
10728,
278,
796,
279,
67,
13,
961,
6... | 1.998762 | 1,615 |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Copyright (c) 2002-2018 "Neo4j,"
# Neo4j Sweden AB [http://neo4j.com]
#
# This file is part of Neo4j.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module contains the low-level functionality required for speaking
Bolt. It is not intended to be used directly by driver users. Instead,
the `session` module provides the main user-facing abstractions.
"""
from __future__ import division
__all__ = [
"DEFAULT_PORT",
"AbstractConnectionPool",
"Connection",
"ConnectionPool",
"ServerInfo",
"connect",
]
from collections import deque
from logging import getLogger
from select import select
from socket import socket, SOL_SOCKET, SO_KEEPALIVE, SHUT_RDWR, error as SocketError, timeout as SocketTimeout, AF_INET, AF_INET6
from struct import pack as struct_pack, unpack as struct_unpack
from threading import RLock, Condition
from sys import platform, version_info
from neobolt.addressing import SocketAddress, Resolver
from neobolt.compat import perf_counter
from neobolt.compat.ssl import SSL_AVAILABLE, HAS_SNI, SSLSocket, SSLError
from neobolt.exceptions import ClientError, ProtocolError, SecurityError, ServiceUnavailable, AuthError, CypherError
from neobolt.meta import version, import_best
from neobolt.packstream import Packer, Unpacker
from neobolt.security import AuthToken, TRUST_DEFAULT, TRUST_ON_FIRST_USE, KNOWN_HOSTS, PersonalCertificateStore, \
SecurityPlan
# Pick the buffer implementation -- presumably the C-accelerated "_io"
# module when available, falling back to the pure-Python "io"; confirm
# against neobolt.meta.import_best.
ChunkedInputBuffer = import_best("neobolt.bolt._io", "neobolt.bolt.io").ChunkedInputBuffer
ChunkedOutputBuffer = import_best("neobolt.bolt._io", "neobolt.bolt.io").ChunkedOutputBuffer
# Default TCP port for the Bolt protocol.
DEFAULT_PORT = 7687
# Four-byte preamble sent ahead of the version handshake (see _handshake).
MAGIC_PREAMBLE = 0x6060B017
# Connection Pool Management
# Sentinel meaning "no limit" for pool sizing.
INFINITE = -1
DEFAULT_MAX_CONNECTION_LIFETIME = 3600  # 1h
DEFAULT_MAX_CONNECTION_POOL_SIZE = 100
DEFAULT_CONNECTION_TIMEOUT = 5.0  # 5s
DEFAULT_KEEP_ALIVE = True
# Connection Settings
DEFAULT_CONNECTION_ACQUISITION_TIMEOUT = 60  # 1m
# Client name
# Built from package version, Python version tuple and platform.
DEFAULT_USER_AGENT = "neobolt/{} Python/{}.{}.{}-{}-{} ({})".format(
    *((version,) + tuple(version_info) + (platform,)))
# Set up logger
log = getLogger("neobolt")
# Bound method cached at module level for cheap, frequent debug logging.
log_debug = log.debug
class ConnectionErrorHandler(object):
    """ A handler for send and receive errors.

    NOTE(review): no behaviour is defined on this class in this file --
    presumably the handler callbacks are attached elsewhere; confirm
    before relying on it.
    """
class Connection(object):
    """ Server connection for Bolt protocol v1.

    A :class:`.Connection` should be constructed following a
    successful Bolt handshake and takes the socket over which
    the handshake was carried out.

    .. note:: logs at INFO level

    NOTE(review): instance attributes used below (``self.socket``,
    ``self.packer``, ``self.output_buffer``, ``self.responses``,
    ``self.local_port``) and helpers (``_receive``, ``_unpack``,
    ``send``, ``fetch``, ``closed``, ``defunct``) are not defined in
    the code shown here -- presumably initialised elsewhere; confirm.
    """
    #: The protocol version in use on this connection
    protocol_version = 0
    #: Server details for this connection
    server = None
    #: True while the connection is checked out of a pool
    in_use = False
    #: True once close() has completed
    _closed = False
    #: True if the connection has become unusable
    _defunct = False
    #: The pool of which this connection is a member
    pool = None
    #: Error class used for raising connection errors
    Error = ServiceUnavailable
    _last_run_statement = None

    # Bug fix: two stray stacked ``@property`` decorators were applied to
    # _append, so ``self._append(...)`` resolved to a property object
    # instead of a bound method and failed at runtime. They are removed
    # because _append is called as a normal method by reset() and close().
    def _append(self, signature, fields=(), response=None):
        """ Add a message to the outgoing queue.
        :arg signature: the signature of the message
        :arg fields: the fields of the message as a tuple
        :arg response: a response object to handle callbacks
        """
        self.packer.pack_struct(signature, fields)
        self.output_buffer.chunk()
        # Second chunk() call -- presumably emits the zero-size
        # end-of-message marker; confirm against the chunking protocol.
        self.output_buffer.chunk()
        self.responses.append(response)

    def reset(self):
        """ Add a RESET message to the outgoing queue, send
        it and consume all remaining messages.
        :raise ProtocolError: if the server answers RESET with FAILURE
        """
        def fail(metadata):
            # Bug fix: ``fail`` was referenced below but never defined,
            # so every reset() raised NameError instead of working.
            raise ProtocolError("RESET failed %r" % metadata)

        log_debug("[#%04X] C: RESET", self.local_port)
        self._append(b"\x0F", response=Response(self, on_failure=fail))
        self.sync()

    def _send(self):
        """ Send all queued messages to the server.
        :raise self.Error: if the connection is closed or defunct
        """
        data = self.output_buffer.view()
        if not data:
            return
        if self.closed():
            raise self.Error("Failed to write to closed connection {!r}".format(self.server.address))
        if self.defunct():
            raise self.Error("Failed to write to defunct connection {!r}".format(self.server.address))
        self.socket.sendall(data)
        self.output_buffer.clear()

    def _fetch(self):
        """ Receive at least one message from the server, if available.
        :return: 2-tuple of number of detail messages and number of summary messages fetched
        :raise self.Error: if the connection is closed or defunct
        :raise ProtocolError: on an unrecognised summary signature
        """
        if self.closed():
            raise self.Error("Failed to read from closed connection {!r}".format(self.server.address))
        if self.defunct():
            raise self.Error("Failed to read from defunct connection {!r}".format(self.server.address))
        if not self.responses:
            return 0, 0
        self._receive()
        details, summary_signature, summary_metadata = self._unpack()
        if details:
            log_debug("[#%04X] S: RECORD * %d", self.local_port, len(details))  # TODO
            self.responses[0].on_records(details)
        if summary_signature is None:
            # Only detail records arrived; the summary is still pending.
            return len(details), 0
        response = self.responses.popleft()
        response.complete = True
        if summary_signature == b"\x70":
            log_debug("[#%04X] S: SUCCESS %r", self.local_port, summary_metadata)
            response.on_success(summary_metadata or {})
        elif summary_signature == b"\x7E":
            self._last_run_statement = None
            log_debug("[#%04X] S: IGNORED", self.local_port)
            response.on_ignored(summary_metadata or {})
        elif summary_signature == b"\x7F":
            self._last_run_statement = None
            log_debug("[#%04X] S: FAILURE %r", self.local_port, summary_metadata)
            response.on_failure(summary_metadata or {})
        else:
            self._last_run_statement = None
            raise ProtocolError("Unexpected response message with signature %02X" % summary_signature)
        return len(details), 1

    def sync(self):
        """ Send and fetch all outstanding messages.
        :return: 2-tuple of number of detail messages and number of summary messages fetched
        """
        self.send()
        detail_count = summary_count = 0
        while self.responses:
            response = self.responses[0]
            while not response.complete:
                detail_delta, summary_delta = self.fetch()
                detail_count += detail_delta
                summary_count += summary_delta
        return detail_count, summary_count

    def close(self):
        """ Close the connection.
        """
        if not self._closed:
            if self.protocol_version >= 3:
                # Protocol v3+ sends GOODBYE (signature 0x02) before closing.
                log_debug("[#%04X] C: GOODBYE", self.local_port)
                self._append(b"\x02", ())
                try:
                    self.send()
                except ServiceUnavailable:
                    # Best effort only; the socket is closed regardless.
                    pass
            log_debug("[#%04X] C: <CLOSE>", self.local_port)
            try:
                self.socket.close()
            except IOError:
                pass
            finally:
                self._closed = True
class AbstractConnectionPool(object):
    """ A collection of connections to one or more server addresses.

    NOTE(review): ``self.lock``, ``self.cond``, ``self.connections``,
    ``self.connector``, ``self.connection_error_handler`` and the
    ``_max_connection_pool_size`` / ``_connection_acquisition_timeout``
    settings are not initialised in the code shown here -- presumably
    set up in ``__init__``; confirm.
    """
    # Set once close() has run; guards against double-closing.
    _closed = False
    def acquire_direct(self, address):
        """ Acquire a connection to a given address from the pool.
        The address supplied should always be an IP address, not
        a host name.
        This method is thread safe.

        :raise ServiceUnavailable: if the pool is closed or a new
            connection cannot be established
        :raise ClientError: if no connection becomes free within the
            acquisition timeout
        """
        if self.closed():
            raise ServiceUnavailable("Connection pool closed")
        with self.lock:
            try:
                connections = self.connections[address]
            except KeyError:
                # First request for this address: start an empty queue.
                connections = self.connections[address] = deque()
            connection_acquisition_start_timestamp = perf_counter()
            while True:
                # try to find a free connection in pool
                for connection in list(connections):
                    # Purge connections that are no longer usable.
                    if connection.closed() or connection.defunct() or connection.timedout():
                        connections.remove(connection)
                        continue
                    if not connection.in_use:
                        connection.in_use = True
                        return connection
                # all connections in pool are in-use
                can_create_new_connection = self._max_connection_pool_size == INFINITE or len(connections) < self._max_connection_pool_size
                if can_create_new_connection:
                    try:
                        connection = self.connector(address, error_handler=self.connection_error_handler)
                    except ServiceUnavailable:
                        # Address unreachable: drop all of its connections.
                        self.remove(address)
                        raise
                    else:
                        connection.pool = self
                        connection.in_use = True
                        connections.append(connection)
                        return connection
                # failed to obtain a connection from pool because the pool is full and no free connection in the pool
                span_timeout = self._connection_acquisition_timeout - (perf_counter() - connection_acquisition_start_timestamp)
                if span_timeout > 0:
                    # Wait for release() to notify; the loop re-checks after waking.
                    self.cond.wait(span_timeout)
                    # if timed out, then we throw error. This time computation is needed, as with python 2.7, we cannot
                    # tell if the condition is notified or timed out when we come to this line
                    if self._connection_acquisition_timeout <= (perf_counter() - connection_acquisition_start_timestamp):
                        raise ClientError("Failed to obtain a connection from pool within {!r}s".format(
                            self._connection_acquisition_timeout))
                else:
                    raise ClientError("Failed to obtain a connection from pool within {!r}s".format(self._connection_acquisition_timeout))
    def acquire(self, access_mode=None):
        """ Acquire a connection to a server that can satisfy a set of parameters.

        NOTE(review): no body here -- presumably overridden by concrete
        subclasses; as written it returns ``None``.

        :param access_mode:
        """
    def release(self, connection):
        """ Release a connection back into the pool.
        This method is thread safe.
        """
        with self.lock:
            connection.in_use = False
            # Wake any acquire_direct() callers waiting for a free slot.
            self.cond.notify_all()
    def in_use_connection_count(self, address):
        """ Count the number of connections currently in use to a given
        address.
        """
        try:
            connections = self.connections[address]
        except KeyError:
            return 0
        else:
            return sum(1 if connection.in_use else 0 for connection in connections)
    def deactivate(self, address):
        """ Deactivate an address from the connection pool, if present, closing
        all idle connection to that address
        """
        with self.lock:
            try:
                connections = self.connections[address]
            except KeyError: # already removed from the connection pool
                return
            for conn in list(connections):
                if not conn.in_use:
                    connections.remove(conn)
                    try:
                        conn.close()
                    except IOError:
                        # Best effort: a failed close must not stop cleanup.
                        pass
            if not connections:
                self.remove(address)
    def remove(self, address):
        """ Remove an address from the connection pool, if present, closing
        all connections to that address.
        """
        with self.lock:
            for connection in self.connections.pop(address, ()):
                try:
                    connection.close()
                except IOError:
                    # Best effort: a failed close must not stop cleanup.
                    pass
    def close(self):
        """ Close all connections and empty the pool.
        This method is thread safe.
        """
        if self._closed:
            return
        try:
            with self.lock:
                if not self._closed:
                    self._closed = True
                    for address in list(self.connections):
                        self.remove(address)
        except TypeError as e:
            # NOTE(review): broad guard and ``e`` is unused -- presumably
            # protects against attribute teardown during interpreter
            # shutdown; confirm before tightening.
            pass
    def closed(self):
        """ Return :const:`True` if this pool is closed, :const:`False`
        otherwise.
        """
        with self.lock:
            return self._closed
class Response(object):
    """ Subscriber for one complete server response: zero or more
    detail (RECORD) messages followed by a single summary message.
    """

    def _dispatch(self, name, *args):
        # Invoke the callback registered under *name*, if present and
        # callable; missing or non-callable entries are silently skipped.
        callback = self.handlers.get(name)
        if callable(callback):
            callback(*args)

    def on_records(self, records):
        """ Called when one or more RECORD messages have been received.
        """
        self._dispatch("on_records", records)

    def on_success(self, metadata):
        """ Called when a SUCCESS message has been received.
        """
        self._dispatch("on_success", metadata)
        self._dispatch("on_summary")

    def on_failure(self, metadata):
        """ Called when a FAILURE message has been received.
        """
        self.connection.reset()
        self._dispatch("on_failure", metadata)
        self._dispatch("on_summary")
        raise CypherError.hydrate(**metadata)

    def on_ignored(self, metadata=None):
        """ Called when an IGNORED message has been received.
        """
        self._dispatch("on_ignored", metadata)
        self._dispatch("on_summary")
# TODO: remove in 2.0
def _last_bookmark(b0, b1):
""" Return the latest of two bookmarks by looking for the maximum
integer value following the last colon in the bookmark string.
"""
n = [None, None]
_, _, n[0] = b0.rpartition(":")
_, _, n[1] = b1.rpartition(":")
for i in range(2):
try:
n[i] = int(n[i])
except ValueError:
raise ValueError("Invalid bookmark: {}".format(b0))
return b0 if n[0] > n[1] else b1
# TODO: remove in 2.0
def last_bookmark(bookmarks):
    """ The bookmark returned by the last :class:`.Transaction`.

    Folds *bookmarks* down to the most recent one via
    :func:`._last_bookmark`; returns ``None`` for an empty iterable.
    """
    latest = None
    for bookmark in bookmarks:
        latest = bookmark if latest is None else _last_bookmark(latest, bookmark)
    return latest
def _connect(resolved_address, **config):
    """ Open a plain TCP socket to *resolved_address*.

    :param resolved_address: 2-tuple (IPv4) or 4-tuple (IPv6) address
    :param config: may contain "connection_timeout" and "keep_alive"
    :return: connected socket object
    :raise ValueError: for an address tuple of unsupported length
    :raise ServiceUnavailable: on timed-out, refused or reset connections
    """
    s = None
    try:
        # Address tuple length distinguishes IPv4 (host, port) from
        # IPv6 (host, port, flowinfo, scopeid).
        if len(resolved_address) == 2:
            s = socket(AF_INET)
        elif len(resolved_address) == 4:
            s = socket(AF_INET6)
        else:
            raise ValueError("Unsupported address {!r}".format(resolved_address))
        # Apply the connect timeout only for the duration of connect(),
        # then restore the socket's previous timeout setting.
        t = s.gettimeout()
        s.settimeout(config.get("connection_timeout", DEFAULT_CONNECTION_TIMEOUT))
        log_debug("[#0000] C: <OPEN> %s", resolved_address)
        s.connect(resolved_address)
        s.settimeout(t)
        s.setsockopt(SOL_SOCKET, SO_KEEPALIVE, 1 if config.get("keep_alive", DEFAULT_KEEP_ALIVE) else 0)
    except SocketTimeout:
        log_debug("[#0000] C: <TIMEOUT> %s", resolved_address)
        log_debug("[#0000] C: <CLOSE> %s", resolved_address)
        s.close()
        raise ServiceUnavailable("Timed out trying to establish connection to {!r}".format(resolved_address))
    except SocketError as error:
        log_debug("[#0000] C: <ERROR> %s %s", type(error).__name__, " ".join(map(repr, error.args)))
        log_debug("[#0000] C: <CLOSE> %s", resolved_address)
        s.close()
        # Connection-refused errno values (presumably 61 macOS, 99/111
        # Linux, 10061 Windows -- confirm) map to ServiceUnavailable.
        if error.errno in (61, 99, 111, 10061):
            raise ServiceUnavailable("Failed to establish connection to {!r} (reason {})".format(resolved_address, error.errno))
        else:
            raise
    except ConnectionResetError:
        raise ServiceUnavailable("Failed to establish connection to {!r}".format(resolved_address))
    else:
        return s
def _handshake(s, resolved_address, der_encoded_server_certificate, **config):
    """ Perform the Bolt version handshake over an open socket and build
    a :class:`.Connection` for the agreed protocol version.

    :param s: open socket to the server
    :param resolved_address: address the socket is connected to
    :param der_encoded_server_certificate: certificate captured during TLS
        negotiation, passed through to the Connection
    :return: a :class:`.Connection`, or ``None`` if the server agreed to
        version 0 (no common version)
    :raise ProtocolError: on a malformed or missing handshake response
    :raise ServiceUnavailable: if the remote end speaks HTTP
    """
    local_port = s.getsockname()[1]
    # Send details of the protocol versions supported
    supported_versions = [3, 2, 1, 0]
    handshake = [MAGIC_PREAMBLE] + supported_versions
    log_debug("[#%04X] C: <MAGIC> 0x%08X", local_port, MAGIC_PREAMBLE)
    log_debug("[#%04X] C: <HANDSHAKE> 0x%08X 0x%08X 0x%08X 0x%08X", local_port, *supported_versions)
    # Each value is packed as a 4-byte big-endian unsigned int.
    data = b"".join(struct_pack(">I", num) for num in handshake)
    s.sendall(data)
    # Handle the handshake response
    # NOTE(review): this polls select() with a zero timeout in a tight
    # loop -- a busy-wait that burns CPU until data arrives; consider a
    # blocking select instead.
    ready_to_read, _, _ = select((s,), (), (), 0)
    while not ready_to_read:
        ready_to_read, _, _ = select((s,), (), (), 0)
    try:
        data = s.recv(4)
    except ConnectionResetError:
        raise ServiceUnavailable("Failed to read any data from server {!r} after connected".format(resolved_address))
    data_size = len(data)
    if data_size == 0:
        # If no data is returned after a successful select
        # response, the server has closed the connection
        log_debug("[#%04X] S: <CLOSE>", local_port)
        s.close()
        raise ProtocolError("Connection to %r closed without handshake response" % (resolved_address,))
    if data_size != 4:
        # Some garbled data has been received
        log_debug("[#%04X] S: @*#!", local_port)
        s.close()
        raise ProtocolError("Expected four byte handshake response, received %r instead" % data)
    agreed_version, = struct_unpack(">I", data)
    log_debug("[#%04X] S: <HANDSHAKE> 0x%08X", local_port, agreed_version)
    if agreed_version == 0:
        # Version 0 means no version in common: shut the socket down.
        log_debug("[#%04X] C: <CLOSE>", local_port)
        s.shutdown(SHUT_RDWR)
        s.close()
    elif agreed_version in (1, 2):
        connection = Connection(agreed_version, resolved_address, s,
                                der_encoded_server_certificate=der_encoded_server_certificate,
                                **config)
        connection.init()
        return connection
    elif agreed_version in (3,):
        # Version 3 opens with hello() instead of init().
        connection = Connection(agreed_version, resolved_address, s,
                                der_encoded_server_certificate=der_encoded_server_certificate,
                                **config)
        connection.hello()
        return connection
    elif agreed_version == 0x48545450:
        # 0x48545450 is ASCII "HTTP": the port serves HTTP, not Bolt.
        log_debug("[#%04X] S: <CLOSE>", local_port)
        s.close()
        raise ServiceUnavailable("Cannot to connect to Bolt service on {!r} "
                                 "(looks like HTTP)".format(resolved_address))
    else:
        log_debug("[#%04X] S: <CLOSE>", local_port)
        s.close()
        raise ProtocolError("Unknown Bolt protocol version: {}".format(agreed_version))
def connect(address, **config):
    """ Connect and perform a handshake and return a valid Connection object, assuming
    a protocol version can be agreed.

    Resolves *address* (custom resolver first, then DNS) and tries each
    resolved address in turn until one yields a working connection.

    :raise ServiceUnavailable: if no addresses could be resolved at all
    :raise Exception: re-raises the error from the last failed attempt
    """
    security_plan = SecurityPlan.build(**config)
    last_error = None
    # Establish a connection to the host and port specified
    # Catches refused connections see:
    # https://docs.python.org/2/library/errno.html
    log_debug("[#0000] C: <RESOLVE> %s", address)
    resolver = Resolver(custom_resolver=config.get("resolver"))
    resolver.addresses.append(address)
    resolver.custom_resolve()
    resolver.dns_resolve()
    for resolved_address in resolver.addresses:
        try:
            s = _connect(resolved_address, **config)
            # NOTE(review): _secure() is not defined in this part of the
            # file -- presumably supplied elsewhere in the module to wrap
            # the socket in TLS per security_plan; confirm.
            s, der_encoded_server_certificate = _secure(s, address[0], security_plan.ssl_context, **config)
            connection = _handshake(s, resolved_address, der_encoded_server_certificate, **config)
        except Exception as error:
            # Remember the failure and move on to the next address.
            last_error = error
        else:
            return connection
    if last_error is None:
        raise ServiceUnavailable("Failed to resolve addresses for %s" % address)
    else:
        raise last_error
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
21004,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
2,
15069,
357,
66,
8,
6244,
12,
7908,
366,
8199,
78,
19,
73,
553,
198,
2,
21227,
19,
73,
10710,
9564,
685,
4... | 2.34792 | 8,798 |
from PuppeteerLibrary.ikeywords.ipdf_async import iPDFAsync, DEFAULT_FILENAME_PAGE
from PuppeteerLibrary.base.librarycomponent import LibraryComponent
from PuppeteerLibrary.base.robotlibcore import keyword
| [
6738,
20926,
14471,
263,
23377,
13,
522,
88,
10879,
13,
541,
7568,
62,
292,
13361,
1330,
1312,
20456,
42367,
11,
5550,
38865,
62,
46700,
1677,
10067,
62,
4537,
8264,
198,
6738,
20926,
14471,
263,
23377,
13,
8692,
13,
32016,
42895,
1330,... | 3.491525 | 59 |