content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
from __future__ import division
from shapely.wkt import loads as wkt_loads
import os
import shapely
import shapely.geometry
import shapely.affinity
import h5py
import pandas as pd
import tifffile as tiff
from numba import jit, njit
from tqdm import tqdm
from collections import defaultdict
import csv
import sys
import cv2
from shapely.geometry import MultiPolygon, Polygon
import shapely.wkt
import shapely.affinity
import numpy as np
import skimage.color as color
from skimage.transform import rescale
# dirty hacks from SO to allow loading of big cvs's
# without decrement loop it crashes with C error
# http://stackoverflow.com/questions/15063936/csv-error-field-larger-than-field-limit-131072
maxInt = sys.maxsize
decrement = True
while decrement:
# decrease the maxInt value by factor 10
# as long as the OverflowError occurs.
decrement = False
try:
csv.field_size_limit(maxInt)
except OverflowError:
maxInt = int(maxInt/10)
decrement = True
data_path = '../data'
train_wkt = pd.read_csv(os.path.join(data_path, 'train_wkt_v4.csv'))
gs = pd.read_csv(os.path.join(data_path, 'grid_sizes.csv'), names=['ImageId', 'Xmax', 'Ymin'], skiprows=1)
shapes = pd.read_csv(os.path.join(data_path, '3_shapes.csv'))
epsilon = 1e-15
def get_scalers(height, width, x_max, y_min):
"""
:param height:
:param width:
:param x_max:
:param y_min:
:return: (xscaler, yscaler)
"""
w_ = width * (width / (width + 1))
h_ = height * (height / (height + 1))
return w_ / x_max, h_ / y_min
def polygons2mask_layer(height, width, polygons, image_id):
"""
:param height:
:param width:
:param polygons:
:return:
"""
x_max, y_min = _get_xmax_ymin(image_id)
x_scaler, y_scaler = get_scalers(height, width, x_max, y_min)
polygons = shapely.affinity.scale(polygons, xfact=x_scaler, yfact=y_scaler, origin=(0, 0, 0))
img_mask = np.zeros((height, width), np.uint8)
if not polygons:
return img_mask
int_coords = lambda x: np.array(x).round().astype(np.int32)
exteriors = [int_coords(poly.exterior.coords) for poly in polygons]
interiors = [int_coords(pi.coords) for poly in polygons for pi in poly.interiors]
cv2.fillPoly(img_mask, exteriors, 1)
cv2.fillPoly(img_mask, interiors, 0)
return img_mask
def generate_mask(image_id, height, width, num_mask_channels=10, train=train_wkt):
"""
:param image_id:
:param height:
:param width:
:param num_mask_channels: numbers of channels in the desired mask
:param train: polygons with labels in the polygon format
:return: mask corresponding to an image_id of the desired height and width with desired number of channels
"""
mask = np.zeros((num_mask_channels, height, width))
for mask_channel in range(num_mask_channels):
poly = train.loc[(train['ImageId'] == image_id)
& (train['ClassType'] == mask_channel + 1), 'MultipolygonWKT'].values[0]
polygons = shapely.wkt.loads(poly)
mask[mask_channel, :, :] = polygons2mask_layer(height, width, polygons, image_id)
return mask
| [
6738,
11593,
37443,
834,
1330,
7297,
198,
198,
6738,
5485,
306,
13,
86,
21841,
1330,
15989,
355,
266,
21841,
62,
46030,
198,
198,
11748,
28686,
198,
11748,
5485,
306,
198,
11748,
5485,
306,
13,
469,
15748,
198,
11748,
5485,
306,
13,
2... | 2.509509 | 1,262 |
import tkinter
import time
import othello as oth
LIGHT_FONT = ("Nexa light", 12)
BOLD_FONT = ("Nexa Bold", 11)
CELL_LENGTH = 60
COL_ROW_NAME_LENGTH = 40
DELAY = 1.5
CRED2 = '\33[91m'
CYELLOW2 = '\33[93m'
CURRENT_NUMB_CALL = 0
if __name__ == '__main__':
game_logic_instance = oth.GameLogic()
game_logic_instance.create_board()
OthelloBoard().run()
| [
11748,
256,
74,
3849,
198,
11748,
640,
198,
198,
11748,
267,
1169,
18798,
355,
267,
400,
198,
198,
43,
9947,
62,
37,
35830,
796,
5855,
45,
1069,
64,
1657,
1600,
1105,
8,
198,
33,
15173,
62,
37,
35830,
796,
5855,
45,
1069,
64,
2705... | 2.147059 | 170 |
"""
Java Maven Workflow
"""
from aws_lambda_builders.workflow import BaseWorkflow, Capability
from aws_lambda_builders.actions import CopySourceAction, CleanUpAction
from aws_lambda_builders.workflows.java.actions import JavaCopyDependenciesAction, JavaMoveDependenciesAction
from aws_lambda_builders.workflows.java.utils import OSUtils, is_experimental_maven_scope_and_layers_active
from .actions import (
JavaMavenBuildAction,
JavaMavenCopyDependencyAction,
JavaMavenCopyArtifactsAction,
JavaMavenCopyLayerArtifactsAction,
)
from .maven import SubprocessMaven
from .maven_resolver import MavenResolver
from .maven_validator import MavenValidator
class JavaMavenWorkflow(BaseWorkflow):
"""
A Lambda builder workflow that knows how to build Java projects using Maven.
"""
NAME = "JavaMavenWorkflow"
CAPABILITY = Capability(language="java", dependency_manager="maven", application_framework=None)
EXCLUDED_FILES = (".aws-sam", ".git")
| [
37811,
198,
29584,
337,
4005,
5521,
11125,
198,
37811,
198,
6738,
3253,
82,
62,
50033,
62,
50034,
13,
1818,
11125,
1330,
7308,
12468,
11125,
11,
4476,
1799,
198,
6738,
3253,
82,
62,
50033,
62,
50034,
13,
4658,
1330,
17393,
7416,
12502,
... | 3.181818 | 308 |
import argparse
import csv
import os
from collections import OrderedDict
from datetime import datetime
from github import Github
if __name__ == "__main__":
print("Running...\n")
# Variable to let us know how long this takes...
begin_time = datetime.now()
# Setup arguments
parser = argparse.ArgumentParser()
parser.add_argument(
'--repo', required=True,
help='''
Target (remote) GitHub repo. (i.e. "org-name/repo-name")
''',
)
parser.add_argument(
'--token', required=True,
help='''
GitHub API Token.
''',
)
parser.add_argument(
'--branch', default="master",
help='''
Target branch of git repo. (Default: "master")
''',
)
args = parser.parse_args()
# Setup GitHub connection
g = Github(login_or_token=args.token, per_page=100)
repo = g.get_repo(args.repo)
# Define object to return
monthly_stats = {}
# Place to store all users when they first pull
pull_authors = {
"all": [],
"new": {}
}
# Object for author stats
monthly_author_stats = {}
# Loop all the closed PR's, oldest-to-newest
for pull in repo.get_pulls('closed', direction="asc", base=args.branch):
# Skip if not merged
if pull.merged is False:
continue
# Set the current month
current_month = pull.merged_at.strftime("%Y-%m")
# See if month exists in object; if not, create it.
if monthly_stats.get(current_month, False) is False:
monthly_stats[current_month] = {
"pull_requests": 0,
"new_contributors": 0,
"total_contributors": 0
}
# END if monthly_stats.get
# Add to "new author" counter, if never seen before
if pull.user.login not in pull_authors["all"]:
monthly_stats[current_month]["new_contributors"] += 1
pull_authors["all"].append(pull.user.login)
# Make sure we have an object for this month
if pull_authors["new"].get(current_month, False) is False:
pull_authors["new"][current_month] = []
# END if pull_authors["new"]
# Add them to a list for output later
pull_authors["new"][current_month].append(pull.user.login)
# END if pull.user.login
# Add to pull counter
monthly_stats[current_month]["pull_requests"] += 1
# Now setup the monthly author stats
# See if month exists in object; if not, create it.
if monthly_author_stats.get(current_month, False) is False:
monthly_author_stats[current_month] = {}
if pull.user.login not in monthly_author_stats[current_month]:
monthly_author_stats[current_month][pull.user.login] = 0
# Add to counter
monthly_author_stats[current_month][pull.user.login] += 1
# END if monthly_author_stats.get
# END if pull.merged
# END for pull
# TODO: figure out why we have to sort the list for some reason ...
pull_authors_ordered = OrderedDict(
sorted(pull_authors["new"].items(), key=lambda t: t[0])
)
pull_authors["new"] = pull_authors_ordered
# Output the New Authors by month
# Need a running tally of all authors
total_authors = 0
for month in pull_authors["new"]:
# Add this month's new Authors to tally
total_authors += len(pull_authors["new"][month])
# Set that value in this month's data
monthly_stats[month]['total_contributors'] = total_authors
# Print out the month, with some stats
print("\n---%s--- (%s/%s)" % (
month,
len(pull_authors["new"][month]),
total_authors
))
# Print out the new authors
for author in pull_authors["new"][month]:
print(author)
# END for author
# END for month
# Output the general monthly stats
# Define CSV name
csv_name_unique = "%s.github_monthly_stats.%s.csv" % (
args.repo.split('/')[-1],
datetime.now().strftime('%Y%m%d_%H%M')
)
# Create first row of CSV
first_row = [
"Month", "Pull Requests",
"New Contributors",
"Total Contributors"
]
write_to_csv_file(csv_name_unique, first_row, write_type="w")
# For each month, write data
for month in monthly_stats:
unique_row_to_write = [month]
for attr in monthly_stats[month]:
unique_row_to_write.append(monthly_stats[month][attr])
# END for attr
write_to_csv_file(csv_name_unique, unique_row_to_write)
# END for month
# Display where the CSV file is
print("\n\nCreated file:\n\t%s/%s" % (
os.getcwd(), csv_name_unique
))
# Output the monthly author
# Define CSV name
csv_name_unique = "%s.github_author_stats.%s.csv" % (
args.repo.split('/')[-1],
datetime.now().strftime('%Y%m%d_%H%M')
)
# Create first row of CSV
first_row = [
"Month", "Author", "Pull Requests"
]
write_to_csv_file(csv_name_unique, first_row, write_type="w")
# For each month, write data
for month in monthly_author_stats:
for author in monthly_author_stats[month]:
# print(month)
# print(author)
# print(monthly_author_stats[month][author])
write_to_csv_file(csv_name_unique, [
month, author,
monthly_author_stats[month][author]
])
# END for author
# END for month
# Display where the CSV file is
print("\n\nCreated file:\n\t%s/%s" % (
os.getcwd(), csv_name_unique
))
print("DONE!")
# How long did this take?
print(datetime.now() - begin_time)
exit()
| [
11748,
1822,
29572,
198,
11748,
269,
21370,
198,
11748,
28686,
198,
6738,
17268,
1330,
14230,
1068,
35,
713,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
6738,
33084,
1330,
38994,
628,
198,
198,
361,
11593,
3672,
834,
6624,
366,
834,
1... | 2.233522 | 2,655 |
#!/usr/bin/python
import socket
import json
import os
import pwd
UDP_IP = "127.0.0.1"
UDP_PORT = 5006
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) # Internet UDP
user = pwd.getpwuid( os.getuid() ).pw_name
print "Running as", user
session_list = []
while (True) :
display_menu()
cmd = raw_input("add <session>, drop <session>, exit: ")
print ""
if len(cmd) > 0 :
params = cmd.split()
session_num = -1
if len(params) > 1 :
try :
session_num = int(params[1])
except :
print "Must enter an integer value"
if params[0] == "exit" :
break
elif params[0] == "add" and len(params) > 1 :
add(session_num)
elif params[0] == "drop" and len(params) > 1 :
drop(session_num)
else :
print "Unrecognized command"
print "thank you for using the help session registration system"
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
11748,
17802,
198,
11748,
33918,
198,
11748,
28686,
198,
11748,
279,
16993,
198,
198,
52,
6322,
62,
4061,
796,
366,
16799,
13,
15,
13,
15,
13,
16,
1,
198,
52,
6322,
62,
15490,
796,
5323,
... | 2.126386 | 451 |
# DROP TABLES
songplay_table_drop = "DROP TABLE IF EXISTS songplays;"
user_table_drop = "DROP TABLE IF EXISTS users;"
song_table_drop = "DROP TABLE IF EXISTS songs;"
artist_table_drop = "DROP TABLE IF EXISTS artists;"
time_table_drop = "DROP TABLE IF EXISTS time;"
# CREATE TABLES
songplay_table_create = ("""
CREATE TABLE IF NOT EXISTS songplays (
songplay_id serial PRIMARY KEY,
start_time bigint NOT NULL,
user_id integer,
level varchar(255) NOT NULL,
song_id varchar(255),
artist_id varchar(255),
session_id integer NOT NULL,
location varchar(255),
user_agent varchar(255)
);
""")
user_table_create = ("""
CREATE TABLE IF NOT EXISTS users (
user_id integer PRIMARY KEY,
first_name varchar(255),
last_name varchar(255),
gender char,
level varchar(255) NOT NULL
);
""")
song_table_create = ("""
CREATE TABLE IF NOT EXISTS songs (
song_id varchar(255) PRIMARY KEY,
title varchar(255) NOT NULL,
artist_id varchar(255) NOT NULL,
year smallint NOT NULL,
duration float NOT NULL
);
""")
artist_table_create = ("""
CREATE TABLE IF NOT EXISTS artists (
artist_id varchar(255) PRIMARY KEY,
name varchar(255) NOT NULL,
location varchar(255),
latitude numeric,
longitude numeric
);
""")
time_table_create = ("""
CREATE TABLE IF NOT EXISTS time (
start_time bigint PRIMARY KEY ,
hour smallint NOT NULL,
day smallint NOT NULL,
week smallint NOT NULL,
month smallint NOT NULL,
year smallint NOT NULL,
weekday smallint NOT NULL
);
""")
# INSERT RECORDS
songplay_table_insert = ("""
INSERT INTO songplays(start_time, user_id, level, song_id, artist_id, session_id, location, user_agent)
VALUES(%s,%s,%s,%s,%s,%s,%s,%s);
""")
user_table_insert = ("""
INSERT INTO users (user_id, first_name, last_name, gender, level)
VALUES (%s, %s, %s, %s, %s)
ON CONFLICT (user_id)
DO UPDATE SET
first_name = EXCLUDED.first_name,
last_name = EXCLUDED.last_name,
gender = EXCLUDED.gender,
level = EXCLUDED.level;
""")
song_table_insert = ("""
INSERT INTO songs (song_id, title, artist_id, year, duration)
VALUES (%s, %s, %s, %s, %s)
ON CONFLICT (song_id)
DO NOTHING;
""")
artist_table_insert = ("""
INSERT INTO artists (artist_id, name, location, latitude, longitude)
VALUES (%s, %s, %s, %s, %s)
ON CONFLICT(artist_id)
DO NOTHING;
""")
time_table_insert = ("""
INSERT INTO time (start_time, hour, day, week, month, year, weekday)
VALUES (%s, %s, %s, %s, %s, %s, %s)
ON CONFLICT(start_time)
DO NOTHING;
""")
# FIND SONGS
song_select = ("""
SELECT songs.song_id, artists.artist_id
FROM songs
LEFT OUTER JOIN artists ON songs.artist_id = artists.artist_id
WHERE
songs.title = %s AND artists.name = %s AND songs.duration = %s;
""")
# QUERY LISTS
create_table_queries = [songplay_table_create, user_table_create, song_table_create, artist_table_create, time_table_create]
drop_table_queries = [songplay_table_drop, user_table_drop, song_table_drop, artist_table_drop, time_table_drop] | [
2,
10560,
3185,
309,
6242,
28378,
198,
198,
34050,
1759,
62,
11487,
62,
14781,
796,
366,
7707,
3185,
43679,
16876,
7788,
1797,
4694,
3496,
26024,
26033,
198,
7220,
62,
11487,
62,
14781,
796,
366,
7707,
3185,
43679,
16876,
7788,
1797,
46... | 2.331636 | 1,375 |
#! /usr/bin/python3
#
# Copyright (c) 2017 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
# pylint: disable = missing-docstring
import os
import commonl.testing
import tcfl.tc
import tcfl.tc_zephyr_sanity
import tcfl.tl
srcdir = os.path.dirname(__file__)
ttbd = commonl.testing.test_ttbd(config_files = [
os.path.join(srcdir, "conf_00_lib.py"),
os.path.join(srcdir, "conf_zephyr_tests.py"),
])
tcfl.tc.target_c.extension_register(tcfl.app_zephyr.zephyr)
if not tcfl.app.driver_valid(tcfl.app_zephyr.app_zephyr.__name__):
tcfl.app.driver_add(tcfl.app_zephyr.app_zephyr)
tcfl.tc.tc_c.driver_add(tcfl.tc_zephyr_sanity.tc_zephyr_sanity_c)
@tcfl.tc.tags(**tcfl.tl.zephyr_tags())
@tcfl.tc.target(
ttbd.url_spec + " and zephyr_board",
app_zephyr = os.path.join(os.environ['ZEPHYR_BASE'],
"tests", "kernel", "common"))
class _01_simple(tcfl.tc.tc_c):
"""
Zephyr's testcase.yaml build, execute and pass
"""
# app_zephyr provides start() methods start the targets
@staticmethod
| [
2,
0,
1220,
14629,
14,
8800,
14,
29412,
18,
198,
2,
198,
2,
15069,
357,
66,
8,
2177,
8180,
10501,
198,
2,
198,
2,
30628,
55,
12,
34156,
12,
33234,
7483,
25,
24843,
12,
17,
13,
15,
198,
2,
198,
2,
279,
2645,
600,
25,
15560,
7... | 2.181443 | 485 |
'''
Created on 30 Jun 2014
@author: julianporter
'''
import random
import unittest
import multiprocessing
import collections
import tempfile
import time
import os
import sys
from .actions import EventGenerator, EventObserver
if __name__=='__main__':
unittest.main(exit=False)
| [
7061,
6,
198,
41972,
319,
1542,
7653,
1946,
198,
198,
31,
9800,
25,
474,
377,
666,
26634,
198,
7061,
6,
628,
198,
11748,
4738,
198,
11748,
555,
715,
395,
198,
11748,
18540,
305,
919,
278,
198,
11748,
17268,
198,
11748,
20218,
7753,
... | 2.625 | 120 |
# coding: utf-8
"""*****************************************************************************
* Copyright (C) 2019 Microchip Technology Inc. and its subsidiaries.
*
* Subject to your compliance with these terms, you may use Microchip software
* and any derivatives exclusively with Microchip products. It is your
* responsibility to comply with third party license terms applicable to your
* use of third party software (including open source software) that may
* accompany Microchip software.
*
* THIS SOFTWARE IS SUPPLIED BY MICROCHIP "AS IS". NO WARRANTIES, WHETHER
* EXPRESS, IMPLIED OR STATUTORY, APPLY TO THIS SOFTWARE, INCLUDING ANY IMPLIED
* WARRANTIES OF NON-INFRINGEMENT, MERCHANTABILITY, AND FITNESS FOR A
* PARTICULAR PURPOSE.
*
* IN NO EVENT WILL MICROCHIP BE LIABLE FOR ANY INDIRECT, SPECIAL, PUNITIVE,
* INCIDENTAL OR CONSEQUENTIAL LOSS, DAMAGE, COST OR EXPENSE OF ANY KIND
* WHATSOEVER RELATED TO THE SOFTWARE, HOWEVER CAUSED, EVEN IF MICROCHIP HAS
* BEEN ADVISED OF THE POSSIBILITY OR THE DAMAGES ARE FORESEEABLE. TO THE
* FULLEST EXTENT ALLOWED BY LAW, MICROCHIP'S TOTAL LIABILITY ON ALL CLAIMS IN
* ANY WAY RELATED TO THIS SOFTWARE WILL NOT EXCEED THE AMOUNT OF FEES, IF ANY,
* THAT YOU HAVE PAID DIRECTLY TO MICROCHIP FOR THIS SOFTWARE.
*****************************************************************************"""
################################################################################
#### Component ####
################################################################################
| [
2,
19617,
25,
3384,
69,
12,
23,
198,
37811,
17174,
17174,
4557,
35625,
198,
9,
15069,
357,
34,
8,
13130,
4527,
35902,
8987,
3457,
13,
290,
663,
34943,
13,
198,
9,
198,
9,
15540,
284,
534,
11846,
351,
777,
2846,
11,
345,
743,
779,
... | 4.124324 | 370 |
import numpy as np
import h5py
from typing import List, Tuple, Dict, Callable
| [
11748,
299,
32152,
355,
45941,
198,
11748,
289,
20,
9078,
198,
6738,
19720,
1330,
7343,
11,
309,
29291,
11,
360,
713,
11,
4889,
540,
198
] | 3.12 | 25 |
from django.db import models
from django.contrib.auth.models import AbstractUser, BaseUserManager, PermissionsMixin
# Create your models here.
| [
6738,
42625,
14208,
13,
9945,
1330,
4981,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
27530,
1330,
27741,
12982,
11,
7308,
12982,
13511,
11,
2448,
8481,
35608,
259,
198,
2,
13610,
534,
4981,
994,
13,
198
] | 3.763158 | 38 |
from django.db import models
from django.contrib.postgres.fields import JSONField
from django.utils import timezone
from model_utils.models import TimeStampedModel
from .conf import settings
from .manager import MessageManager, MessageAllManager
| [
6738,
42625,
14208,
13,
9945,
1330,
4981,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
7353,
34239,
13,
25747,
1330,
19449,
15878,
198,
6738,
42625,
14208,
13,
26791,
1330,
640,
11340,
198,
198,
6738,
2746,
62,
26791,
13,
27530,
1330,
38... | 3.952381 | 63 |
import numpy as np
import pandas as pd
###############################################################################
# Load data
df_train = pd.read_csv("train_numerical_head.csv")
df_train.head()
feats = df_train.drop(str(42), axis=1)
X_train = feats.values #features
y_train = df_train[str(42)].values #target
df_test = pd.read_csv("test_numerical_head.csv")
df_train.head()
X_test = feats.values #features
###############################################################################
# Drop features
p_to_drop = [ 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 0, 0,
0, 0, 0, 0, 1, 1, 1,
1, 1, 0, 0, 0, 0, 1,
1, 0, 0, 0, 0, 0, 0,
0, 0]
for i in range(5, 42):
print(i)
if p_to_drop[i-5] == 0:
df_train = df_train.drop(str(i), axis=1)
df_test = df_test.drop(str(i), axis=1)
###############################################################################
# Save to File
df_train = np.asarray(df_train)
df_train = np.asarray(df_test)
np.savetxt("result_train.csv", df_train, delimiter=",")
np.savetxt("result_test.csv", df_test, delimiter=",")
#plot_r2(y, y_pred2, "Performance of GradientBoostingRegressor")
#plt.show()
#r2_score(y, y_pred2)
| [
11748,
299,
32152,
355,
45941,
201,
198,
11748,
19798,
292,
355,
279,
67,
201,
198,
201,
198,
29113,
29113,
7804,
4242,
21017,
201,
198,
2,
8778,
1366,
201,
198,
7568,
62,
27432,
796,
279,
67,
13,
961,
62,
40664,
7203,
27432,
62,
77... | 2.375486 | 514 |
# -*- coding: utf-8 -*-
"""
http/2 adapter implements
~~~~~~~~~~~~~
This module contains the transport adapters that Requests uses to define
and maintain connections.
"""
import requests
from requests.exceptions import InvalidSchema as _SchemaError
from urllib3.poolmanager import PoolManager, proxy_from_url
from urllib3.util.retry import Retry
from urllib3.util import parse_url
from urllib3.exceptions import ClosedPoolError
from urllib3.exceptions import ConnectTimeoutError
from urllib3.exceptions import HTTPError as _HTTPError
from urllib3.exceptions import MaxRetryError
from urllib3.exceptions import NewConnectionError
from urllib3.exceptions import ProxyError as _ProxyError
from urllib3.exceptions import ProtocolError
from urllib3.exceptions import ReadTimeoutError
from urllib3.exceptions import SSLError as _SSLError
from urllib3.exceptions import ResponseError
DEFAULT_POOLBLOCK = False
DEFAULT_POOLSIZE = 10
DEFAULT_RETRIES = 0
DEFAULT_POOL_TIMEOUT = None
class HTTP2Adapter(requests.adapters.BaseAdapter):
"""The HTTP/2 Adapter for urllib3
Usage::
>>> import requests
>>> from http2_adapter import HTTP2Adapter
>>> s = requests.Session()
>>> a = HTTP2Adapter(max_retries=3)
>>> s.mount("https://", a)
"""
__attrs__ = ['max_retries', 'config', '_pool_connections', '_pool_maxsize',
'_pool_block']
def init_poolmanager(self, connections, maxsize, block=DEFAULT_POOLBLOCK,
**pool_kwargs):
"""Initializes a urllib3 PoolManager.
This method should not be called from user code, and is only
exposed for use when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param connections: The number of urllib3 connection pools to cache.
:param maxsize: The maximum number of connections to save in the pool.
:param block: Block when no free connections are available.
:param pool_kwargs: Extra keyword arguments used to initialize the Pool Manager.
"""
self._pool_connections = connections
self._pool_maxsize = maxsize
self._pool_block = block
self.poolmanager = PoolManager(num_pools=connections, maxsize=maxsize,
block=block, strict=True, **pool_kwargs)
def get_connection(self, url):
"""Returns a urllib3 connection for the given URL. This should not be
called from user code, and is only exposed for use when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param url: The URL to connect to.
:rtype: urllib3.ConnectionPool
"""
parsed = urlparse(url)
url = parsed.geturl()
return self.poolmanager.connection_from_url(url)
def close(self):
"""Disposes of any internal state."""
self.poolmanager.clear()
def send(self, request, stream=False, timeout=None, verify=True, cert=None):
"""Sends PreparedRequest object. Returns Response object.
:param request: The :class:`PreparedRequest <PreparedRequest>` being sent.
:param stream: (optional) Whether to stream the request content.
:param timeout: (optional) How long to wait for the server to send
data before giving up, as a float, or a :ref:`(connect timeout,
read timeout) <timeouts>` tuple.
:type timeout: float or tuple or urllib3 Timeout object
:param verify: (optional) Either a boolean, in which case it controls whether
we verify the server's TLS certificate, or a string, in which case it
must be a path to a CA bundle to use
:param cert: (optional) Any user-provided SSL certificate to be trusted.
:rtype: requests.Response
"""
scheme = urlparse(request.url).scheme
# FIXME supports the plain HTTP/2
if scheme != "https":
raise _SchemaError("unsupported schema: \"%s\"" % scheme)
conn = self.get_connection(request.url)
self.cert_verify(conn, request.url, verify, cert)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
37811,
198,
4023,
14,
17,
21302,
23986,
198,
15116,
8728,
93,
198,
198,
1212,
8265,
4909,
262,
4839,
46363,
326,
9394,
3558,
3544,
284,
8160,
198,
392,
5529,
8787,
... | 2.7227 | 1,511 |
from typing import Any, Dict, Type, TypeVar, Union
import attr
from ..types import UNSET, Unset
T = TypeVar("T", bound="Time")
@attr.s(auto_attribs=True)
class Time:
""" """
time: str
calendarmodel: Union[Unset, str] = UNSET
timezone: Union[Unset, int] = UNSET
before: Union[Unset, int] = UNSET
after: Union[Unset, int] = UNSET
precision: Union[Unset, int] = UNSET
@classmethod
| [
6738,
19720,
1330,
4377,
11,
360,
713,
11,
5994,
11,
5994,
19852,
11,
4479,
198,
198,
11748,
708,
81,
198,
198,
6738,
11485,
19199,
1330,
4725,
28480,
11,
791,
2617,
198,
198,
51,
796,
5994,
19852,
7203,
51,
1600,
5421,
2625,
7575,
... | 2.482143 | 168 |
########
# Copyright (c) 2016 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
import os
from cosmo_tester.framework.testenv import TestCase
from cloudify.workflows import local
IGNORED_LOCAL_WORKFLOW_MODULES = (
'worker_installer.tasks',
'plugin_installer.tasks',
'cloudify_agent.operations',
'cloudify_agent.installer.operations',
)
| [
7804,
198,
2,
15069,
357,
66,
8,
1584,
402,
13827,
4561,
2114,
21852,
12052,
13,
1439,
2489,
10395,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
4... | 3.285714 | 280 |
result = []
for astronaut in DATA:
for i, mission in enumerate(astronaut.pop('missions'), start=1):
for field,value in mission.items():
column_name = f'mission{i}_{field}'
astronaut[column_name] = value
result.append(astronaut)
| [
198,
20274,
796,
17635,
198,
1640,
33779,
287,
42865,
25,
198,
220,
220,
220,
329,
1312,
11,
4365,
287,
27056,
378,
7,
459,
1313,
2306,
13,
12924,
10786,
8481,
33809,
923,
28,
16,
2599,
198,
220,
220,
220,
220,
220,
220,
220,
329,
... | 2.445455 | 110 |
# -*- coding: utf-8 -*-
from pyleecan.Classes.Arc import Arc
from pyleecan.Classes.Arc1 import Arc1
from pyleecan.Classes.Arc2 import Arc2
from pyleecan.Classes.Arc3 import Arc3
from pyleecan.Classes.Circle import Circle
from pyleecan.Classes.CondType11 import CondType11
from pyleecan.Classes.CondType12 import CondType12
from pyleecan.Classes.CondType21 import CondType21
from pyleecan.Classes.CondType22 import CondType22
from pyleecan.Classes.Conductor import Conductor
from pyleecan.Classes.Element import Element
from pyleecan.Classes.ElementMat import ElementMat
from pyleecan.Classes.Force import Force
from pyleecan.Classes.ForceMT import ForceMT
from pyleecan.Classes.Frame import Frame
from pyleecan.Classes.GUIOption import GUIOption
from pyleecan.Classes.Hole import Hole
from pyleecan.Classes.HoleM50 import HoleM50
from pyleecan.Classes.HoleM51 import HoleM51
from pyleecan.Classes.HoleM52 import HoleM52
from pyleecan.Classes.HoleM53 import HoleM53
from pyleecan.Classes.HoleM54 import HoleM54
from pyleecan.Classes.HoleMag import HoleMag
from pyleecan.Classes.Import import Import
from pyleecan.Classes.ImportGenMatrixSin import ImportGenMatrixSin
from pyleecan.Classes.ImportGenVectLin import ImportGenVectLin
from pyleecan.Classes.ImportGenVectSin import ImportGenVectSin
from pyleecan.Classes.ImportMatlab import ImportMatlab
from pyleecan.Classes.ImportMatrix import ImportMatrix
from pyleecan.Classes.ImportMatrixVal import ImportMatrixVal
from pyleecan.Classes.ImportMatrixXls import ImportMatrixXls
from pyleecan.Classes.InCurrent import InCurrent
from pyleecan.Classes.InFlux import InFlux
from pyleecan.Classes.InForce import InForce
from pyleecan.Classes.Input import Input
from pyleecan.Classes.LamHole import LamHole
from pyleecan.Classes.LamSlot import LamSlot
from pyleecan.Classes.LamSlotMag import LamSlotMag
from pyleecan.Classes.LamSlotWind import LamSlotWind
from pyleecan.Classes.LamSquirrelCage import LamSquirrelCage
from pyleecan.Classes.Lamination import Lamination
from pyleecan.Classes.Line import Line
from pyleecan.Classes.Machine import Machine
from pyleecan.Classes.MachineAsync import MachineAsync
from pyleecan.Classes.MachineDFIM import MachineDFIM
from pyleecan.Classes.MachineIPMSM import MachineIPMSM
from pyleecan.Classes.MachineSCIM import MachineSCIM
from pyleecan.Classes.MachineSIPMSM import MachineSIPMSM
from pyleecan.Classes.MachineSRM import MachineSRM
from pyleecan.Classes.MachineSyRM import MachineSyRM
from pyleecan.Classes.MachineSync import MachineSync
from pyleecan.Classes.MachineWRSM import MachineWRSM
from pyleecan.Classes.MagFEMM import MagFEMM
from pyleecan.Classes.Magnet import Magnet
from pyleecan.Classes.MagnetFlat import MagnetFlat
from pyleecan.Classes.MagnetPolar import MagnetPolar
from pyleecan.Classes.MagnetType10 import MagnetType10
from pyleecan.Classes.MagnetType11 import MagnetType11
from pyleecan.Classes.MagnetType12 import MagnetType12
from pyleecan.Classes.MagnetType13 import MagnetType13
from pyleecan.Classes.MagnetType14 import MagnetType14
from pyleecan.Classes.Magnetics import Magnetics
from pyleecan.Classes.MatEconomical import MatEconomical
from pyleecan.Classes.MatElectrical import MatElectrical
from pyleecan.Classes.MatHT import MatHT
from pyleecan.Classes.MatLamination import MatLamination
from pyleecan.Classes.MatMagnet import MatMagnet
from pyleecan.Classes.MatMagnetics import MatMagnetics
from pyleecan.Classes.MatStructural import MatStructural
from pyleecan.Classes.Material import Material
from pyleecan.Classes.Mesh import Mesh
from pyleecan.Classes.MeshSolution import MeshSolution
from pyleecan.Classes.Node import Node
from pyleecan.Classes.NodeMat import NodeMat
from pyleecan.Classes.Notch import Notch
from pyleecan.Classes.NotchEvenDist import NotchEvenDist
from pyleecan.Classes.OutElec import OutElec
from pyleecan.Classes.OutGeo import OutGeo
from pyleecan.Classes.OutGeoLam import OutGeoLam
from pyleecan.Classes.OutMag import OutMag
from pyleecan.Classes.OutPost import OutPost
from pyleecan.Classes.OutStruct import OutStruct
from pyleecan.Classes.Output import Output
from pyleecan.Classes.PolarArc import PolarArc
from pyleecan.Classes.Segment import Segment
from pyleecan.Classes.Shaft import Shaft
from pyleecan.Classes.Simu1 import Simu1
from pyleecan.Classes.Simulation import Simulation
from pyleecan.Classes.Slot import Slot
from pyleecan.Classes.Slot19 import Slot19
from pyleecan.Classes.SlotMFlat import SlotMFlat
from pyleecan.Classes.SlotMPolar import SlotMPolar
from pyleecan.Classes.SlotMag import SlotMag
from pyleecan.Classes.SlotW10 import SlotW10
from pyleecan.Classes.SlotW11 import SlotW11
from pyleecan.Classes.SlotW12 import SlotW12
from pyleecan.Classes.SlotW13 import SlotW13
from pyleecan.Classes.SlotW14 import SlotW14
from pyleecan.Classes.SlotW15 import SlotW15
from pyleecan.Classes.SlotW16 import SlotW16
from pyleecan.Classes.SlotW21 import SlotW21
from pyleecan.Classes.SlotW22 import SlotW22
from pyleecan.Classes.SlotW23 import SlotW23
from pyleecan.Classes.SlotW24 import SlotW24
from pyleecan.Classes.SlotW25 import SlotW25
from pyleecan.Classes.SlotW26 import SlotW26
from pyleecan.Classes.SlotW27 import SlotW27
from pyleecan.Classes.SlotW28 import SlotW28
from pyleecan.Classes.SlotW29 import SlotW29
from pyleecan.Classes.SlotW60 import SlotW60
from pyleecan.Classes.SlotW61 import SlotW61
from pyleecan.Classes.SlotWind import SlotWind
from pyleecan.Classes.Solution import Solution
from pyleecan.Classes.SolutionFEMM import SolutionFEMM
from pyleecan.Classes.Structural import Structural
from pyleecan.Classes.SurfLine import SurfLine
from pyleecan.Classes.Surface import Surface
from pyleecan.Classes.Trapeze import Trapeze
from pyleecan.Classes.Unit import Unit
from pyleecan.Classes.VentilationCirc import VentilationCirc
from pyleecan.Classes.VentilationPolar import VentilationPolar
from pyleecan.Classes.VentilationTrap import VentilationTrap
from pyleecan.Classes.Winding import Winding
from pyleecan.Classes.WindingCW1L import WindingCW1L
from pyleecan.Classes.WindingCW2LR import WindingCW2LR
from pyleecan.Classes.WindingCW2LT import WindingCW2LT
from pyleecan.Classes.WindingDW1L import WindingDW1L
from pyleecan.Classes.WindingDW2L import WindingDW2L
from pyleecan.Classes.WindingSC import WindingSC
from pyleecan.Classes.WindingUD import WindingUD
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
6738,
279,
2349,
721,
272,
13,
9487,
274,
13,
24021,
1330,
10173,
198,
6738,
279,
2349,
721,
272,
13,
9487,
274,
13,
24021,
16,
1330,
10173,
16,
198,
6738,
279,
... | 2.915823 | 2,174 |
from django.conf.urls import url
from django.conf.urls import patterns
from pyday_social_network import views
app_name = 'pyday_social_network'
urlpatterns = [
url(r'^upload_picture/$', views.UploadPictureView.as_view(), name='upload_picture'),
url(r'^upload_song/$', views.UploadSongView.as_view(), name='upload_song'),
url(r'^register/$', views.RegisterView.as_view(), name='register_login'),
url(r'^login/$', views.login_user, name='login'),
url(r'^main/$', views.main, name='main'),
url(r'^logout/$', views.logout_user, name='logout'),
url(r'^all/$', views.display_all_users, name='all_users'),
url(r'^following/$', views.display_following, name='following'),
url(r'^followers/$', views.display_followers, name='followers'),
url(r'^friends/$', views.display_friends, name='friends'),
url(r'^follow/(?P<user>\d+)$', views.follow, name='follow'),
url(r'^unfollow/(?P<user>\d+)$', views.unfollow, name='unfollow'),
url(r'^profile/(?P<user>\d*)$', views.display_profile, name='profile'),
url(r'^search/$', views.search_user, name='search'),
]
'''urlpatterns += patterns('pyday_social_network.views',
url(r'^list/$', 'list', name='list'))
'''
# handler404 = 'pyday.views.404'
| [
6738,
42625,
14208,
13,
10414,
13,
6371,
82,
1330,
19016,
198,
6738,
42625,
14208,
13,
10414,
13,
6371,
82,
1330,
7572,
198,
198,
6738,
12972,
820,
62,
14557,
62,
27349,
1330,
5009,
198,
198,
1324,
62,
3672,
796,
705,
9078,
820,
62,
... | 2.53629 | 496 |
import numpy as np
import pandas as pd
from ploomber.testing import pandas as pd_t
| [
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67,
198,
6738,
458,
4207,
527,
13,
33407,
1330,
19798,
292,
355,
279,
67,
62,
83,
628,
628,
198
] | 2.9 | 30 |
import swgpy
from swgpy.object import *
from swgpy.sui import RadialMenu, RadialOptions, RadialOptionsList, RadialIdentifier
from swgpy.static import ElevatorData, ElevatorDataList
from swgpy.utility import vector3, quat | [
11748,
1509,
70,
9078,
198,
6738,
1509,
70,
9078,
13,
15252,
1330,
1635,
198,
6738,
1509,
70,
9078,
13,
2385,
72,
1330,
5325,
498,
23381,
11,
5325,
498,
29046,
11,
5325,
498,
29046,
8053,
11,
5325,
498,
33234,
7483,
198,
6738,
1509,
... | 3.188406 | 69 |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# Imports
########################################
import sys, os
SciAnalysis_PATH='/home/kyager/current/code/SciAnalysis/main/'
SciAnalysis_PATH in sys.path or sys.path.append(SciAnalysis_PATH)
import glob
from SciAnalysis import tools
from SciAnalysis.Data import *
#from SciAnalysis.XSAnalysis.Data import *
#from SciAnalysis.XSAnalysis import Protocols
import SciAnalysis.colormaps as cmaps
plt.register_cmap(name='viridis', cmap=cmaps.viridis)
plt.register_cmap(name='magma', cmap=cmaps.magma)
plt.register_cmap(name='inferno', cmap=cmaps.inferno)
plt.register_cmap(name='plasma', cmap=cmaps.plasma)
plt.set_cmap(cmaps.viridis)
root_dir = './'
source_dir = os.path.join(root_dir, './')
output_dir = os.path.join(root_dir, './')
theta_incident = np.linspace(0, 1.0, num=5000)
# At 13.5 keV
# Ge: 0.186
#critical_angle = 0.186
# Si: 0.132
#critical_angle = 0.132
# SiO2: 0.141
#critical_angle = 0.141
# Polystyrene: 0.090
#critical_angle = 0.090
critical_angle_film = 0.090
critical_angle_substrate = 0.132
critical_angle = critical_angle_substrate
lambda_A = 0.9184
k = 2.*np.pi/lambda_A
kpre = 2.*k
# Angles are full angles (2theta)
if False:
# Test nonlinear transformation
lambda_A = 0.9184/100
k = 2.*np.pi/lambda_A
kpre = 2.*k
# Angles are full angles (2theta)
lines = DataLines_current()
lines.x_axis = theta_incident
if False:
# Horizon
line = DataLine(x=theta_incident, y=angle_to_q(horizon))
lines.add_line(line)
line.plot_args['color'] = '0.5'
line.plot_args['linestyle'] = '-'
line.plot_args['linewidth'] = 4.0
line.plot_args['marker'] = None
line.plot_args['dashes'] = [5,5]
if False:
# Specular reflection
specular = 2.0*theta_incident
line = DataLine(x=theta_incident, y=angle_to_q(specular))
lines.add_line(line)
line.plot_args['color'] = 'purple'
line.plot_args['linestyle'] = '-'
line.plot_args['linewidth'] = 4.0
line.plot_args['marker'] = None
if False:
incident_angle_exp = npzfile['incident_angle_experiment']
critical_angle_exp = npzfile['Yoneda_Ge_experiment']
#print(incident_angle_exp)
line = DataLine(x=incident_angle_exp, y=critical_angle_exp, name='critical_angle_expJL')
lines.add_line(line)
line.plot_args['color'] = 'purple'
line.plot_args['linestyle'] = 'o'
line.plot_args['linewidth'] = 4.0
line.plot_args['markersize'] = 12.0
horizon = 1.0*theta_incident
specular = 2.0*theta_incident
if True:
# T
alpha_i_rad = np.arccos( np.cos(np.radians(theta_incident))/np.cos(np.radians(critical_angle_substrate)) )
alpha_i_deg = np.degrees(alpha_i_rad)
two_theta_s_deg = np.where( theta_incident>critical_angle_substrate, theta_incident - alpha_i_deg, horizon )
T = angle_to_q(two_theta_s_deg)
line = DataLine(x=theta_incident, y=T, name='direct_beam_substrate')
lines.add_line(line)
line.plot_args['color'] = 'b'
line.plot_args['linestyle'] = '-'
line.plot_args['linewidth'] = 4.0
line.plot_args['marker'] = None
#line.plot_args['markersize'] = 12.0
if True:
# T
alpha_i_rad = np.arccos( np.cos(np.radians(theta_incident))/np.cos(np.radians(critical_angle_film)) )
alpha_i_deg = np.degrees(alpha_i_rad)
two_theta_s_deg = np.where( theta_incident>critical_angle_film, theta_incident - alpha_i_deg, horizon )
T = angle_to_q(two_theta_s_deg)
line = DataLine(x=theta_incident, y=T, name='direct_beam_film')
lines.add_line(line)
line.plot_args['color'] = 'b'
line.plot_args['linestyle'] = '-'
line.plot_args['linewidth'] = 4.0
line.plot_args['marker'] = None
#line.plot_args['markersize'] = 12.0
# Show the detector position (qz) of candidate reciprocal-space (Qz) peaks
#for Qz_consider in np.arange(0.0, 0.20, 0.005):
#for Qz_consider in np.arange(-0.2, 0.20, 0.01):
#for Qz_consider in [0.01, 0.03, 0.06, 0.09]:
xi, xf = 0, 1
Qz_list = np.linspace(angle_to_q(xi), angle_to_q(xf), num=100)
Delta_MATRIX = np.zeros( (len(Qz_list), len(theta_incident)) )
for im, Qz_consider in enumerate(Qz_list):
if im%50==0:
print('im = {}/{} ({:.1f} % done)'.format(im, len(Qz_list), 100.0*im/len(Qz_list)))
two_alpha_s_deg = q_to_angle(Qz_consider)
#two_alpha_s_deg_vector = np.ones(len(theta_incident))*two_alpha_s_deg
qz_measure_T = np.zeros(len(theta_incident))
qz_measure_R = np.zeros(len(theta_incident))
# True position
if False:
Qz_true = np.ones(len(theta_incident))*Qz_consider
line = DataLine(x=theta_incident, y=Qz_true, name='Qz_true')
lines.add_line(line)
line.plot_args['color'] = 'blue'
line.plot_args['linestyle'] = '-'
line.plot_args['linewidth'] = 1.0
line.plot_args['marker'] = None
line.plot_args['alpha'] = 0.2
# T channel refracted peak
if True:
# Above horizon component
if True:
alpha_i_rad = np.arccos( np.cos(np.radians(theta_incident))/np.cos(np.radians(critical_angle_film)) )
alpha_i_deg = np.where( theta_incident>critical_angle_film, np.degrees(alpha_i_rad), 0.0 )
# Above-horizon
alpha_f_deg = two_alpha_s_deg - alpha_i_deg
alpha_f_rad = np.radians(alpha_f_deg)
theta_f_rad = np.arccos( np.cos(np.radians(critical_angle_film))*np.cos(alpha_f_rad) )
theta_f_deg = np.where( alpha_i_deg<two_alpha_s_deg, np.degrees(theta_f_rad), np.nan ) # Only valid above horizon
two_theta_s_deg = theta_incident + theta_f_deg
two_theta_s_deg = np.where( theta_incident>critical_angle_film, two_theta_s_deg, np.nan)
qz = angle_to_q(two_theta_s_deg)
qz_measure_T += np.where(np.isnan(qz), 0, qz)
#line = DataLine(x=theta_incident, y=qz, name='Tpeak')
#lines.add_line(line)
#line.plot_args['color'] = 'blue'
#line.plot_args['linestyle'] = '-'
#line.plot_args['linewidth'] = 3.0
#line.plot_args['marker'] = None
#line.plot_args['alpha'] = 0.8
# Below film critical angle
# (incident beam traveling along horizon)
if True:
two_theta_s_deg = theta_incident + theta_f_deg
two_theta_s_deg = np.where( theta_incident<critical_angle_film, two_theta_s_deg, np.nan)
qz = angle_to_q(two_theta_s_deg)
qz_measure_T += np.where(np.isnan(qz), 0, qz)
qz_measure_R += np.where(np.isnan(qz), 0, qz)
#line = DataLine(x=theta_incident, y=qz, name='Tpeak')
#lines.add_line(line)
#line.plot_args['color'] = 'blue'
#line.plot_args['linestyle'] = '-'
#line.plot_args['linewidth'] = 3.0
#line.plot_args['marker'] = None
#line.plot_args['alpha'] = 0.2
# Below-horizon (film refraction only)
if False:
two_theta_s_deg = np.where( alpha_i_deg>two_alpha_s_deg, theta_incident - alpha_i_deg + two_alpha_s_deg , np.nan ) # Only valid below horizon
qz = angle_to_q(two_theta_s_deg)
#line = DataLine(x=theta_incident, y=qz, name='Tpeak')
#lines.add_line(line)
#line.plot_args['color'] = 'blue'
#line.plot_args['linestyle'] = '-'
#line.plot_args['linewidth'] = 3.0
#line.plot_args['marker'] = None
#line.plot_args['alpha'] = 0.2
# Below-horizon (film and substrate refractions)
if True:
incident_prime_deg = alpha_i_deg - two_alpha_s_deg
incident_prime_rad = np.radians(incident_prime_deg)
# Refraction at film-substrate interface
n_ratio = np.cos(np.radians(critical_angle_substrate))/np.cos(np.radians(critical_angle_film))
output_prime_rad = np.arccos( np.cos(incident_prime_rad)/n_ratio )
output_prime_deg = np.degrees(output_prime_rad)
two_theta_s_deg = theta_incident - output_prime_deg
two_theta_s_deg = np.where( alpha_i_deg>two_alpha_s_deg, two_theta_s_deg, np.nan ) # Only valid below horizon
two_theta_s_deg = np.where( theta_incident>critical_angle_film, two_theta_s_deg, np.nan ) # Don't show when incident beam below critical angle
qz = angle_to_q(two_theta_s_deg)
qz_measure_T += np.where(np.isnan(qz), 0, qz)
#line = DataLine(x=theta_incident, y=qz, name='Tpeak')
#lines.add_line(line)
#line.plot_args['color'] = 'blue'
#line.plot_args['linestyle'] = '-'
#line.plot_args['linewidth'] = 3.0
#line.plot_args['marker'] = None
#line.plot_args['alpha'] = 0.8
# Scattering refracted along horizon
if True:
two_theta_s_deg = theta_incident - output_prime_deg
two_theta_s_deg = np.where( np.isnan(two_theta_s_deg) , horizon, np.nan)
two_theta_s_deg = np.where( alpha_i_deg>two_alpha_s_deg, two_theta_s_deg, np.nan ) # Only valid below horizon
qz = angle_to_q(two_theta_s_deg)
qz_measure_T += np.where(np.isnan(qz), 0, qz)
#line = DataLine(x=theta_incident, y=qz, name='Tpeak')
#lines.add_line(line)
#line.plot_args['color'] = 'blue'
#line.plot_args['linestyle'] = '-'
#line.plot_args['linewidth'] = 3.0
#line.plot_args['marker'] = None
#line.plot_args['alpha'] = 0.2
# Below film critical angle
# (incident beam traveling along horizon)
if False and two_alpha_s_deg<0:
angle_vals = theta_incident - output_prime_deg
angle_vals = np.where( theta_incident<critical_angle_film, angle_vals, np.nan )
qz = angle_to_q(angle_vals)
line = DataLine(x=theta_incident, y=qz, name='Tpeak')
lines.add_line(line)
line.plot_args['color'] = 'blue'
line.plot_args['linestyle'] = '-'
line.plot_args['linewidth'] = 3.0
line.plot_args['marker'] = None
line.plot_args['alpha'] = 0.2
# R channel refracted peak
if True:
if True:
alpha_i_rad = np.arccos( np.cos(np.radians(theta_incident))/np.cos(np.radians(critical_angle_film)) )
alpha_i_deg = np.where( theta_incident>critical_angle_film, np.degrees(alpha_i_rad), 0.0 )
alpha_f_rad = np.radians(two_alpha_s_deg) + alpha_i_rad
theta_f_rad = np.arccos( np.cos(np.radians(critical_angle_film))*np.cos(alpha_f_rad) )
theta_f_deg = np.degrees(theta_f_rad)
two_theta_s_deg = theta_incident + theta_f_deg
qz = angle_to_q(two_theta_s_deg)
qz_measure_R += np.where(np.isnan(qz), 0, qz)
#line = DataLine(x=theta_incident, y=qz, name='Rpeak')
#lines.add_line(line)
#line.plot_args['color'] = 'red'
#line.plot_args['linestyle'] = '-'
#line.plot_args['linewidth'] = 3.0
#line.plot_args['marker'] = None
#line.plot_args['alpha'] = 0.8
# Scattering goes below horizon
if False and two_alpha_s_deg<0:
# For this to occur:
# two_alpha_s_deg < 0
# |two_alpha_s_deg| > |alpha_i_deg|
incident_prime_deg = np.where( theta_incident>critical_angle_film, np.abs(two_alpha_s_deg) - alpha_i_deg, np.nan)
incident_prime_rad = np.radians(incident_prime_deg)
# Refraction at film-substrate interface
n_ratio = np.cos(np.radians(critical_angle_substrate))/np.cos(np.radians(critical_angle_film))
output_prime_rad = np.arccos( np.cos(incident_prime_rad)/n_ratio )
output_prime_deg = np.degrees(output_prime_rad)
two_theta_s_deg = np.where( np.abs(two_alpha_s_deg)>np.abs(alpha_i_deg), theta_incident - output_prime_deg, np.nan )
qz = angle_to_q(two_theta_s_deg)
line = DataLine(x=theta_incident, y=qz, name='Rpeak')
lines.add_line(line)
line.plot_args['color'] = 'red'
line.plot_args['linestyle'] = '-'
line.plot_args['linewidth'] = 3.0
line.plot_args['marker'] = None
line.plot_args['alpha'] = 0.8
# Scattering traveling along horizon
# (at film-substrate interface, since beam is hitting substrate interface below its critical angle)
if True:
internal_critical_angle_rad = np.arccos(n_ratio)
angle_vals = np.where( incident_prime_rad<internal_critical_angle_rad, horizon, np.nan )
angle_vals = np.where( incident_prime_rad>0, angle_vals, np.nan )
qz = angle_to_q(angle_vals)
line = DataLine(x=theta_incident, y=qz, name='Rpeak')
lines.add_line(line)
line.plot_args['color'] = 'red'
line.plot_args['linestyle'] = '-'
line.plot_args['linewidth'] = 3.0
line.plot_args['marker'] = None
line.plot_args['alpha'] = 0.2
# Amount of refraction distortion
if True:
Qz_true = np.ones(len(theta_incident))*Qz_consider
Delta_qz_T = qz_measure_T - Qz_true
Delta_qz_R = qz_measure_R - Qz_true
delta_Delta = Delta_qz_R - Delta_qz_T
delta_Delta_m = delta_Delta - angle_to_q(2*theta_incident)
line = DataLine(x=theta_incident, y=Delta_qz_T, name='Delta')
#lines.add_line(line)
lines.extra_line = line
lines.Qz_consider = Qz_consider
line.plot_args['color'] = 'blue'
line.plot_args['linestyle'] = '-'
line.plot_args['linewidth'] = 3.0
line.plot_args['marker'] = None
line.plot_args['alpha'] = 0.8
Delta_MATRIX[im,:] = Delta_qz_T
lines.x_axis = theta_incident
lines.y_axis = Qz_list
lines.M = Delta_MATRIX
lines.plot_args = { 'color' : 'k',
'marker' : 'o',
'linewidth' : 3.0,
'legend_location': 'NE'
}
lines.plot_args['rcParams'] = {
'axes.labelsize': 45,
'xtick.labelsize': 35,
'ytick.labelsize': 35,
#'legend.borderpad' : 0 ,
'legend.fontsize' : 30 ,
#'legend.numpoints' : 1 ,
#'legend.handlelength' : 1.0 ,
'legend.labelspacing' : 0.25 ,
'legend.handletextpad' : 0.5 ,
#'legend.columnspacing' : 0.0 ,
'xtick.major.pad': 14,
'ytick.major.pad': 14,
}
lines.x_label = 'angle'
lines.x_rlabel = r'$\theta_i \, (^{\circ})$'
lines.y_label = 'qz'
lines.y_rlabel = '$Q_z \, (\mathrm{\AA^{-1}})$'
outfile = os.path.join(output_dir, 'fig-refraction_map.png')
lines.plot(save=outfile, plot_range=[xi, xf, None, None], plot_buffers=[0.27, 0.16, 0.16, 0.05], _xticks=[0, 0.04, 0.08, 0.12], dpi=200)
| [
2,
48443,
14629,
14,
8800,
14,
29412,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
2,
1846,
3742,
198,
29113,
7804,
198,
198,
11748,
25064,
11,
28686,
198,
50,
979,
32750,
62,
34219,
11639,
14,
1119... | 1.872801 | 8,640 |
import numpy as np
from PIL import Image
src = np.array(Image.open('data/src/lena.jpg'))
mask = np.array(Image.open('data/src/horse_r.png').resize(src.shape[1::-1], Image.BILINEAR))
print(mask.dtype, mask.min(), mask.max())
# uint8 0 255
mask = mask / 255
print(mask.dtype, mask.min(), mask.max())
# float64 0.0 1.0
dst = src * mask
Image.fromarray(dst.astype(np.uint8)).save('data/dst/numpy_image_mask.jpg')
# 
mask = np.array(Image.open('data/src/horse_r.png').convert('L').resize(src.shape[1::-1], Image.BILINEAR))
print(mask.shape)
# (225, 400)
mask = mask / 255
# dst = src * mask
# ValueError: operands could not be broadcast together with shapes (225,400,3) (225,400)
# mask = mask[:, :, np.newaxis]
mask = mask.reshape(*mask.shape, 1)
print(mask.shape)
# (225, 400, 1)
dst = src * mask
Image.fromarray(dst.astype(np.uint8)).save('data/dst/numpy_image_mask_l.jpg')
# 
| [
11748,
299,
32152,
355,
45941,
198,
6738,
350,
4146,
1330,
7412,
198,
198,
10677,
796,
45941,
13,
18747,
7,
5159,
13,
9654,
10786,
7890,
14,
10677,
14,
75,
8107,
13,
9479,
6,
4008,
198,
27932,
796,
45941,
13,
18747,
7,
5159,
13,
965... | 2.366093 | 407 |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import dill as pickle
import rpy2.robjects as robjects
readRDS = robjects.r['readRDS']
r_stan_data = readRDS('r_stan_data.rds')
with open('python_stan_data.pkl', 'rb') as file:
py_stan_data = pickle.load(file)
py_stan_data.keys()
for key in py_stan_data.keys():
mycheck(key)
fig, ax = plt.subplots(1, 2, dpi=150, figsize=(10,2))
ax[0].plot(np.array(r_stan_data.rx2('f')))
ax[1].plot(np.array(py_stan_data['f']))
plt.tight_layout()
plt.show()
| [
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
198,
11748,
288,
359,
355,
2298,
293,
198,
198,
11748,
374,
9078,
17,
13,
22609,
752,
82,
355,
3... | 2.226891 | 238 |
#
# * The source code in this file is based on the soure code of CuPy.
#
# # NLCPy License #
#
# Copyright (c) 2020-2021 NEC Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither NEC Corporation nor the names of its contributors may be
# used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# # CuPy License #
#
# Copyright (c) 2015 Preferred Infrastructure, Inc.
# Copyright (c) 2015 Preferred Networks, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
import functools
import os
import unittest
def repeat_with_success_at_least(times, min_success):
"""Decorator for multiple trial of the test case.
The decorated test case is launched multiple times.
The case is judged as passed at least specified number of trials.
If the number of successful trials exceeds `min_success`,
the remaining trials are skipped.
Args:
times(int): The number of trials.
min_success(int): Threshold that the decorated test
case is regarded as passed.
"""
assert times >= min_success
return _repeat_with_success_at_least
def repeat(times, intensive_times=None):
"""Decorator that imposes the test to be successful in a row.
Decorated test case is launched multiple times.
The case is regarded as passed only if it is successful
specified times in a row.
.. note::
In current implementation, this decorator grasps the
failure information of each trial.
Args:
times(int): The number of trials in casual test.
intensive_times(int or None): The number of trials in more intensive
test. If ``None``, the same number as `times` is used.
"""
if intensive_times is None:
return repeat_with_success_at_least(times, times)
casual_test = bool(int(os.environ.get('NLCPY_TEST_CASUAL', '0')))
times_ = times if casual_test else intensive_times
return repeat_with_success_at_least(times_, times_)
def retry(times):
"""Decorator that imposes the test to be successful at least once.
Decorated test case is launched multiple times.
The case is regarded as passed if it is successful
at least once.
.. note::
In current implementation, this decorator grasps the
failure information of each trial.
Args:
times(int): The number of trials.
"""
return repeat_with_success_at_least(times, 1)
| [
2,
198,
2,
1635,
383,
2723,
2438,
287,
428,
2393,
318,
1912,
319,
262,
24049,
260,
2438,
286,
14496,
20519,
13,
198,
2,
198,
2,
1303,
399,
5639,
20519,
13789,
1303,
198,
2,
198,
2,
220,
220,
220,
220,
15069,
357,
66,
8,
12131,
1... | 3.118721 | 1,533 |
import anialtools as alt
nwdir = '/home/jsmith48/scratch/ccsd_extrapolation/ccsd_train/tl_train_dhl_7/'
h5dir = '/home/jsmith48/scratch/ccsd_extrapolation/h5files/train/cmb/'
Nnets = 8 # networks in ensemble
Nblock = 16 # Number of blocks in split
Nbvald = 2 # number of valid blocks
Nbtest = 1 # number of test blocks
netdict = {'iptfile' :nwdir+'inputtrain.ipt',
'cnstfile':nwdir+'rHCNO-5.2R_16-3.5A_a4-8.params',
'saefile' :nwdir+'sae_linfit.dat',
'atomtyp' :['H','C','N','O']}
GPU = [2,3,4,5]
## Train the ensemble ##
aet = alt.alaniensembletrainer(nwdir, netdict, h5dir, Nnets)
aet.build_strided_training_cache(Nblock,Nbvald,Nbtest,build_test=False,forces=False)
aet.train_ensemble(GPU)
| [
11748,
281,
498,
31391,
355,
5988,
198,
198,
47516,
15908,
796,
31051,
11195,
14,
73,
21453,
2780,
14,
1416,
36722,
14,
535,
21282,
62,
2302,
2416,
21417,
14,
535,
21282,
62,
27432,
14,
28781,
62,
27432,
62,
67,
18519,
62,
22,
14,
6... | 2.212121 | 330 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
from libcst.codemod._cli import (
ParallelTransformResult,
diff_code,
exec_transform_with_prettyprint,
gather_files,
parallel_exec_transform_with_prettyprint,
)
from libcst.codemod._codemod import Codemod
from libcst.codemod._command import (
CodemodCommand,
MagicArgsCodemodCommand,
VisitorBasedCodemodCommand,
)
from libcst.codemod._context import CodemodContext
from libcst.codemod._runner import (
SkipFile,
SkipReason,
TransformExit,
TransformFailure,
TransformResult,
TransformSkip,
TransformSuccess,
transform_module,
)
from libcst.codemod._testing import CodemodTest
from libcst.codemod._visitor import ContextAwareTransformer, ContextAwareVisitor
__all__ = [
"Codemod",
"CodemodContext",
"CodemodCommand",
"VisitorBasedCodemodCommand",
"MagicArgsCodemodCommand",
"ContextAwareTransformer",
"ContextAwareVisitor",
"ParallelTransformResult",
"TransformSuccess",
"TransformFailure",
"TransformExit",
"SkipReason",
"TransformSkip",
"SkipFile",
"TransformResult",
"CodemodTest",
"transform_module",
"gather_files",
"exec_transform_with_prettyprint",
"parallel_exec_transform_with_prettyprint",
"diff_code",
]
| [
2,
15069,
357,
66,
8,
3203,
11,
3457,
13,
290,
663,
29116,
13,
198,
2,
198,
2,
770,
2723,
2438,
318,
11971,
739,
262,
17168,
5964,
1043,
287,
262,
198,
2,
38559,
24290,
2393,
287,
262,
6808,
8619,
286,
428,
2723,
5509,
13,
198,
... | 2.746667 | 525 |
#!/bin/python3
| [
2,
48443,
8800,
14,
29412,
18,
198
] | 2.142857 | 7 |
import json
from pathlib import Path
import tarfile
PREDICTIONS_DIR = "./data/predictions"
folder = Path(PREDICTIONS_DIR)
pred_paths = folder.glob("**/*.jsonl")
compiled_errors_path = "error_analysis/errors.jsonl"
zipped_path = "error_analysis/errors.tar.gz"
with open(compiled_errors_path, "w") as file:
pass
for path in pred_paths:
experiment_id = path.parent.parts[-1]
model_name = path.parent.parent.parts[-1]
train_type = path.parent.parent.parent.parts[-1]
with open(compiled_errors_path, "a") as w_file:
with open(path) as file:
for line in file:
data = json.loads(line)
if not data["match"] and not set("<UNK>").intersection(data["original"]):
error = {"experiment_id": experiment_id, "model_name": model_name, "train_type": train_type, **data}
w_file.write(json.dumps(error, ensure_ascii=False) + "\n")
with tarfile.open(zipped_path, "w:gz") as file:
file.add(compiled_errors_path)
| [
11748,
33918,
198,
6738,
3108,
8019,
1330,
10644,
198,
11748,
13422,
7753,
198,
198,
4805,
1961,
18379,
11053,
62,
34720,
796,
366,
19571,
7890,
14,
28764,
9278,
1,
198,
198,
43551,
796,
10644,
7,
4805,
1961,
18379,
11053,
62,
34720,
8,... | 2.320366 | 437 |
import asyncio
import json
import uuid
from json.decoder import JSONDecodeError
import discord
from discord.ext import commands, tasks
from lib import (ReactionPersistentView,
Database, has_permissions,
)
from DiscordUtils import Embed
| [
11748,
30351,
952,
198,
11748,
33918,
198,
11748,
334,
27112,
198,
6738,
33918,
13,
12501,
12342,
1330,
19449,
10707,
1098,
12331,
198,
198,
11748,
36446,
198,
6738,
36446,
13,
2302,
1330,
9729,
11,
8861,
198,
6738,
9195,
1330,
357,
3041,... | 2.814433 | 97 |
import numpy as np
import pandas as pd
import os
import datetime
import requests
from huey.models import Buoy, BuoyRealtimeWaveDetail, BuoyRawSpectralWaveData
id ).order_by(BuoyRealtimeWaveDetail.ts.desc()).first()
realtime_url = f"https://www.ndbc.noaa.gov/data/realtime2/{station_id}.spec"
df = pd.read_csv(realtime_url, delim_whitespace=True)
df = df.replace('MM', np.NaN)
# skip first row which is header
for (index, row) in df[1:].iterrows():
ob = BuoyRealtimeWaveDetail.from_pd_row(row)
ob.buoy = buoy
if (latest_ob is None or ob.ts > latest_ob.ts):
print(f"inserting observation for date: {ob.ts}")
db_session.add(ob)
else:
print(f"observation for date: {ob.ts} already present, skipping.")
break
db_session.commit()
print("import complete")
| [
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
28686,
198,
11748,
4818,
8079,
198,
11748,
7007,
198,
198,
6738,
37409,
88,
13,
27530,
1330,
9842,
726,
11,
9842,
726,
15633,
2435,
39709,
11242,
603,
11,
... | 2.345109 | 368 |
# Initial imports
from calendar import day_abbr
import nltk
import pandas as pd
from pathlib import Path
from nltk.sentiment.vader import SentimentIntensityAnalyzer
from nltk.tokenize import word_tokenize, sent_tokenize
nltk.download('stopwords')
nltk.download('punkt')
nltk.download('wordnet')
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer, PorterStemmer
from nltk.util import ngrams
from string import punctuation
import re
from collections import Counter
import pandasPalmer as pp
from datetime import datetime
from inspect import signature
#import NLTK.fns as nl
#%matplotlib inline
# Download/Update the VADER Lexicon
nltk.download("vader_lexicon")
def get_sentiment(score):
"""
Calculates the sentiment based on the compound score.
"""
result = 0 # Neutral by default
if score >= 0.05: # Positive
result = 1
elif score <= -0.05: # Negative
result = -1
return result
# Function to create a dataframe from NewsApi articles
def Create_News_df(news_articles, language):
"""Creates a dataframe from NewsApi articles
Args:
news_articles (pd.DataFrame): Dictionary of articles from NewsApi
language (str): language type e.g. en,fr etc.
Returns:
pd.DataFrame: Containing the author, title, description, date and text of the articles
"""
articles_list = []
for article in news_articles:
try:
author = article["author"]
title = article["title"]
description = article["description"]
text = article["content"]
date_str = article["publishedAt"][:10]
articles_list.append({
"author": author,
"title": title,
"description": description,
"text": text,
"date": datetime.fromisoformat(date_str),
"language": language
})
except AttributeError:
pass
return pd.DataFrame(articles_list)
# Instantiate the lemmatizer
lemmatizer = WordNetLemmatizer()
stemp = PorterStemmer()
# Create a list of stopwords
sw = set(stopwords.words('english'))
# Tokenizer with various strategies
# Separate the word using a bar instead of a comma
bar_sep = lambda x: "|".join(x)
# Separate the words using as parameter
big_string = lambda x,sep=" ": sep.join(x)
# Generates ngrams based on the Parameters passed bigrams by default
bi_grams = lambda x : dict(Counter(ngrams(x, n=2)).most_common())
tri_grams = lambda x : dict(Counter(ngrams(x, n=3)).most_common())
N_grams = lambda text, n=2: dict(Counter(ngrams(Regular_Tokenizer(text), n=n)).most_common())
# returns the words along with their frequency
word_count = lambda x: (dict(Counter(x).most_common(n=None)))
def tokenizer(text,Return_string=False, Post_Processor=None, Stem=False):
"""
Takes text and turns them in to either a list of words,
a comma separated string of words or a list Dataframe listing the most common words
Args:
text (str): text sentences or article.
Return_string (bool, optional): Determines if a string or a list of string should be returned. Defaults to False.
Post_Processor ([type], optional): The post processing function to perform on the results. Defaults to None.
Stem (bool, optional): Performs an aditional step of steming the words
Returns:
list | str: Depends on the post processor passed into the function
"""
# Remove the punctuation from text
# https://stackoverflow.com/questions/265960/best-way-to-strip-punctuation-from-a-string
text = text.translate(str.maketrans('', '', punctuation))
# Create a tokenized list of the words
words = word_tokenize(text)
# Lemmatize words into root words
lems = [lemmatizer.lemmatize(word) for word in words]
if Stem:
# ****** Stemmer PorterStemmer ******
lems = [stemp.stem(word=word) for word in lems]
# Convert the words to lowercase
output = [word.lower() for word in lems]
# Remove the stop words
tokens = [word for word in output if word not in sw]
if Post_Processor is not None and callable(Post_Processor):
sig = signature(Post_Processor)
if sig.parameters.count == 1:
return Post_Processor(tokens)
if Return_string:
return ",".join(tokens)
else:
return tokens
#_________________________________________________________________________
# Create Dataframe of N_grams
# Calculate the VADER Sentiment Score for Text columns in a DataFrame or those specified in a list
def Attach_Sentiment_Scores_2_df(df: pd.DataFrame, txt_cols=None):
    """Calculate VADER sentiment for text columns and join the scores onto df.

    Args:
        df (pd.DataFrame): Frame containing the text to score.
        txt_cols (str | list, optional): Column name(s) to score.  When None,
            text columns are auto-detected with pp.Get_Columns_With_Text.

    Returns:
        pd.DataFrame: df with <col>_compound/_pos/_neu/_neg/_sent columns
        appended for every scored column.
    """
    # Initialize the VADER sentiment analyzer
    analyzer = SentimentIntensityAnalyzer()
    # Get the list of columns to work with
    if txt_cols is not None:
        if isinstance(txt_cols, str):
            # A single column name was passed; normalise it to a list.
            txt_cols = [txt_cols]
    else:  # There was no list of columns passed
        txt_cols = pp.Get_Columns_With_Text(df)
    if txt_cols is None:
        print("THERE WAS NO TEXT COLUMNS TO ANALYZE !!!!!!!!!!!!!!!!!!")
        return df
    # Get the sentiment for each row in the dataframe
    for col in txt_cols:
        # Create Sentiment Scoring Dictionary
        sentiment_dict = {
            f"{col}_compound": [],
            f"{col}_pos": [],
            f"{col}_neu": [],
            f"{col}_neg": [],
            f"{col}_sent": []
        }
        # Iterating the column directly avoids building a dict per row.
        for value in df[col]:
            try:
                # BUG FIX: polarity_scores is the call that raises
                # AttributeError on non-string cells, so it must sit INSIDE
                # the try block (the original called it before the try,
                # which defeated the guard entirely).
                sentiment = analyzer.polarity_scores(value)
                sentiment_dict[f"{col}_compound"].append(sentiment["compound"])
                sentiment_dict[f"{col}_pos"].append(sentiment["pos"])
                sentiment_dict[f"{col}_neu"].append(sentiment["neu"])
                sentiment_dict[f"{col}_neg"].append(sentiment["neg"])
                sentiment_dict[f"{col}_sent"].append(get_sentiment(sentiment["compound"]))
            except AttributeError:
                # Keep all five lists aligned with df's rows even when a cell
                # cannot be scored (e.g. NaN); None becomes NaN after the join.
                for score_list in sentiment_dict.values():
                    score_list.append(None)
        df = df.join(pd.DataFrame(sentiment_dict))
    return df
if __name__ == "__main__":
    # Smoke-test the module's public helpers on a tiny sample sentence.
    print("Running the Personal NLTK Module")
    sample = "Testing Testing One two Three"
    print(bar_sep(sample))
    print(dict(Counter(tokenizer(sample)).most_common(10)))
    print(tokenizer(sample, Post_Processor=word_count))
    print(tokenizer(sample, Post_Processor=bar_sep))
    print(tokenizer(sample, Post_Processor=bi_grams))
    # Small throw-away frame for exercising the sentiment annotator.
    df = pd.DataFrame(
        data={
            'Col1': [1, 2, 3, 4],
            'Col2': ['A', 'B', 'C', 'D'],
            'Col3': ['asdf', 'asdf', 'zcsd', 'steve'],
        }
    )
    #display(df)
    print(df)
    print(Attach_Sentiment_Scores_2_df(df, txt_cols="Col3"))
2,
20768,
17944,
198,
6738,
11845,
1330,
1110,
62,
397,
1671,
198,
11748,
299,
2528,
74,
198,
11748,
19798,
292,
355,
279,
67,
198,
6738,
3108,
8019,
1330,
10644,
198,
6738,
299,
2528,
74,
13,
34086,
3681,
13,
85,
5067,
1330,
11352,
... | 2.494172 | 2,831 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.11.1
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Benchmarking with sktime
#
# The benchmarking modules allows you to easily orchestrate benchmarking experiments in which you want to compare the performance of one or more algorithms over one or more data sets. It also provides a number of statistical tests to check if observed performance differences are statistically significant.
#
# The benchmarking modules is based on [mlaut](https://github.com/alan-turing-institute/mlaut).
#
# ## Preliminaries
# + pycharm={"name": "#%%\n"} tags=[]
# import required functions and classes
import os
from sklearn.metrics import accuracy_score
from sktime.benchmarking.data import UEADataset, make_datasets
from sktime.benchmarking.evaluation import Evaluator
from sktime.benchmarking.metrics import PairwiseMetric
from sktime.benchmarking.orchestration import Orchestrator
from sktime.benchmarking.results import HDDResults
from sktime.benchmarking.strategies import TSCStrategy
from sktime.benchmarking.tasks import TSCTask
from sktime.classification.interval_based import (
RandomIntervalSpectralForest,
TimeSeriesForestClassifier,
)
from sktime.series_as_features.model_selection import PresplitFilesCV
# -
# ### Set up paths
# + tags=[]
# set up paths to data and results folder
import sktime
# DATA_PATH points at the example datasets bundled inside the installed
# sktime package; results are written to a local "results" folder.
DATA_PATH = os.path.join(os.path.dirname(sktime.__file__), "datasets/data")
RESULTS_PATH = "results"
# -
# ### Create pointers to datasets on hard drive
# Here we use the `UEADataset` which follows the [UEA/UCR format](http://www.timeseriesclassification.com) and some of the time series classification datasets included in sktime.
# + tags=[]
# Create individual pointers to dataset on the disk
datasets = [
    UEADataset(path=DATA_PATH, name="ArrowHead"),
    UEADataset(path=DATA_PATH, name="ItalyPowerDemand"),
]
# + tags=[]
# Alternatively, we can use a helper function to create them automatically
# (note: this rebinds `datasets`, replacing the manually created list above).
datasets = make_datasets(
    path=DATA_PATH, dataset_cls=UEADataset, names=["ArrowHead", "ItalyPowerDemand"]
)
# -
# ### For each dataset, we also need to specify a learning task
# The learning task encapsulate all the information and instructions that define the problem we're trying to solve. In our case, we're trying to solve classification tasks and the key information we need is the name of the target variable in the data set that we're trying to predict. Here all tasks are the same because the target variable has the same name in all data sets.
# + tags=[]
# One TSCTask per dataset, all predicting the column named "target".
tasks = [TSCTask(target="target") for _ in range(len(datasets))]
# -
# ### Specify learning strategies
# Having set up the data sets and corresponding learning tasks, we need to define the algorithms we want to evaluate and compare.
# + pycharm={"name": "#%%\n"} tags=[]
# Specify learning strategies
strategies = [
    TSCStrategy(TimeSeriesForestClassifier(n_estimators=10), name="tsf"),
    TSCStrategy(RandomIntervalSpectralForest(n_estimators=10), name="rise"),
]
# -
# ### Set up a results object
# The results object encapsulates where and how benchmarking results are stored, here we choose to output them to the hard drive.
# Specify results object which manages the output of the benchmarking
results = HDDResults(path=RESULTS_PATH)
# ## Run benchmarking
# Finally, we pass all specifications to the orchestrator. The orchestrator will automatically train and evaluate all algorithms on all data sets and write out the results.
# run orchestrator
orchestrator = Orchestrator(
    datasets=datasets,
    tasks=tasks,
    strategies=strategies,
    cv=PresplitFilesCV(),
    results=results,
)
# Fit/predict every strategy on every dataset; predictions land in RESULTS_PATH.
orchestrator.fit_predict(save_fitted_strategies=False, overwrite_predictions=True)
# ## Evaluate and compare results
# Having run the orchestrator, we can evaluate and compare the prediction strategies.
evaluator = Evaluator(results)
metric = PairwiseMetric(func=accuracy_score, name="accuracy")
metrics_by_strategy = evaluator.evaluate(metric=metric)
metrics_by_strategy.head()
# + [markdown] pycharm={"name": "#%% md\n"}
# The evaluator offers a number of additional methods for evaluating and comparing strategies, including statistical hypothesis tests and visualisation tools, for example:
# + pycharm={"name": "#%%\n"}
evaluator.rank()
# -
# Currently, the following functions are implemented:
#
# * `evaluator.plot_boxplots()`
# * `evaluator.ranks()`
# * `evaluator.t_test()`
# * `evaluator.sign_test()`
# * `evaluator.ranksum_test()`
# * `evaluator.t_test_with_bonferroni_correction()`
# * `evaluator.wilcoxon_test()`
# * `evaluator.friedman_test()`
# * `evaluator.nemenyi()`
| [
2,
11420,
198,
2,
474,
929,
88,
353,
25,
198,
2,
220,
220,
474,
929,
88,
5239,
25,
198,
2,
220,
220,
220,
220,
2420,
62,
15603,
341,
25,
198,
2,
220,
220,
220,
220,
220,
220,
7552,
25,
764,
9078,
198,
2,
220,
220,
220,
220,
... | 3.215818 | 1,492 |
#!/usr/bin/env python
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm, ListedColormap,BoundaryNorm
import numpy as np
import datetime as dt
import sys, os, pickle
from scipy.ndimage.filters import gaussian_filter
import pandas as pd
from mpl_toolkits.basemap import *
from sklearn.calibration import CalibratedClassifierCV, calibration_curve
from sklearn import metrics
from keras.models import Model, save_model, load_model
from keras.layers import Dense, Activation, Conv2D, Input, AveragePooling2D, Flatten, LeakyReLU
from keras.layers import Dropout, BatchNormalization
from keras.regularizers import l2
from keras.optimizers import SGD, Adam
import keras.backend as K
import tensorflow as tf
from keras.utils import plot_model
### NEURAL NETWORK PARAMETERS ###
# Hyper-parameters come from the command line: argv[1] = neurons in the single
# hidden layer, argv[2] = dropout rate.
nn_params = { 'num_layers': 1, 'num_neurons': [ int(sys.argv[1]) ], 'dropout': float(sys.argv[2]), 'lr': 0.001, 'num_epochs': 10, \
              'report_window_space':[ 120 ], 'report_window_time':[ 2 ] }
year = 2011
d=120
# Suffix encoding the temporal report window, e.g. "_2hr".
twin = "_%dhr"%nn_params['report_window_time'][0]
##################################
# NOTE(review): the message says "random forest" but this script loads a neural
# network below — the print text looks stale.
print('Training random forest classifier')
features = ['fhr', 'dayofyear', 'lat', 'lon', 'UP_HELI_MAX', 'UP_HELI_MAX03', 'UP_HELI_MAX01', 'W_UP_MAX', 'W_DN_MAX', 'WSPD10MAX', 'MUCAPE', 'SBCAPE', 'SBCINH', 'SHR06', 'MLCINH', 'MLLCL', 'SHR01', 'SRH01', 'SRH03', 'T2', 'TD2', 'PSFC','PREC_ACC_NC','CAPESHEAR', 'STP', 'LR75']
large_scale_features = ['U925','U850','U700','U500','V925','V850','V700','V500','T925','T850','T700','T500','TD925','TD850','TD700','TD500']
neighbor_fields = ['UP_HELI_MAX', 'UP_HELI_MAX03', 'UP_HELI_MAX01', 'W_UP_MAX', 'W_DN_MAX', 'WSPD10MAX', 'STP', 'CAPESHEAR', 'MUCAPE', 'SBCAPE', 'SBCINH', 'MLLCL', 'SHR06', 'SHR01', 'SRH03', 'SRH01', 'T2', 'TD2', 'PSFC', 'PREC_ACC_NC']
# One extra feature per field and per offset tag (E/S/N/W and TM/TP suffixes —
# presumably spatial and temporal neighbours; confirm against feature builder).
neighbor_features = [ f+'-%s'%n for f in neighbor_fields for n in ['E1', 'S1', 'N1', 'W1', 'TP1', 'TM1', 'TM2', 'TP2'] ]
features = features + large_scale_features + neighbor_features
print('Number of features', len(features))
print(features)
dense_model = None
model_fname = 'neural_network_%s_%dkm%s_nn%d_drop%.1f.h5'%(year,d,twin,nn_params['num_neurons'][0],nn_params['dropout'])
# NOTE(review): brier_score_keras / brier_skill_score_keras / auc are not
# defined in this chunk — presumably defined or imported earlier in the
# original file; confirm before running.
dense_model = load_model(model_fname, custom_objects={'brier_score_keras': brier_score_keras, 'brier_skill_score_keras':brier_skill_score_keras, 'auc':auc })
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
11748,
2603,
29487,
8019,
198,
6759,
29487,
8019,
13,
1904,
10786,
46384,
11537,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
6738,
2603,
29487,
8019,
13,
4033... | 2.48179 | 961 |
import pytest
from kickscraper import KickStarterClient
from kickscraper import Project
@pytest.fixture
@pytest.fixture
| [
11748,
12972,
9288,
198,
198,
6738,
17364,
66,
38545,
1330,
10279,
1273,
2571,
11792,
198,
6738,
17364,
66,
38545,
1330,
4935,
628,
198,
31,
9078,
9288,
13,
69,
9602,
628,
198,
31,
9078,
9288,
13,
69,
9602,
628,
198
] | 3.25641 | 39 |
from setuptools import setup
# Read the long description shown on PyPI from the project README.
with open("README.md", "r") as fh:
    long_description = fh.read()
setup(
    name='pyFortiManagerAPI',
    description='A Python wrapper for the FortiManager REST API',
    version='0.1.6',
    # Single-module distribution; the module file lives under src/.
    py_modules=["pyFortiManagerAPI"],
    package_dir={'': 'src'},
    keywords=['Fortimanager', 'RestAPI', 'API', 'Fortigate', 'Fortinet', "python", "Fortimanager API",
              "Fortimanager API Python", "Python examples"],
    classifiers=[
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: 3.9',
        'Operating System :: OS Independent',
    ],
    long_description=long_description,
    long_description_content_type="text/markdown",
    # Runtime dependencies.
    install_requires=['requests', 'urllib3'],
    url="https://github.com/akshaymane920/pyFortiManagerAPI",
    author="Akshay Mane",
    author_email="akshaymane920@gmail.com",
)
| [
6738,
900,
37623,
10141,
1330,
9058,
198,
198,
4480,
1280,
7203,
15675,
11682,
13,
9132,
1600,
366,
81,
4943,
355,
277,
71,
25,
198,
220,
220,
220,
890,
62,
11213,
796,
277,
71,
13,
961,
3419,
198,
198,
40406,
7,
198,
220,
220,
22... | 2.609023 | 399 |
from datetime import datetime
import unittest
from flight_tables.flight_parsing import Flight, ParsedFlights
class TestFlight(unittest.TestCase):
    """Unit tests for the Flight class from flight_tables.flight_parsing.

    NOTE(review): no test methods are defined in this chunk yet.
    """
if __name__ == '__main__':
    # Run the test suite when this module is executed directly.
    unittest.main()
| [
6738,
4818,
8079,
1330,
4818,
8079,
198,
11748,
555,
715,
395,
198,
198,
6738,
5474,
62,
83,
2977,
13,
22560,
62,
79,
945,
278,
1330,
13365,
11,
23042,
276,
7414,
2337,
198,
198,
4871,
6208,
43069,
7,
403,
715,
395,
13,
14402,
20448... | 3.038961 | 77 |
# Generated by Django 3.1.1 on 2020-10-22 20:10
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
513,
13,
16,
13,
16,
319,
12131,
12,
940,
12,
1828,
1160,
25,
940,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 2.84375 | 32 |
#
# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
#
#
# util
#
import datetime
import sys
import traceback
import collections
# end UTCTimestampUsec
# end UTCTimestampUsecToString
# end enum
def import_class(import_str):
    """Return a class (or any attribute) from a dotted-path string.

    :param import_str: fully qualified name, e.g. ``'collections.Counter'``.
    :returns: the attribute named after the last dot of ``import_str``.
    :raises ImportError: if the module imports but lacks the attribute, or if
        the module itself cannot be imported.
    """
    mod_str, _sep, class_str = import_str.rpartition('.')
    # importlib.import_module is the documented replacement for raw
    # __import__ and returns the leaf module directly, removing the need
    # for a sys.modules lookup.
    import importlib
    module = importlib.import_module(mod_str)
    try:
        return getattr(module, class_str)
    except AttributeError:
        # Preserve the original contract (lookup failures surface as
        # ImportError) but join the traceback lines so the message is
        # readable instead of a list repr.
        raise ImportError('Class %s cannot be found (%s)' %
                          (class_str,
                           ''.join(traceback.format_exception(*sys.exc_info()))))
# end class deque
| [
2,
198,
2,
15069,
357,
66,
8,
2211,
7653,
9346,
27862,
11,
3457,
13,
1439,
2489,
10395,
13,
198,
2,
198,
198,
2,
198,
2,
7736,
198,
2,
198,
198,
11748,
4818,
8079,
198,
11748,
25064,
198,
11748,
12854,
1891,
198,
11748,
17268,
198... | 2.444043 | 277 |
# -*- coding: utf-8 -*-
"""leetcode_twosum.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1RPnZuejWl1-nL1k0Q0m7BXYbwQduQJaJ
"""
# Problem: https://leetcode.com/problems/two-sum/
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
293,
316,
8189,
62,
4246,
418,
388,
13,
541,
2047,
65,
198,
198,
38062,
4142,
7560,
416,
1623,
4820,
2870,
13,
198,
198,
20556,
2393,
318,
5140,
379,
198,
220,
... | 2.147826 | 115 |
from django.apps import AppConfig
| [
6738,
42625,
14208,
13,
18211,
1330,
2034,
16934,
628
] | 3.888889 | 9 |
"""Implement the Unit class."""
import numpy as np
from .. import config, constants
__all__ = ["Pixels", "Degrees", "Munits", "Percent"]
# Canonical unit constants exposed by this module (see __all__ above).
Pixels = _PixelUnits()
# Multiply a value in degrees by this factor to convert it to radians.
Degrees = constants.PI / 180
# Base scene unit — presumably 1 "munit" is the library's native length; TODO confirm.
Munits = 1
| [
37811,
3546,
26908,
262,
11801,
1398,
526,
15931,
198,
198,
11748,
299,
32152,
355,
45941,
198,
198,
6738,
11485,
1330,
4566,
11,
38491,
198,
198,
834,
439,
834,
796,
14631,
47,
14810,
1600,
366,
35,
1533,
6037,
1600,
366,
44,
41667,
... | 2.875 | 72 |
import os
import pandas as pd
# Folder-name suffixes identifying the MSCI RIF exports of interest
# (core / small-cap universes, developed / emerging markets).
ric_folders_endswith = (
    'core_dm_daily_c_d_rif',
    'core_em_daily_d_rif',
    'scap_dm_daily_c_d_rif',
    'scap_em_daily_d_rif'
)
# File-name suffixes for the corresponding daily security-code RIF files.
ric_files_endswith = (
    'CORE_DM_ALL_SECURITY_CODE_DAILY_D_RIF',
    'CORE_EM_ALL_SECURITY_CODE_DAILY_D_RIF',
    'SCAP_DM_ALL_SECURITY_CODE_DAILY_D_RIF',
    'SCAP_EM_ALL_SECURITY_CODE_DAILY_D_RIF'
)
if __name__ == '__main__':
    # NOTE(review): hard-coded, user-specific input path — consider making it
    # a command-line argument.
    input_directory = 'C:/Users/Mnguyen/Data/msci/csv/'
    folder_list = os.listdir(input_directory)
    # Keep only folders whose names end with one of the RIF suffixes, sorted.
    ric_folder_list = sorted(list(filter(lambda x: x.endswith(ric_folders_endswith), folder_list)))
| [
11748,
28686,
198,
11748,
19798,
292,
355,
279,
67,
628,
198,
1173,
62,
11379,
364,
62,
437,
2032,
342,
796,
357,
198,
220,
220,
220,
705,
7295,
62,
36020,
62,
29468,
62,
66,
62,
67,
62,
81,
361,
3256,
198,
220,
220,
220,
705,
7... | 1.967532 | 308 |
import logging
import math
from pyha.common.context_managers import SimPath
from pyha.common.fixed_point import Sfix
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger('complex')
class Complex:
"""
Complex number with ``.real`` and ``.imag`` elements. Default type is ``Complex(left=0, right=-17)``.
:param val:
:param left: left bound for both components
:param right: right bound for both components
:param overflow_style: 'wrap' (default) or 'saturate'.
:param round_style: 'truncate' (default) or 'round'
>>> a = Complex(0.45 + 0.88j, left=0, right=-17)
>>> a
0.45+0.88j [0:-17]
>>> a.real
0.4499969482421875 [0:-17]
>>> a.imag
0.8799972534179688 [0:-17]
Another way to construct:
>>> a = Sfix(-0.5, 0, -17)
>>> b = Sfix(0.5, 0, -17)
>>> Complex(a, b)
-0.50+0.50j [0:-17]
"""
__slots__ = ('round_style', 'overflow_style', 'right', 'left', 'val', 'wrap_is_ok', 'signed', 'bits', 'upper_bits')
@property
# @real.setter
# def real(self, value):
# self.val = value.val + self.val.imag*1j
# self.fixed_effects()
@property
# @imag.setter
# def imag(self, value):
# self.val = self.val.real + value.val*1j
# self.fixed_effects()
    def __mul__(self, other):
        """ Complex multiplication!

        (x + yj)(u + vj) = (xu - yv) + (xv + yu)j

        Also support mult by float.
        """
        other = self._convert_other_operand(other)
        extra_bit = 1  # for complex mult, from addition
        if isinstance(other, (Sfix, float)):
            extra_bit = 0  # for real mult
        # Full-precision bit growth: the integer side grows by both operands'
        # left bounds plus one, plus one more bit for the complex add/sub of
        # the cross terms; the fraction sides simply add.
        left = (self.left + other.left + 1) + extra_bit
        right = self.right + other.right
        # init_only=True: construct with the computed bounds without
        # re-applying overflow/rounding effects — TODO confirm semantics.
        return Complex(self.val * other.val,
                       left,
                       right,
                       init_only=True)
@staticmethod
default_complex = Complex(0, 0, -17, overflow_style='saturate', round_style='round')
| [
11748,
18931,
198,
11748,
10688,
198,
198,
6738,
12972,
3099,
13,
11321,
13,
22866,
62,
805,
10321,
1330,
3184,
15235,
198,
6738,
12972,
3099,
13,
11321,
13,
34021,
62,
4122,
1330,
311,
13049,
198,
198,
6404,
2667,
13,
35487,
16934,
7,
... | 2.237136 | 894 |
import urllib
from textblob import TextBlob, Word
from word2number import w2n
from user import User
from goal import Goal
from responses import get_response
| [
11748,
2956,
297,
571,
198,
198,
6738,
2420,
2436,
672,
1330,
8255,
3629,
672,
11,
9678,
198,
6738,
1573,
17,
17618,
1330,
266,
17,
77,
198,
198,
6738,
2836,
1330,
11787,
198,
6738,
3061,
1330,
25376,
198,
6738,
9109,
1330,
651,
62,
... | 3.613636 | 44 |
from proxypool.schemas.proxy import Proxy
from proxypool.crawlers.base import BaseCrawler
import json
BASE_URL = 'https://cool-proxy.net/proxies.json'
class CoolproxyCrawler(BaseCrawler):
    """Proxy crawler for cool-proxy.net.

    https://cool-proxy.net/
    """
    # Endpoint list — presumably fetched by BaseCrawler.crawl(); the site
    # serves the proxy list as JSON at this URL.
    urls = [BASE_URL]
if __name__ == '__main__':
    # Quick manual check: crawl the endpoint and print every proxy found.
    crawler = CoolproxyCrawler()
    for proxy in crawler.crawl():
        print(proxy)
| [
6738,
14793,
4464,
970,
13,
1416,
4411,
292,
13,
36436,
1330,
38027,
198,
6738,
14793,
4464,
970,
13,
66,
1831,
8116,
13,
8692,
1330,
7308,
34,
39464,
198,
11748,
33918,
198,
198,
33,
11159,
62,
21886,
796,
705,
5450,
1378,
24494,
12,... | 2.510067 | 149 |
import pytest
from tgbotscenario.asynchronous import MemorySceneStorage
from tests.generators import generate_chat_id
@pytest.mark.parametrize(
("chat_id", "user_id"),
(
(generate_chat_id(), generate_chat_id()), # different
(generate_chat_id(),) * 2 # same
)
)
@pytest.mark.asyncio
@pytest.mark.parametrize(
("chat_id", "user_id"),
(
(generate_chat_id(), generate_chat_id()), # different
(generate_chat_id(),) * 2 # same
)
)
@pytest.mark.asyncio
| [
11748,
12972,
9288,
198,
198,
6738,
256,
70,
13645,
1416,
39055,
13,
292,
31301,
1330,
14059,
36542,
31425,
198,
6738,
5254,
13,
8612,
2024,
1330,
7716,
62,
17006,
62,
312,
628,
198,
31,
9078,
9288,
13,
4102,
13,
17143,
316,
380,
2736... | 2.271111 | 225 |
import os
import numpy as np
import pandas as pd
import matplotlib as mpl
from matplotlib import pyplot as plt
from svphase.utils.config import FONT, COLORS
from svphase.learn.evaluation import ClassLabel
#def _get_model_and_version(model_stat):
# fname = os.path.basename(model_stat).split('.')
# return fname[0], '.'.join(fname[1:3])
if __name__=='__main__':
    import argparse
    parser = argparse.ArgumentParser(description='Plot accuracies for each class from a truth file and a set of predictions')
    parser.add_argument('-o', dest='save', default=None, help="Path to save figure to")
    parser.add_argument('truth', help="truth.bed file, with loci+truth labels")
    parser.add_argument('pred', help="prediction csv file from InPhadel, (likely used truth.bed to run InPhadel)")
    args = parser.parse_args()
    # NOTE(review): `plot` is presumably defined earlier in this module (not
    # visible in this chunk) — confirm.
    plot(args.truth, args.pred, out_fpath=args.save)
| [
11748,
28686,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
2603,
29487,
8019,
355,
285,
489,
198,
6738,
2603,
29487,
8019,
1330,
12972,
29487,
355,
458,
83,
198,
6738,
38487,
40715,
13,
26791,
13... | 2.996516 | 287 |
'''
May 2017
@author: Burkhard
'''
import unittest
from Ch08_Code.LanguageResources import I18N
from Ch08_Code.GUI_Refactored import OOP as GUI
#==========================
if __name__ == '__main__':
    # Run the unit tests when this module is executed directly.
    unittest.main()
| [
7061,
6,
198,
6747,
2177,
198,
31,
9800,
25,
37940,
10424,
198,
7061,
6,
198,
198,
11748,
555,
715,
395,
198,
6738,
609,
2919,
62,
10669,
13,
32065,
33236,
1330,
314,
1507,
45,
198,
6738,
609,
2919,
62,
10669,
13,
40156,
62,
8134,
... | 2.263158 | 114 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Given a symbol s, what is the probability P(n|s) of a stroke count of n?
Download data for each symbol.
"""
import logging
import sys
logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s',
level=logging.INFO,
stream=sys.stdout)
import pymysql.cursors
import numpy
from collections import Counter
import yaml
# My packages
from hwrt.handwritten_data import HandwrittenData
from hwrt import utils
from write_math_utils import get_formulas
def main(dataset='all'):
    """Collect per-symbol stroke-count statistics from the write-math DB.

    Downloads every accepted recording per formula, logs summary statistics,
    flags outlier recordings, and writes smoothed P(n|symbol) probabilities
    to a YAML file.

    Parameters
    ----------
    dataset : string
        Either 'all' or a path to a yaml symbol file.
    """
    cfg = utils.get_database_configuration()
    mysql = cfg['mysql_online']
    connection = pymysql.connect(host=mysql['host'],
                                 user=mysql['user'],
                                 passwd=mysql['passwd'],
                                 db=mysql['db'],
                                 cursorclass=pymysql.cursors.DictCursor)
    cursor = connection.cursor()
    # TODO: no formulas, only single-symbol ones.
    formulas = get_formulas(cursor, dataset)
    prob = {}
    # Go through each formula and download every raw_data instance
    for formula in formulas:
        stroke_counts = []
        recordings = []
        # Only clean recordings: no wild points and no corrections.
        sql = (("SELECT `wm_raw_draw_data`.`id`, `data`, `is_in_testset`, "
                "`wild_point_count`, `missing_line`, `user_id`, "
                "`display_name` "
                "FROM `wm_raw_draw_data` "
                "JOIN `wm_users` ON "
                "(`wm_users`.`id` = `wm_raw_draw_data`.`user_id`) "
                "WHERE `accepted_formula_id` = %s "
                "AND wild_point_count=0 "
                "AND has_correction=0 "
                # "AND `display_name` LIKE 'MfrDB::%%'"
                ) %
               str(formula['id']))
        cursor.execute(sql)
        raw_datasets = cursor.fetchall()
        logging.info("%s (%i)", formula['formula_in_latex'], len(raw_datasets))
        for raw_data in raw_datasets:
            try:
                handwriting = HandwrittenData(raw_data['data'],
                                              formula['id'],
                                              raw_data['id'],
                                              formula['formula_in_latex'],
                                              raw_data['wild_point_count'],
                                              raw_data['missing_line'],
                                              raw_data['user_id'])
                stroke_counts.append(len(handwriting.get_pointlist()))
                recordings.append(handwriting)
            except Exception as e:
                # Malformed recordings are logged and skipped.
                logging.info("Raw data id: %s", raw_data['id'])
                logging.info(e)
        if len(stroke_counts) > 0:
            logging.info("\t[%i - %i]", min(stroke_counts), max(stroke_counts))
            median = numpy.median(stroke_counts)
            logging.info("\tMedian: %0.2f\tMean: %0.2f\tstd: %0.2f",
                         median,
                         numpy.mean(stroke_counts),
                         numpy.std(stroke_counts))
            # Make prob
            s = sorted(Counter(stroke_counts).items(),
                       key=lambda n: n[1],
                       reverse=True)
            key = formula['formula_in_latex']
            prob[key] = {}
            for stroke_nr, count in s:
                prob[key][stroke_nr] = count
            # Outliers
            modes = get_modes(stroke_counts)
            logging.info("\tModes: %s", modes)
            exceptions = []
            for rec in recordings:
                if len(rec.get_pointlist()) not in modes:
                    url = (("http://www.martin-thoma.de/"
                            "write-math/view/?raw_data_id=%i - "
                            "%i strokes") % (rec.raw_data_id,
                                             len(rec.get_pointlist())))
                    dist = get_dist(len(rec.get_pointlist()), modes)
                    exceptions.append((url, len(rec.get_pointlist()), dist))
            print_exceptions(exceptions, max_print=10)
        else:
            logging.debug("No recordings for symbol "
                          "'http://www.martin-thoma.de/"
                          "write-math/symbol/?id=%s'.",
                          formula['id'])
    write_prob(prob, "prob_stroke_count_by_symbol.yml")
def print_exceptions(exceptions, max_print=10):
    """
    Print the exceptions, but not too many.

    Parameters
    ----------
    exceptions : list
        Triplets (url, stroke_count, dist to closest mode)
    max_print : int
        Print not more then max_print lines
    """
    # Worst offenders first: largest distance to a mode, then stroke count.
    worst_first = sorted(exceptions,
                         key=lambda item: (item[2], item[1]),
                         reverse=True)
    for url, stroke_count, _dist in worst_first[:max_print]:
        logging.info("\t%s - %i strokes", url, stroke_count)
def get_dist(stroke_count, modes):
    """
    Get the distance to the closest mode.

    Parameters
    ----------
    stroke_count : int
    modes : list of int

    Returns
    -------
    int or float
        Absolute distance to the closest mode; ``float('inf')`` when
        ``modes`` is empty.
    """
    # min() with a default replaces the manual running-minimum loop and
    # preserves the original inf result for an empty mode list.
    return min((abs(mode - stroke_count) for mode in modes),
               default=float('inf'))
def get_modes(empiric_distribution, at_least_total=10):
    """
    Get all values which appear at least ``at_least_total`` times in the
    data.

    The most common value is always returned, even when it has fewer than
    ``at_least_total`` appearances.

    Parameters
    ----------
    empiric_distribution : list
        List of integers
    at_least_total : int
        Minimum number of appearances for a value to qualify as a mode.

    Returns
    -------
    list of int
        Mode values, ordered from most to least frequent.
    """
    # Distinct values sorted by frequency, most common first.
    by_frequency = sorted(Counter(empiric_distribution).items(),
                          key=lambda n: n[1],
                          reverse=True)
    modes = []
    for stroke_count, appearences in by_frequency:
        # BUG FIX: the original tested `stroke_count >= at_least_total and
        # appearences/total >= at_least_total`, i.e. it compared the VALUE
        # itself and a frequency *ratio* against the appearance threshold.
        # Per the docstring, the appearance COUNT is what matters.
        if appearences >= at_least_total or not modes:
            modes.append(stroke_count)
    return modes
def write_prob(counts, filename):
    """
    Write add-one-smoothed stroke-count probabilities to a YAML file.

    Parameters
    ----------
    counts : dict
        Mapping symbols to dicts of stroke counts with total count
    filename : str
    """
    probs = {}
    for symbol_id, stroke_histogram in counts.items():
        # Laplace (add-one) smoothing over stroke counts 1..9.
        smoothed = {i: 1 + stroke_histogram.get(i, 0) for i in range(1, 10)}
        norm = float(sum(smoothed.values()))
        probs[symbol_id] = {i: cnt / norm for i, cnt in smoothed.items()}
    # Write it
    with open(filename, 'w') as outfile:
        outfile.write(yaml.dump(probs, default_flow_style=False))
if __name__ == "__main__":
    # NOTE(review): get_parser is not defined in this chunk — presumably
    # defined earlier in the original file; confirm before running.
    args = get_parser().parse_args()
    main(args.filename)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
37811,
198,
15056,
257,
6194,
264,
11,
644,
318,
262,
12867,
350,
7,
77,
91,
82,
8,
286,
257,
14000,
954,
286,... | 1.926132 | 3,601 |
"""
Example:
python compute_metrics.py --inference-path classification_results.json.gz \
--label-path classification/data/net_training_20161115.csv \
--dest-path fltest.html --fishing-ranges classification/data/combined_fishing_ranges.csv \
--dump-labels-to . \
--skip-localisation-metrics
"""
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
import os
import csv
import subprocess
import numpy as np
import dateutil.parser
import logging
import argparse
from collections import namedtuple, defaultdict
import sys
import yattag
import newlinejson as nlj
from classification.utility import VESSEL_CLASS_DETAILED_NAMES, VESSEL_CATEGORIES, TEST_SPLIT, schema, atomic
import gzip
import dateutil.parser
import datetime
import pytz
'''
unknown:
fishing:
other_not_fishing:
passenger:
gear:
seismic_vessel:
helicopter:
cargo_or_tanker:
bunker_or_tanker:
bunker:
tanker:
cargo_or_reefer:
cargo:
reefer:
patrol_vessel:
research:
dive_vessel:
submarine:
dredge:
supply_vessel:
fish_factory:
tug:
non_fishing:
squid_jigger:
drifting_longlines:
pole_and_line:
other_fishing:
trollers:
fixed_gear:
pots_and_traps:
set_longlines:
set_gillnets:
trawlers:
purse_seines:
driftnets:
unknown_fishing:
'''
# coarse_mapping = [
# ['cargo_or_tanker', {'tanker', 'cargo', 'bunker', 'reefer'}],
# ['passenger', {'passenger'}],
# ['helicopter', {'helicopter'}]
# ['seismic_vessel', ['seismic_vessel'}],
# ['patrol_vessel', {'patrol_vessel'}],
# ['research', {'research'}],
# ['']
# ['tug', {'tug'}],
# ['other_not_fishing', {'other_not_fishing'}],
# ['drifting_longlines', {'drifting_longlines'}],
# ['purse_seines', {'purse_seines'}],
# ['fixed_gear', {'pots_and_traps', 'set_gillnets', 'set_longlines'}],
# ['squid_jigger', ['squid_jigger']],
# ['gear', ['gear']],
# ['trawlers', {'trawlers'}],
# ['other_fishing', {'pole_and_line', 'trollers', 'other_fishing', 'drift_nets'}]
# ]
# Coarse label set used for the mid-granularity classification metrics.
coarse_categories = [
    'cargo_or_tanker', 'passenger', 'seismic_vessel', 'tug', 'other_fishing',
    'drifting_longlines', 'purse_seines', 'fixed_gear', 'squid_jigger', 'trawlers',
    'other_not_fishing']
# Build coarse -> {atomic labels} by walking the label schema: a fine label
# keeps its own bucket when it is itself a coarse category, otherwise it
# falls into the catch-all ('other_fishing' / 'other_not_fishing').
coarse_mapping = defaultdict(set)
for k0, extra in [('fishing', 'other_fishing'),
                  ('non_fishing', 'other_not_fishing')]:
    for k1, v1 in schema['unknown'][k0].items():
        key = k1 if (k1 in coarse_categories) else extra
        if v1 is None:
            coarse_mapping[key] |= {k1}
        else:
            coarse_mapping[key] |= set(atomic(v1))
# Freeze into an ordered list of (coarse label, atomic label set) pairs.
coarse_mapping = [(k, coarse_mapping[k]) for k in coarse_categories]
# Binary fishing / non-fishing split over the same atomic labels.
fishing_mapping = [
    ['fishing', set(atomic(schema['unknown']['fishing']))],
    ['non_fishing', set(atomic(schema['unknown']['non_fishing']))],
]
# for k, v in coarse_mapping:
#     print(k, v)
# print()
# for k, v in fishing_mapping:
#     print(k, v)
# raise SystemExit
# Faster than using dateutil
# NOTE(review): the comment above appears to refer to a date-parsing helper
# defined elsewhere / removed — it does not describe the tuples below.
AttributeResults = namedtuple(
    'AttributeResults',
    ['mmsi', 'inferred_attrs', 'true_attrs', 'true_labels', 'start_dates'])
LocalisationResults = namedtuple('LocalisationResults',
                                 ['true_fishing_by_mmsi',
                                  'pred_fishing_by_mmsi', 'label_map'])
# raw: counts as printed in cells; scaled: fractional values used for shading
# (with -1 used as an "empty row/column" sentinel — see ydump_confusion_matrix).
ConfusionMatrix = namedtuple('ConfusionMatrix', ['raw', 'scaled'])
# (metric key, human-readable title) pairs for the report sections.
CLASSIFICATION_METRICS = [
    ('fishing', 'Is Fishing'),
    ('coarse', 'Coarse Labels'),
    ('fine', 'Fine Labels'),
]
css = """
table {
text-align: center;
border-collapse: collapse;
}
.confusion-matrix th.col {
height: 140px;
white-space: nowrap;
}
.confusion-matrix th.col div {
transform: translate(16px, 49px) rotate(315deg);
width: 30px;
}
.confusion-matrix th.col span {
border-bottom: 1px solid #ccc;
padding: 5px 10px;
text-align: left;
}
.confusion-matrix th.row {
text-align: right;
}
.confusion-matrix td.diagonal {
border: 1px solid black;
}
.confusion-matrix td.offdiagonal {
border: 1px dotted grey;
}
.unbreakable {
page-break-inside: avoid;
}
"""
# basic metrics
# Helper function formatting as HTML (using yattag)
def ydump_confusion_matrix(doc, cm, labels, **kwargs):
    """Dump an sklearn confusion matrix to HTML using yatag

    Args:
        doc: yatag Doc instance
        cm: ConfusionMatrix instance
        labels: list of str
            labels for confusion matrix
    """
    doc, tag, text, line = doc.ttl()
    with tag('table', klass='confusion-matrix', **kwargs):
        # Header row: empty corner cell, then one rotated label per column.
        with tag('tr'):
            line('th', '')
            for x in labels:
                with tag('th', klass='col'):
                    with tag('div'):
                        line('span', x)
        # One table row per true label; cells show raw counts, with a
        # background colour derived from the scaled (fractional) value.
        for i, (l, row) in enumerate(zip(labels, cm.scaled)):
            with tag('tr'):
                line('th', str(l), klass='row')
                for j, x in enumerate(row):
                    if i == j:
                        # Diagonal (correct predictions): red -> yellow for
                        # x in [0, 0.5], yellow -> green for x in (0.5, 1].
                        if x == -1:
                            # No values present in this row, column
                            color = '#FFFFFF'
                        elif x > 0.5:
                            cval = np.clip(int(round(512 * (x - 0.5))), 0, 255)
                            invhexcode = '{:02x}'.format(255 - cval)
                            color = '#{}FF00'.format(invhexcode)
                        else:
                            cval = np.clip(int(round(512 * x)), 0, 255)
                            hexcode = '{:02x}'.format(cval)
                            color = '#FF{}00'.format(hexcode)
                        klass = 'diagonal'
                    else:
                        # Off-diagonal (confusions): white -> red as the
                        # fraction of mass in the wrong cell grows.
                        cval = np.clip(int(round(255 * x)), 0, 255)
                        hexcode = '{:02x}'.format(cval)
                        invhexcode = '{:02x}'.format(255 - cval)
                        color = '#FF{}{}'.format(invhexcode, invhexcode)
                        klass = 'offdiagonal'
                    with tag('td', klass=klass, bgcolor=color):
                        raw = cm.raw[i, j]
                        # Tooltip carries the exact scaled value.
                        with tag('font',
                                 color='#000000',
                                 title='{0:.3f}'.format(x)):
                            text(str(raw))
def ydump_table(doc, headings, rows, **kwargs):
    """Dump an html table using yatag

    Args:
        doc: yatag Doc instance
        headings: [str]
        rows: [[str]]
    """
    doc, tag, text, line = doc.ttl()

    def emit_row(cells, cell_tag):
        # Emit one <tr> whose cells all use the given tag ('th' or 'td').
        with tag('tr'):
            for cell in cells:
                line(cell_tag, str(cell))

    with tag('table', **kwargs):
        emit_row(headings, 'th')
        for row in rows:
            emit_row(row, 'td')
def ydump_attrs(doc, results):
    """dump metrics for `results` to html using yatag

    Args:
        doc: yatag Doc instance
        results: InferenceResults instance
    """
    doc, tag, text, line = doc.ttl()
    # TODO: move computations out of loops for speed.
    # true_mask = np.array([(x is not None) for x in results.true_attrs])
    # infer_mask = np.array([(x is not None) for x in results.inferred_attrs])
    # Only rows where both the true and inferred attribute are present
    # (non-NaN) contribute to the error metrics below.
    true_mask = ~np.isnan(results.true_attrs)
    infer_mask = ~np.isnan(results.inferred_attrs)
    rows = []
    for dt in np.unique(results.start_dates):
        mask = true_mask & infer_mask & (results.start_dates == dt)
        rows.append(
            [dt, RMS(results.true_attrs[mask], results.inferred_attrs[mask]),
             MAE(results.true_attrs[mask], results.inferred_attrs[mask])])
    with tag('div', klass='unbreakable'):
        line('h3', 'RMS Error by Date')
        ydump_table(doc, ['Start Date', 'RMS Error', 'Abs Error'],
                    [(a.date(), '{:.2f}'.format(b), '{:.2f}'.format(c))
                     for (a, b, c) in rows])
    logging.info('    Consolidating attributes')
    # Merge the per-date attribute estimates into one estimate per vessel.
    consolidated = consolidate_attribute_across_dates(results)
    # true_mask = np.array([(x is not None) for x in consolidated.true_attrs])
    # infer_mask = np.array([(x is not None) for x in consolidated.inferred_attrs])
    true_mask = ~np.isnan(consolidated.true_attrs)
    infer_mask = ~np.isnan(consolidated.inferred_attrs)
    logging.info('    RMS Error')
    with tag('div', klass='unbreakable'):
        line('h3', 'Overall RMS Error')
        text('{:.2f}'.format(
            RMS(consolidated.true_attrs[true_mask & infer_mask],
                consolidated.inferred_attrs[true_mask & infer_mask])))
    logging.info('    ABS Error')
    with tag('div', klass='unbreakable'):
        line('h3', 'Overall Abs Error')
        text('{:.2f}'.format(
            MAE(consolidated.true_attrs[true_mask & infer_mask],
                consolidated.inferred_attrs[true_mask & infer_mask])))
    logging.info('    Error by Label')
    with tag('div', klass='unbreakable'):
        line('h3', 'RMS Error by Label')
        ydump_table(
            doc,
            ['Label', 'Count', 'RMS Error', 'Abs Error', 'Mean', 'StdDev'
             ], # TODO: pass in length and units
            [
                (a, count, '{:.2f}'.format(b), '{:.2f}'.format(ab),
                 '{:.2f}'.format(c), '{:.2f}'.format(d))
                for (a, count, b, ab, c, d) in RMS_MAE_by_label(
                    consolidated.true_attrs, consolidated.inferred_attrs,
                    consolidated.true_labels)
            ])
def ydump_metrics(doc, results):
    """Dump classification metrics for `results` to HTML using yatag.

    Args:
        doc: yatag Doc instance
        results: InferenceResults instance
    """
    doc, tag, text, line = doc.ttl()

    # Accuracy evaluated separately for each inference start date, using the
    # per-date mask as the third (weight/mask) argument of accuracy_score.
    per_date = []
    for dt in np.unique(results.start_dates):
        acc = accuracy_score(results.true_labels, results.inferred_labels,
                             (results.start_dates == dt))
        per_date.append((dt, acc))

    with tag('div', klass='unbreakable'):
        line('h3', 'Accuracy by Date')
        ydump_table(doc, ['Start Date', 'Accuracy'],
                    [(d.date(), '{:.2f}'.format(a)) for (d, a) in per_date])

    # Collapse per-date rows into one prediction per mmsi before the
    # overall metrics.
    consolidated = consolidate_across_dates(results)

    with tag('div', klass='unbreakable'):
        line('h3', 'Overall Accuracy')
        text('{:.2f}'.format(
            accuracy_score(consolidated.true_labels,
                           consolidated.inferred_labels)))

    cm = confusion_matrix(consolidated)
    with tag('div', klass='unbreakable'):
        line('h3', 'Confusion Matrix')
        ydump_confusion_matrix(doc, cm, results.label_list)

    with tag('div', klass='unbreakable'):
        line('h3', 'Metrics by Label')
        per_label = precision_recall_f1(consolidated.label_list,
                                        consolidated.true_labels,
                                        consolidated.inferred_labels)
        ydump_table(
            doc,
            ['Label (mmsi:true/total)', 'Precision', 'Recall', 'F1-Score'],
            [(lbl, '{:.2f}'.format(p), '{:.2f}'.format(r), '{:.2f}'.format(f))
             for (lbl, p, r, f) in per_label])

    # NOTE: emitted outside the div above, matching the original layout.
    wts = weights(consolidated.label_list, consolidated.true_labels,
                  consolidated.inferred_labels)
    line('h4', 'Accuracy with equal class weight')
    text(
        str(
            accuracy_score(consolidated.true_labels,
                           consolidated.inferred_labels, wts)))
# Map fine-grained fishing gear labels to coarser categories: the three
# fixed-gear types collapse into 'stationary_gear', the rest map to
# themselves.
fishing_category_map = dict(
    drifting_longlines='drifting_longlines',
    trawlers='trawlers',
    purse_seines='purse_seines',
    pots_and_traps='stationary_gear',
    set_gillnets='stationary_gear',
    set_longlines='stationary_gear',
)
# Helper functions for computing metrics
def consolidate_across_dates(results, date_range=None):
    """Consolidate scores for each MMSI across available dates.

    For each mmsi, the scores at all available dates are summed, and argmax
    over the summed scores gives the predicted label.

    Args:
        results: InferenceResults instance.
        date_range: optional (start, end) pair of dates; only rows whose
            start date lies in the half-open interval [start, end) are
            consolidated.

    Returns:
        InferenceResults instance with one row per consolidated mmsi.
    """
    if date_range is None:
        valid_date_mask = np.ones([len(results.mmsi)], dtype=bool)
    else:
        # TODO: write out end dates in the inference output so filtering
        # does not have to rely on start dates alone.  (A dead computation
        # of `start_dates + 180 days` was removed here — it was never used.)
        valid_date_mask = ((results.start_dates >= date_range[0]) &
                           (results.start_dates < date_range[1]))

    # Assign a dense consolidated index to each mmsi that survives the
    # date filter; -1 marks filtered-out rows.
    inferred_mmsi = []
    true_labels = []
    mmsi_map = {}
    mmsi_indices = []
    for i, m in enumerate(results.mmsi):
        if valid_date_mask[i]:
            if m not in mmsi_map:
                mmsi_map[m] = len(inferred_mmsi)
                inferred_mmsi.append(m)
                true_labels.append(results.true_labels[i])
            mmsi_indices.append(mmsi_map[m])
        else:
            mmsi_indices.append(-1)
    mmsi_indices = np.array(mmsi_indices)

    # Sum scores per consolidated mmsi and count contributing dates.
    scores = np.zeros([len(inferred_mmsi), len(results.label_list)])
    counts = np.zeros([len(inferred_mmsi)])
    for i, valid in enumerate(valid_date_mask):
        if valid:
            scores[mmsi_indices[i]] += results.indexed_scores[i]
            counts[mmsi_indices[i]] += 1

    inferred_labels = []
    for i, s in enumerate(scores):
        # argmax is taken before normalization; dividing by a positive
        # count would not change it anyway.
        inferred_labels.append(results.label_list[np.argmax(s)])
        if counts[i]:
            scores[i] /= counts[i]

    return InferenceResults(
        np.array(inferred_mmsi), np.array(inferred_labels),
        np.array(true_labels), None, scores, results.label_list)
def consolidate_attribute_across_dates(results, date_range=None):
    """Consolidate inferred attributes for each MMSI across available dates.

    For each mmsi the inferred attribute is averaged over all available
    dates (optionally restricted to [date_range[0], date_range[1])).  The
    true attribute is the mean of the non-NaN true values, and the true
    label is the first non-"Unknown" label found.

    Args:
        results: results instance with mmsi / *_attrs / start_dates /
            true_labels arrays.
        date_range: optional (start, end) pair of dates (half-open).

    Returns:
        AttributeResults instance with one entry per unique mmsi.
    """
    inferred_attributes = []
    true_attributes = []
    true_labels = []
    # argsort supplies the `sorter` permutation so searchsorted can
    # binary-search the (unsorted) mmsi array.
    indices = np.argsort(results.mmsi)
    mmsi = np.unique(results.mmsi)
    for m in mmsi:  # reuse `mmsi` instead of recomputing np.unique
        start = np.searchsorted(results.mmsi, m, side='left', sorter=indices)
        stop = np.searchsorted(results.mmsi, m, side='right', sorter=indices)
        attrs_for_mmsi = results.inferred_attrs[indices[start:stop]]
        if date_range:
            start_dates = results.start_dates[indices[start:stop]]
            # TODO: This is kind of messy; need to verify that date ranges
            # and output ranges line up.
            valid_date_mask = (start_dates >= date_range[0]) & (start_dates < date_range[1])
            attrs = attrs_for_mmsi[valid_date_mask]
        else:
            attrs = attrs_for_mmsi
        # Mean inferred attribute over surviving dates; NaN when none survive.
        if len(attrs):
            inferred_attributes.append(attrs.mean())
        else:
            inferred_attributes.append(np.nan)
        trues = results.true_attrs[indices[start:stop]]
        has_true = ~np.isnan(trues)
        if has_true.sum():
            true_attributes.append(trues[has_true].mean())
        else:
            true_attributes.append(np.nan)
        labels = results.true_labels[indices[start:stop]]
        has_labels = (labels != "Unknown")
        if has_labels.sum():
            true_labels.append(labels[has_labels][0])
        else:
            true_labels.append("Unknown")
    return AttributeResults(
        mmsi, np.array(inferred_attributes), np.array(true_attributes),
        np.array(true_labels), None)
def confusion_matrix(results):
    """Compute raw and normalized confusion matrices based on results.

    Args:
        results: InferenceResults instance

    Returns:
        ConfusionMatrix instance, with raw and normalized (`scaled`)
        attributes.
    """
    EPS = 1e-10
    raw = base_confusion_matrix(results.true_labels,
                                results.inferred_labels, results.label_list)
    totals_by_row = raw.sum(axis=1, keepdims=True)
    totals_by_col = raw.sum(axis=0, keepdims=True)

    # Off-diagonal cells: harmonic mean of the row-wise and column-wise
    # inverse error fractions.  The harmonic mean goes to 1 (=> BAD) when
    # EITHER the row error or the column error approaches 1 — i.e. when an
    # off-diagonal cell dominates either the predictions for this label or
    # the actual instances of it.  A plain mean would only go to 1 when it
    # dominates both, which decouples under unbalanced classes.
    normalized = 1 - harmonic_mean(1 - raw / (totals_by_col + EPS),
                                   1 - raw / (totals_by_row + EPS))

    # Diagonal cells: F1 score (also a harmonic mean — of recall and
    # precision).
    for k in range(len(raw)):
        if totals_by_row[k, 0] == totals_by_col[0, k] == 0:
            normalized[k, k] = -1  # No values to compute from.
        else:
            rec = raw[k, k] / (totals_by_row[k, 0] + EPS)
            prec = raw[k, k] / (totals_by_col[0, k] + EPS)
            normalized[k, k] = harmonic_mean(rec, prec)

    return ConfusionMatrix(raw, normalized)
def load_inferred(inference_path, extractors, whitelist):
    """Load inferred rows from a gzipped newline-json file and generate
    comparison data by feeding every (whitelisted) row to `extractors`.
    """
    with gzip.GzipFile(inference_path) as fileobj, \
            nlj.open(fileobj, json_lib='ujson') as source:
        for row in source:
            if whitelist is not None and row['mmsi'] not in whitelist:
                continue
            # Date parsing is expensive and shared by every extractor, so
            # do it exactly once per row, up front.
            row['start_time'] = _parse(row['start_time'])
            for extractor in extractors:
                extractor.extract(row)
    for extractor in extractors:
        extractor.finalize()
# Conceptually an InferenceResult
# TODO: fix to make true subclass or return true inference result at finalization time or something.
def assemble_composite(results, mapping):
    """Remap inference results onto composite classes.

    Args:
        results: InferenceResults instance
        mapping: sequence of (composite_key, {base_keys})

    Returns:
        InferenceResults instance with classes remapped according to
        `mapping` (each composite score is the sum of its base scores).
    """
    label_list = [composite for (composite, _) in mapping]

    # Reverse index: base label -> composite label.
    inverse_mapping = {}
    for composite, bases in mapping:
        for base in bases:
            inverse_mapping[base] = composite

    # NOTE(review): kept from the original, but apparently unused below.
    base_label_map = {x: i for (i, x) in enumerate(results.label_list)}

    inferred_scores = []
    inferred_labels = []
    true_labels = []
    start_dates = []
    for i, mmsi in enumerate(results.all_mmsi):
        scores = {composite: sum(results.all_scores[i][base] for base in bases)
                  for (composite, bases) in mapping}
        inferred_scores.append(scores)
        inferred_labels.append(max(scores, key=scores.get))
        old_label = results.all_true_labels[i]
        true_labels.append(None if old_label is None
                           else inverse_mapping[old_label])
        start_dates.append(results.all_start_dates[i])

    return InferenceResults(
        trim(results.all_mmsi), trim(inferred_labels), trim(true_labels),
        trim(start_dates), trim(inferred_scores), label_list,
        np.array(results.all_mmsi), np.array(inferred_labels),
        np.array(true_labels), np.array(start_dates),
        np.array(inferred_scores))
# Directory containing this module; anchors the temp directory here so it
# does not depend on the current working directory.
this_dir = os.path.dirname(os.path.abspath(__file__))
temp_dir = os.path.join(this_dir, 'temp')
if __name__ == '__main__':
    # Command-line entry point: compute metrics for an inference run and
    # write an HTML report.
    logging.getLogger().setLevel(logging.DEBUG)
    arg_parser = argparse.ArgumentParser(
        description='Test inference results and output metrics.\n')
    # All three paths are mandatory.
    for flag, help_text in [
            ('--inference-table', 'path to inference results'),
            ('--label-path', 'path to test data'),
            ('--dest-path', 'path to write results to')]:
        arg_parser.add_argument(flag, help=help_text, required=True)
    parsed = arg_parser.parse_args()
    results = compute_results(parsed)
    dump_html(parsed, results)
| [
198,
37811,
198,
198,
16281,
25,
628,
198,
29412,
24061,
62,
4164,
10466,
13,
9078,
220,
220,
220,
220,
1377,
259,
4288,
12,
6978,
17923,
62,
43420,
13,
17752,
13,
34586,
220,
220,
220,
220,
3467,
198,
220,
220,
220,
220,
220,
220,
... | 2.112036 | 9,613 |
import os
import glob
import math
import subprocess
import re
import shutil
from decimal import Decimal
from astropy.io import fits
from astropy import wcs

# MODIFY THESE FIELDS AS NEEDED!
# input path *with* ending forward slash
input_path = './'
# output path *with* ending forward slash
# output_path='./renamed/'
output_path = './'

# Make sure the output directory exists; ignore "already exists".
try:
    os.mkdir(output_path)
except OSError:
    pass

# Get a list of all FITS files in the input directory.
im = glob.glob(input_path + '*.fits') + glob.glob(input_path + '*.fit')

# For every qualifying file: replace spaces in the name, then rename the
# file to embed its observation timestamp.
for original_name in im:
    # Replace spaces so the name is safe for downstream tooling.
    # (BUG fix: the original rebuilt the name character by character;
    # str.replace is equivalent and clearer.)
    new = original_name.replace(' ', '_')
    os.rename(original_name, new)
    print("\nRenaming %s" % (new))

    # Pull the observation date/time out of the FITS header.
    # BUG fix: the original closed the file *before* reading the header;
    # read inside the context manager instead.
    with fits.open(new) as hdulist:
        try:
            date_obs = hdulist[0].header['DATE-OBS']
        except KeyError:
            # BUG fix: was a Python 2 `print` statement, inconsistent with
            # the print() calls elsewhere in this script.
            print("Error! Observation date/time not found in FITS header for %s." % (new))
            quit()

    # Turn e.g. "2020-01-02T03:04:05" into ".2020_01_02T03_04_05".
    date_obs = '.' + date_obs.replace(':', '_').replace('-', '_')

    # Create the renamed FITS file: <basename><timestamp>.fits in output_path.
    output_file = new.rsplit('.', 1)[0] + date_obs + '.fits'
    output_file = output_path + output_file.rsplit('/', 1)[1]
    print("Writing renamed file to " + output_file)
    # BUG fix: os.system("mv ...") breaks on special characters and is
    # shell-dependent; shutil.move is portable and safe.
    shutil.move(new, output_file)
11748,
28686,
198,
11748,
15095,
198,
11748,
10688,
198,
11748,
850,
14681,
198,
11748,
302,
198,
6738,
32465,
1330,
4280,
4402,
198,
6738,
6468,
28338,
13,
952,
1330,
11414,
198,
6738,
6468,
28338,
1330,
266,
6359,
198,
198,
2,
33365,
... | 2.459507 | 568 |
import json
import os
import pathlib
import pickle
import tarfile

import joblib
import numpy as np
import pandas as pd
import xgboost
from sklearn.metrics import accuracy_score, classification_report, roc_auc_score

# Fixes vs. original: duplicate `pandas`/`xgboost` imports collapsed,
# placeholder-less f-string removed, model file handle closed, json.dump
# used for serialization.

if __name__ == "__main__":
    # Unpack the trained model artifact produced by the training job.
    model_path = "/opt/ml/processing/model/model.tar.gz"
    with tarfile.open(model_path) as tar:
        tar.extractall(path=".")

    # NOTE(review): extractall on an untrusted archive can write outside
    # the target dir; consider the `filter="data"` argument (Py >= 3.12).
    with open("xgboost-model", "rb") as model_file:
        model = pickle.load(model_file)

    print("Loading test input data")
    test_path = "/opt/ml/processing/test/test.csv"
    df = pd.read_csv(test_path, header=None)

    # First column is the label; remaining columns are features.
    y_test = df.iloc[:, 0].to_numpy()
    df.drop(df.columns[0], axis=1, inplace=True)
    X_test = xgboost.DMatrix(df.values)

    predictions = model.predict(X_test)

    print("Creating classification evaluation report")
    acc = accuracy_score(y_test, predictions.round())
    auc = roc_auc_score(y_test, predictions.round())

    # The metrics reported can change based on the model used,
    # but it must be a specific name per
    # (https://docs.aws.amazon.com/sagemaker/latest/dg/model-monitor-model-quality-metrics.html)
    report_dict = {
        "binary_classification_metrics": {
            "accuracy": {
                "value": acc,
                "standard_deviation": "NaN",
            },
            "auc": {"value": auc, "standard_deviation": "NaN"},
        },
    }

    print("Classification report:\n{}".format(report_dict))
    evaluation_output_path = os.path.join("/opt/ml/processing/evaluation", "evaluation.json")
    print("Saving classification report to {}".format(evaluation_output_path))
    with open(evaluation_output_path, "w") as f:
        json.dump(report_dict, f)
| [
11748,
28686,
198,
11748,
33918,
198,
11748,
3108,
8019,
198,
11748,
2298,
293,
198,
11748,
13422,
7753,
198,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
2124,
70,
39521,
198,
198,
11748,
1693,
8019,
198,
11748,
299,
32152,
355,
4... | 2.446779 | 714 |
from math import pi, sqrt
import numpy as np
import numpy.ma as ma
from numpy.linalg import cholesky, solve
from copy import copy, deepcopy
import choldate
import logging
from . import loggers
from .wrappers import exp, log, gammaln
logger = logging.getLogger(__name__)
def choleskyQuadForm(L, b):
    """Evaluate the quadratic form b' * A^(-1) * b from the Cholesky factor of A.

    Solving the triangular system L x = b gives x = L^(-1) b, and since
    A = L L', the form reduces to x . x.

    Args:
        L (np.ndarray): Lower triangular Cholesky factor of A [shape=(d,d)]
        b (np.ndarray): vector [shape=(d,)]
    """
    x = solve(L, b)
    return x.dot(x)
def choleskyDet(L):
    """Compute |A| = (product[i=1-->d]{ L_[i,i] })**2, the square of the
    product of the diagonal elements of L, where L is the lower triangular
    Cholesky factor of A.

    Args:
        L (np.ndarray): Lower triangular matrix [shape=(d,d)]
    """
    # BUG fix: np.product is deprecated and removed in NumPy 2.0;
    # np.prod is the canonical spelling.
    return np.prod(np.diagonal(L)) ** 2
def choleskyLogDet(L):
    """Compute ln(|A|) = 2*sum[i=1-->d]{ ln(L_[i,i]) }, twice the sum of the
    log of the diagonal elements of L, the lower triangular Cholesky factor
    of A.

    Args:
        L (np.ndarray): Lower triangular matrix [shape=(d,d)]
    """
    diag = np.diagonal(L)
    return 2 * np.log(diag).sum()
class ModelEvidenceNIW():
    """Wrapper for updating cluster evidence through insertion/removal operations of each data item"""
    # Normal-Inverse-Wishart-style evidence: pseudo-counts (n, k), mean
    # vector mu, and an upper-triangular factor U of the scale matrix.
    def __init__(self, n, k, mu, cov, U=None):
        """initialize the prior"""
        mu = np.atleast_1d(mu)
        cov = np.atleast_2d(cov)
        self._count = 0      # number of data items currently in the cluster
        self._dim = mu.size  # data dimensionality d
        self._n = n
        self._k = k
        self._mu = mu
        # Upper-triangular factor of (cov + k * mu mu'); an explicit U, if
        # given, overrides the computed one.
        self._U = cholesky(cov + k*np.outer(mu, mu)).T
        if U is not None: self._U = np.atleast_2d(U)
        self.enabled = True
        # caching
        self._cache = {}
        # validate hyperparam settings
        assert n >= self._dim
        assert k > 0
        assert cov.shape == tuple([self._dim]*2)
    def _resetCache(self):
        """remove all cached vars"""
        self._cache = {}
    def disable(self):
        """throw error when making changes to disabled object"""
        self.enabled = False
    # NOTE(review): the following stack of bare @property decorators (and a
    # @dim.setter referencing the undefined name `dim`) looks like mangled
    # or truncated source -- the property method bodies appear to be
    # missing.  As written, every decorator below applies to
    # logMarginalLikelihood, and evaluating `@dim.setter` at class-creation
    # time raises NameError.  Reconstruct the missing dim/n/k/mu/nu/L/...
    # properties (which logMarginalLikelihood reads) before using this
    # class.
    @property
    @property
    @dim.setter
    @property
    @property
    @property
    @property
    @property
    @property
    def logMarginalLikelihood(self, x):
        """Computes marginal likelihood for a data vector given the model evidence of a Gaussian-Wishart
        conjugate prior model, using a Cholesky decomposition of the scaling matrix to increase efficiency.
        All input arguments are assumed to be exclusive to cluster k already, thus the result is the marginal
        likelihood that data x is in cluster k. Repeated execution with varying args is necessary to produce the
        full marginal likelihood over all clusters.
        Args:
            x (np.ndarray): data vector [shape=(d,)]
        Returns:
            float: log marginal likelihood that data x belongs to this cluster
        """
        # intermediate steps - bayesian param update is already stored in evidence
        # NOTE(review): dim/nu/n/mu/L are presumably supplied by the missing
        # properties flagged above -- confirm once they are restored.
        d = self.dim
        nu = self.nu
        n = self.n
        mu = self.mu
        L = self.L
        # compute student's T density given updated params
        t1 = gammaln(0.5*(n+1)) - gammaln(0.5*nu)
        t2 = - 0.5*(d*log(nu*pi) + choleskyLogDet(L))
        t3 = - (0.5*(n+1))*log(1+(1/nu)*choleskyQuadForm(L, x-mu))
        tdensln = t1 + t2 + t3
        # A log-density of a proper probability must be <= 0; anything else
        # signals a numerical/parameter problem, reported with full context.
        if not tdensln<=0:
            tdens = exp(tdensln)
            msg = \
                "evidence:\n{}\n".format(self) + \
                "data: {}\n".format(x) + \
                "term 1: {}\n".format(t1) + \
                "term 2: {}\n".format(t2) + \
                "term 3: {}\n".format(t3) + \
                "tdensln: {}\n".format(tdensln) + \
                "tdens: {}".format(tdens)
            raise ValueError('result of marginal likelihood is not a valid probability between 0->1: {:0.3e}\n{}'.format(tdens, msg))
        return tdensln
    def jointLogMarginalLikelihood(self, dataset):
        """Compute joint marginal likelihood for a set of data items assuming IID"""
        accum = 0
        for x in dataset:
            accum += self.logMarginalLikelihood(x)
        return accum
| [
6738,
10688,
1330,
31028,
11,
19862,
17034,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
299,
32152,
13,
2611,
355,
17266,
198,
6738,
299,
32152,
13,
75,
1292,
70,
1330,
442,
4316,
2584,
11,
8494,
198,
6738,
4866,
1330,
4866,
11,
2... | 2.347466 | 1,934 |
"""
MIT License
Copyright (c) 2021 mooncell07
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import inspect
import warnings
from typing import Optional, Union, Any
from .enums import Camera
__all__ = (
"repr_gen",
"validate_cam",
)
def repr_gen(obj: Any) -> str:
    """
    Build a repr string for obj from its public, non-method attributes.
    """
    pairs = []
    for name, value in inspect.getmembers(obj):
        if name.startswith("_") or inspect.ismethod(value):
            continue
        pairs.append(f"{name}={repr(value)}")
    return f"{obj.__class__.__name__}({', '.join(pairs)})"
def validate_cam(
    sprswrngs: bool, camera: Optional[Union[Camera, str]] = None
) -> Optional[str]:
    """
    Validate the camera input and return its canonical API value.

    Args:
        sprswrngs: when True, suppress the warning emitted for an invalid
            camera value.
        camera: a Camera enum member, or its case-insensitive string name.

    Returns:
        The camera's string value, or None when camera is absent or invalid.
    """
    # Guard clause: no camera requested.
    if camera is None:
        return None
    try:
        cam: str = Camera(
            camera.upper() if isinstance(camera, str) else camera
        ).value
        return cam
    except ValueError:
        if not sprswrngs:
            warnings.warn(
                "Invalid value was passed for camera. "
                "Making request without camera."
            )
        # Invalid camera is treated as no camera at all.  (The original
        # ended with a dead `camera = None` assignment and fell off the end
        # of the function; the implicit None return is now explicit.)
        return None
| [
37811,
198,
36393,
13789,
198,
198,
15269,
357,
66,
8,
33448,
8824,
3846,
2998,
198,
198,
5990,
3411,
318,
29376,
7520,
11,
1479,
286,
3877,
11,
284,
597,
1048,
16727,
257,
4866,
198,
1659,
428,
3788,
290,
3917,
10314,
3696,
357,
1169... | 2.71195 | 795 |
'''
Entry point. Init logging, initialize component factory,
start asyncio event loop, manage components lifecycle
'''
import logging
import yaml
import pytoml
import json
import sys
from .loadtest import LoadTest
LOG = logging.getLogger(__name__)
def init_logging(debug=False, filename='bfg.log'):
    '''Configure logging: a full debug log always goes to `filename`,
    while the console shows INFO (or DEBUG, verbosely formatted, when
    `debug` is set).
    '''
    default_formatter = logging.Formatter(
        "%(asctime)s [%(levelname)s] %(name)s: %(message)s", "%H:%M:%S")
    dbg_formatter = logging.Formatter(
        "%(asctime)s [%(levelname)s] %(name)s: %(message)s")

    # File handler captures everything.
    # BUG fix: the original called setLevel(debug), passing a bool where a
    # logging level is expected (False == NOTSET).
    dbg_handler = logging.FileHandler(filename)
    dbg_handler.setLevel(logging.DEBUG)
    dbg_handler.setFormatter(dbg_formatter)

    # Console handler: verbose format and level only in debug mode.
    cmd_handler = logging.StreamHandler(sys.stdout)
    cmd_handler.setLevel(logging.DEBUG if debug else logging.INFO)
    cmd_handler.setFormatter(dbg_formatter if debug else default_formatter)

    # Quiet the chatty "hyper" HTTP library.
    logging.getLogger("hyper").setLevel(logging.WARNING)

    # Configure the root logger.  (A never-attached WARN StreamHandler and
    # a redundant second addHandler(dbg_handler) were removed — both were
    # no-ops in the original.)
    root = logging.getLogger("")
    root.setLevel(logging.DEBUG)
    root.addHandler(cmd_handler)
    root.addHandler(dbg_handler)
def main():
    '''Run test: read the config file named on the command line (default
    load.yaml), parse it according to its extension, and run the load test.

    Returns:
        1 on a config-file error; None otherwise.
    '''
    config_filename = "load.yaml"
    if len(sys.argv) > 1:
        config_filename = sys.argv[1]
    filename_components = config_filename.split('.')
    if len(filename_components) < 2:
        # BUG fix: this path previously left `config` unbound and (in the
        # no-extension case only) returned 1 before using it.
        print(
            "Config file should have one of the following extensions:"
            " .toml, .json, .yaml")
        return 1
    extension = filename_components[-1]
    with open(config_filename, 'rb') as fin:
        if extension == 'toml':
            config = pytoml.load(fin)
        elif extension in ['yaml', 'yml']:
            # safe_load: yaml.load without an explicit Loader is unsafe
            # (can execute arbitrary constructors) and deprecated.
            config = yaml.safe_load(fin)
        elif extension == 'json':
            config = json.load(fin)
        else:
            print("Config file has unsupported format: %s" % extension)
            # BUG fix: the original fell through here and crashed later
            # with NameError on the unbound `config`.
            return 1
    init_logging()
    lt = LoadTest(config)
    lt.run_test()
# Script entry point.
if __name__ == '__main__':
    main()
| [
7061,
6,
198,
30150,
966,
13,
44707,
18931,
11,
41216,
7515,
8860,
11,
198,
9688,
30351,
952,
1785,
9052,
11,
6687,
6805,
3868,
47510,
198,
7061,
6,
198,
198,
11748,
18931,
198,
11748,
331,
43695,
198,
11748,
12972,
39532,
75,
198,
11... | 2.410618 | 923 |
import numpy as np
import matplotlib.pyplot as plt
import os
import shutil

# Generate the figures in the same folder
os.chdir(os.path.dirname(__file__))

# Fixed seed so the synthetic dataset (and hence the figures) is reproducible.
rng = np.random.RandomState(42)

# 2D parameter space:
n_steps = 200
w1 = np.linspace(-2.5, 2.5, n_steps)
w2 = np.linspace(-2.5, 2.5, n_steps)
w1, w2 = np.meshgrid(w1, w2)

# 30 points dataset
n = 30
x = np.abs(rng.randn(n) + 1)
y = 2 * x + 0.5 * rng.randn(n)  # f(x) = 2x + noise

# Mean squared error of the model over the whole dataset, evaluated on the
# (w1, w2) parameter grid.
# NOTE(review): mini_mlp is not defined anywhere in this chunk -- it is
# presumably defined elsewhere in the original file (a tiny 2-parameter
# ReLU net, per the plot titles); verify before running.
loss = 0.
for x_i, y_i in zip(x, y):
    loss += (y_i - mini_mlp(w1, w2, x_i)) ** 2
loss /= len(x)

# Plot output surface
fig = plt.figure(figsize=(8, 8))
plt.pcolormesh(w1, w2, loss, cmap=plt.cm.afmhot_r)
plt.contour(w1, w2, loss, 40, colors='k', alpha=0.3)
plt.xlabel('$w_1$')
plt.ylabel('$w_2$')
plt.title('Loss function of a ReLU net with 2 params')
fig.savefig("full_data_mlp_loss_landscape.png", dpi=80)

# SGD loss: one frame per data point, showing the per-sample loss surface.
vmin = 0
vmax = loss.max() * 1.5
folder = "tmp_loss_frames"
shutil.rmtree(folder, ignore_errors=True)
os.makedirs(folder)
for i in range(len(x)):
    loss_i = (y[i] - mini_mlp(w1, w2, x[i])) ** 2
    fig = plt.figure(figsize=(8, 8))
    cmesh = plt.pcolormesh(w1, w2, loss_i, vmin=vmin, vmax=vmax,
                           cmap=plt.cm.afmhot_r)
    contour = plt.contour(w1, w2, loss_i, 40, colors='k', alpha=0.3)
    plt.text(-2, 1, "x = %0.2f ; y = %0.2f" % (x[i], y[i]))
    plt.xlabel('$w_1$')
    plt.ylabel('$w_2$')
    plt.title('Loss function of a ReLU net with 2 params')
    filename = '%s/loss_%03d.png' % (folder, i)
    print('saving %s...' % filename)
    fig.savefig(filename, dpi=80)

# Assemble the frames into an animated GIF with ImageMagick, then clean up.
# NOTE(review): requires the `convert` binary on PATH.
cmd = ("convert -resize 640x640 -delay 100 -loop 0 %s/*.png"
       " sgd_mlp_loss_landscape.gif" % folder)
print(cmd)
os.system(cmd)
shutil.rmtree(folder, ignore_errors=True)
| [
11748,
299,
32152,
355,
45941,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
28686,
198,
11748,
4423,
346,
198,
198,
2,
2980,
378,
262,
5538,
287,
262,
976,
9483,
198,
418,
13,
354,
15908,
7,
418,
13,
697... | 2.018519 | 864 |
from __future__ import division, print_function
# This file has a bunch of settings that can be set via the Unity project settings.
# It also has some hacks for compatibility between versions of Python and Windows.
import getpass
import os
import tempfile

# Unix socket support.
# The socket path is namespaced by user name so multiple users on the same
# machine do not collide, and lives in the system temp directory.
whoami = getpass.getuser()
tempdir = tempfile.gettempdir()
unity_server_path = os.path.join(tempdir,
    "com.unity.scripting.python-{}.socket".format(whoami)
    )

# When processing jobs, we give clients this much time (in seconds) to respond
# within the same processing interval or wait for the next editor update.
process_jobs_max_batch_time = 1 / 90
| [
6738,
11593,
37443,
834,
1330,
7297,
11,
3601,
62,
8818,
198,
198,
2,
770,
2393,
468,
257,
7684,
286,
6460,
326,
460,
307,
900,
2884,
262,
18714,
1628,
6460,
13,
198,
2,
632,
635,
468,
617,
30445,
329,
17764,
1022,
6300,
286,
11361,... | 3.564246 | 179 |
from django import http
from django.conf import settings
from django.contrib.auth import logout
from django.core.urlresolvers import translate_url
from django.http import Http404
from django.http import HttpResponseRedirect
from django.utils.http import is_safe_url
from django.utils.translation import check_for_language
from django.utils.translation import LANGUAGE_SESSION_KEY
from django.utils.translation import ugettext_lazy as _
from django.views.generic.base import View
from django.views.i18n import LANGUAGE_QUERY_PARAMETER
from kolibri.core.auth.constants import user_kinds
from kolibri.core.auth.models import Role
from kolibri.core.hooks import RoleBasedRedirectHook
# Modified from django.views.i18n
def set_language(request):
    """
    Redirect to a given url while setting the chosen language in the
    session or cookie. The url and the language code need to be
    specified in the request parameters.

    Since this view changes how the user will see the rest of the site, it must
    only be accessed as a POST request. If called as a GET request, it will
    redirect to the page in the request (the 'next' parameter) without changing
    any state.
    """
    # Renamed from `next` to avoid shadowing the builtin.
    next_url = request.POST.get('next', request.GET.get('next'))
    # Fall back to the referer, then '/', if the requested url is unsafe.
    if not is_safe_url(url=next_url, host=request.get_host()):
        next_url = request.META.get('HTTP_REFERER')
        if not is_safe_url(url=next_url, host=request.get_host()):
            next_url = '/'
    response = http.HttpResponseRedirect(next_url)
    if request.method == 'POST':
        lang_code = request.POST.get(LANGUAGE_QUERY_PARAMETER)
        if lang_code and check_for_language(lang_code):
            # Redirect to the translated counterpart of next_url when one
            # exists.
            next_trans = translate_url(next_url, lang_code)
            if next_trans != next_url:
                response = http.HttpResponseRedirect(next_trans)
            if hasattr(request, 'session'):
                request.session[LANGUAGE_SESSION_KEY] = lang_code
            # Always set cookie
            response.set_cookie(settings.LANGUAGE_COOKIE_NAME, lang_code,
                                max_age=settings.LANGUAGE_COOKIE_AGE,
                                path=settings.LANGUAGE_COOKIE_PATH,
                                domain=settings.LANGUAGE_COOKIE_DOMAIN)
    return response
| [
6738,
42625,
14208,
1330,
2638,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
1330,
2604,
448,
198,
6738,
42625,
14208,
13,
7295,
13,
6371,
411,
349,
690,
1330,
15772,
62,
6371,
198,
... | 2.532731 | 886 |
# redirection, so we can use subtree like pip
from .cv_pubsubs import webcam_pub, window_sub
| [
2,
2266,
4154,
11,
523,
356,
460,
779,
13284,
631,
588,
7347,
198,
6738,
764,
33967,
62,
12984,
7266,
82,
1330,
49823,
62,
12984,
11,
4324,
62,
7266,
198
] | 3.206897 | 29 |
from dataclasses import dataclass
from typing import Optional
@dataclass
| [
6738,
4818,
330,
28958,
1330,
4818,
330,
31172,
198,
6738,
19720,
1330,
32233,
628,
198,
31,
19608,
330,
31172,
198
] | 3.75 | 20 |
#
# e32calendar.py
#
# Copyright (c) 2006-2009 Nokia Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import _calendar
# maps
# Human-readable replication status names -> native _calendar constants,
# plus the reverse mapping for reading values back out of entries.
replicationmap={"open":_calendar.rep_open,
                "private":_calendar.rep_private,
                "restricted":_calendar.rep_restricted}
# NOTE(review): `revdict` is not defined in this chunk -- presumably a
# dict-inverting helper defined elsewhere in this module; verify.
_replicationreversemap=revdict(replicationmap)
# Entry type names -> native _calendar entry-type constants, with reverse.
entrytypemap={"appointment":_calendar.entry_type_appt,
              "event":_calendar.entry_type_event,
              "anniversary":_calendar.entry_type_anniv,
              "todo":_calendar.entry_type_todo,
              "reminder":_calendar.entry_type_reminder}
_entrytypereversemap=revdict(entrytypemap)
# Calendar database class
# Module methods
| [
2,
198,
2,
304,
2624,
9948,
9239,
13,
9078,
198,
2,
198,
2,
15069,
357,
66,
8,
4793,
12,
10531,
26182,
10501,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,... | 2.78972 | 428 |
from sys import stdin, stdout

# For each of t test cases: read n, s, k and a list `a` of blocked values,
# then print the smallest offset j such that s+j (within 1..n) or s-j
# (>= 1) is not blocked.  (k is read but unused, as in the input spec.)
num_cases = int(stdin.readline())
for _ in range(num_cases):
    n, s, k = (int(tok) for tok in stdin.readline().split())
    blocked = [int(tok) for tok in stdin.readline().split()]
    for offset in range(n + 1):
        up_ok = s + offset <= n and s + offset not in blocked
        down_ok = s - offset > 0 and s - offset not in blocked
        if up_ok or down_ok:
            stdout.write('{}\n'.format(offset))
            break
6738,
25064,
1330,
14367,
259,
11,
14367,
448,
198,
198,
83,
796,
493,
7,
19282,
259,
13,
961,
1370,
28955,
198,
1640,
1312,
287,
2837,
7,
83,
2599,
198,
220,
299,
11,
264,
11,
479,
796,
3975,
7,
600,
11,
14367,
259,
13,
961,
13... | 2.297101 | 138 |
import time
import pytest
import test_vnet as vnet
from swsscommon import swsscommon
from flaky import flaky
# Define fake platform for "DVS" fixture, so it will set "platform" environment variable for "orchagent".
# It is needed in order to enable platform specific "orchagent" code for testing "bitmap" VNET implementation.
DVS_FAKE_PLATFORM = "mellanox"
'''
Provides test cases for the "bitmap" VNET implementation.
Test cases are inherited from "test_vnet.py::TestVnetOrch" since they are the same for both "legacy" and "bitmap" implementation.
Difference between these two implementations is in set SAI attributes, so different values should be checked in ASIC_DB.
This class should override "get_vnet_obj()" method in order to return object with appropriate implementation of "check" APIs.
'''
@pytest.mark.flaky
class TestVnetBitmapOrch(vnet.TestVnetOrch):
'''
Returns specific VNET object with the appropriate implementation of "check" APIs for the "bitmap" VNET.
Test cases use these "check" APIs in order to verify whether correct config is applied to ASIC_DB.
'''
| [
11748,
640,
198,
11748,
12972,
9288,
198,
198,
11748,
1332,
62,
85,
3262,
355,
410,
3262,
198,
198,
6738,
1509,
824,
11321,
1330,
1509,
824,
11321,
198,
6738,
781,
15492,
1330,
781,
15492,
628,
198,
2,
2896,
500,
8390,
3859,
329,
366,... | 3.496815 | 314 |
import csv, difflib, functools, logging, re, os, sys, pickle
import unicodedata, unidecode
from collections import defaultdict, Counter
from fuzzywuzzy import fuzz
from postal.parser import parse_address
import enchant
# Spell-check dictionaries used for token validation.
DICTS = [ enchant.Dict("en_US") ]
# Tags distinguishing which side of a comparison a record came from.
SOURCE = 1
REFERENCE = 2
# Tuning knobs for the fuzzy-matching pipeline.
MIN_STRING_SCORE = 20
REQUIRES_SHARED_PROPER_NOUN = True
DONT_DISCRIMINATE = False
EXCLUDE_ADDRESS_AS_LABEL = False
FORBID_DUPE_ACRONYMS = False
ADD_VARIANT_WITHOUT_COUNTRY = False
# Bracketing patterns an acronym may appear in, e.g. "[CNRS]" or "(CNRS)".
ACRO_PATTERNS = ['[{}]', '({})']
# Capture group for a French research-unit number, e.g. "UMR 5217".
UR_REGEX_GRP = '([A-Z0-9]{2,3}[0-9])' # '([A-Z]?[0-9]{3,4})'
# Unit-kind regexes: labels allow an optional spaced dash ("UMR - 5217"),
# URL forms are the kind and number fused together ("UMR5217").
UR_REGEXES_LABEL = dict([(kind, '\\b' + kind + ' ?-? ?' + UR_REGEX_GRP + '\\b') for kind in ['UR', 'UFR', 'UMR', 'UPR', 'CNR', 'EA', 'CNRS']])
UR_REGEXES_URL = dict([(kind, '\\b' + kind + UR_REGEX_GRP + '\\b') for kind in ['UR', 'UFR', 'UMR', 'UPR', 'CNR', 'EA', 'CNRS']])
# Address components a parsed postal address must contain to be usable.
REQUIRED_ADDR_FEATURES = set(['road', 'city', 'country'])
def isValidValue(v):
    ''' Validates a single value (sufficient non-empty data and such things) '''
    # BUG fix: the original read `stripped = stripped(v)`, which raises
    # UnboundLocalError (the local is referenced before assignment).  The
    # intent appears to be whitespace stripping -- confirm against any
    # `stripped()` helper elsewhere in the original file.
    stripped = v.strip()
    return len(stripped) > 0 and stripped not in ['null', 'NA', 'N/A']
# Acronym length bounds: tokens of 3..6 chars may be preserved as acronyms.
MIN_ACRO_SIZE = 3
MAX_ACRO_SIZE = 6
def lowerOrNot(token, keepAcronyms, keepInitialized = False):
    ''' Set keepAcronyms to true in order to improve precision (e.g. a CAT scan will not be matched by a kitty). '''
    # Preserve plausible acronyms unchanged.
    # NOTE(review): isAcroToken is not defined in this chunk -- presumably
    # defined elsewhere in the original file; verify.
    if keepAcronyms and len(token) >= MIN_ACRO_SIZE and len(token) <= MAX_ACRO_SIZE and isAcroToken(token):
        return token
    if keepInitialized:
        # Preserve a leading letter+digits prefix (e.g. "A12") when followed
        # by a non-letter separator, lowercasing only the remainder.
        m = re.search("([A-Z][0-9]+)[^'a-zA-Z].*", token)
        if m:
            # NOTE(review): group(0) is the WHOLE match including the
            # trailing `.*`, so the recursive call usually receives an
            # empty remainder; group(1) may have been intended -- confirm.
            toKeep = m.group(0)
            return toKeep + lowerOrNot(token[len(toKeep):], keepAcronyms, keepInitialized)
    return token.lower()
# NOTE(review): the default arguments reference isValidToken and
# isValidPhrase, and the body calls validateTokens -- none are defined in
# this chunk; they are presumably defined earlier in the original file.
def normalizeAndValidatePhrase(value,
    keepAcronyms = False, tokenValidator = functools.partial(isValidToken, minLength = 2), phraseValidator = isValidPhrase):
    ''' Returns a string that joins normalized, valid tokens for the input phrase
    (None if no valid tokens were found) '''
    tokens = validateTokens(value, keepAcronyms, tokenValidator, phraseValidator)
    return ' '.join(tokens) if len(tokens) > 0 else None
def fileToVariantMap(fileName, sep = '|', includeSelf = False):
    ''' The input format is pipe-separated, column 1 is the main variant, the other columns alternative variants.
        Returns a reverse index, namely a map from original alternative variant to original main variant.
        Parameters:
            includeSelf if True, then the main variant will be included in the list of alternative variants
                        (so as to enable partial matching simultaneously).
    '''
    altToMains = defaultdict(list)
    mainToAlts = defaultdict(list)
    for row in fileRowIterator(fileName, sep):
        # Rows without at least one alternative carry no mapping.
        if len(row) < 2:
            continue
        mainVariant = row[0]
        for altVariant in row[1:]:
            altToMains[altVariant].append(mainVariant)
        mainToAlts[mainVariant].extend(row[1:])
    result = {}
    # Self-mappings first, so an alternative mapping below may override one.
    if includeSelf:
        for mainVariant in mainToAlts:
            result[mainVariant] = mainVariant
    # Keep only unambiguous alternatives (exactly one main variant).
    for altVariant, mains in altToMains.items():
        if len(mains) < 2:
            result[altVariant] = mains[0]
    return result
# French lexicon loaded at import time from a one-word-per-line resource file
# (fileToList is defined elsewhere in the project — file I/O at import).
FRENCH_WORDS = set(fileToList('liste_mots_fr.col'))
# French stop words, kept as an explicit literal.
STOP_WORDS_FR = set([
    # Prepositions (excepted "avec" and "sans" which are semantically meaningful)
    "a", "au", "aux", "de", "des", "du", "par", "pour", "sur", "chez", "dans", "sous", "vers",
    # Articles
    "le", "la", "les", "l", "c", "ce", "ca",
    # Conjonctions of coordination
    "mais", "et", "ou", "donc", "or", "ni", "car",
    ])
# English stop words.
# BUG FIX: the original code did `','.split("a, aboard, ...")`, which splits
# the ONE-CHARACTER string "," using the huge word list as the separator and
# therefore produced the set {","}. Split the word list on "," instead, and
# strip the surrounding whitespace of every entry.
STOP_WORDS_EN = set(w.strip() for w in (
    "a, aboard, about, above, across, after, again, against, all, almost, alone, along, alongside, already, also, although, always, am, amid, amidst, among, amongst, an, and, another, anti, any, anybody, anyone, anything, anywhere, are, area, areas, aren't, around, as, ask, asked, asking, asks, astride, at, aught, away, back, backed, backing, backs, bar, barring, be, became, because, become, becomes, been, before, began, behind, being, beings, below, beneath, beside, besides, best, better, between, beyond, big, both, but, by, came, can, can't, cannot, case, cases, certain, certainly, circa, clear, clearly, come, concerning, considering, could, couldn't, daren't, despite, did, didn't, differ, different, differently, do, does, doesn't, doing, don't, done, down, down, downed, downing, downs, during, each, early, either, end, ended, ending, ends, enough, even, evenly, ever, every, everybody, everyone, everything, everywhere, except, excepting, excluding, face, faces, fact, facts, far, felt, few, fewer, find, finds, first, five, following, for, four, from, full, fully, further, furthered, furthering, furthers, gave, general, generally, get, gets, give, given, gives, go, goes, going, good, goods, got, great, greater, greatest, group, grouped, grouping, groups, had, hadn't, has, hasn't, have, haven't, having, he, he'd, he'll, he's, her, here, here's, hers, herself, high, high, high, higher, highest, him, himself, his, hisself, how, how's, however, i, i'd, i'll, i'm, i've, idem, if, ilk, important, in, including, inside, interest, interested, interesting, interests, into, is, isn't, it, it's, its, itself, just, keep, keeps, kind, knew, know, known, knows, large, largely, last, later, latest, least, less, let, let's, lets, like, likely, long, longer, longest, made, make, making, man, many, may, me, member, members, men, might, mightn't, mine, minus, more, most, mostly, mr, mrs, much, must, mustn't, my, myself, naught, near, necessary, need, needed,"
    " needing, needn't, needs, neither, never, new, new, newer, newest, next, no, nobody, non, none, noone, nor, not, nothing, notwithstanding, now, nowhere, number, numbers, of, off, often, old, older, oldest, on, once, one, oneself, only, onto, open, opened, opening, opens, opposite, or, order, ordered, ordering, orders, other, others, otherwise, ought, oughtn't, our, ours, ourself, ourselves, out, outside, over, own, part, parted, parting, parts, past, pending, per, perhaps, place, places, plus, point, pointed, pointing, points, possible, present, presented, presenting, presents, problem, problems, put, puts, quite, rather, really, regarding, right, right, room, rooms, round, said, same, save, saw, say, says, second, seconds, see, seem, seemed, seeming, seems, seen, sees, self, several, shall, shan't, she, she'd, she'll, she's, should, shouldn't, show, showed, showing, shows, side, sides, since, small, smaller, smallest, so, some, somebody, someone, something, somewhat, somewhere, state, states, still, still, such, suchlike, sundry, sure, take, taken, than, that, that's, the, thee, their, theirs, them, themselves, then, there, there's, therefore, these, they, they'd, they'll, they're, they've, thine, thing, things, think, thinks, this, those, thou, though, thought, thoughts, three, through, throughout, thus, thyself, till, to, today, together, too, took, tother, toward, towards, turn, turned, turning, turns, twain, two, under, underneath, unless, unlike, until, up, upon, us, use, used, uses, various, versus, very, via, vis-a-vis, want, wanted, wanting, wants, was, wasn't, way, ways, we, we'd, we'll, we're, we've, well, wells, went, were, weren't, what, what's, whatall, whatever, whatsoever, when, when's, where, where's, whereas, wherewith, wherewithal, whether, which, whichever, whichsoever, while, who, who's, whoever, whole, whom, whomever, whomso, whomsoever, whose, whosoever, why, why's, will, with, within, without, won't, work, worked, working, works, worth,"
    " would, wouldn't, ye, year, years, yet, yon, yonder, you, you'd, you'll, you're, you've, you-all, young, younger, youngest, your, yours, yourself, yourselves"
).split(','))
# Union of both languages' stop words.
STOP_WORDS = STOP_WORDS_FR | STOP_WORDS_EN
# Tokens too generic to discriminate between organisation names, normalized
# through justCase (defined elsewhere in the project).
NON_DISCRIMINATING_TOKENS = list([justCase(t) for t in [
    # FR
    'Société', 'Université', 'Unité', 'Pôle', 'Groupe', 'SA', 'Entreprise',
    # EN
    # BUG FIX: a missing comma after 'Ltd' made Python concatenate the two
    # adjacent string literals into the single token 'LtdAgency', so neither
    # 'Ltd' nor 'Agency' was ever present in this list.
    'Society', 'University', 'Hospital', 'Department', 'Group', 'Ltd',
    'Agency', 'Institute', 'College', 'Faculty', 'Authority',
    'Academy', 'Department', 'Center', 'Centre', 'School', 'Enterprise', 'Company',
    'Foundation', 'City', 'Clinic', 'Consulting', 'Organization',
    # DE
    'Klinikum', 'Hochschule', 'Fachhochschule',
    # IT
    'Istituto', 'Regione', 'Comune', 'Centro',
    # ES
    'Universidad', 'Agencia', 'Servicio', 'Conselleria', 'Museo', 'Fundacion',
    # PL
    'Uniwersytet', 'Centrum', 'Akademia'
    ]])
# Cross-language synonyms for institution terms; inverse_translation_map
# (defined elsewhere) presumably builds the variant -> canonical lookup.
TRANSLATIONS = inverse_translation_map({
    'University': ['Université', 'Universidad', 'Universität', 'Universitat', 'Univ', 'Universita'],
    'Laboratory': ['Lab', 'Laboratoire', 'Labo'],
    'Hospital': ['Hôpital'],
    'Agency': ['Agence', 'Agencia'],
    'Department': ['Dipartimento', 'Département', 'Dpto', 'Dpt'],
    'City': ['Commune', 'Comune'],
    'Clinic': ['Clinique', 'Klinikum'],
    'CH': ['Complejo Hospitalario', 'Centre Hospitalier'],
    'Academy': ['Académie', 'Akademia', 'Aca'],
    # BUG FIX: 'Institute' appeared twice in this dict literal; the second
    # entry silently replaced the first, dropping the 'Instituto',
    # 'Istituto' and 'Instytut' variants. Both lists are merged here.
    'Institute': ['Institut', 'Instituto', 'Istituto', 'Instytut', 'Inst', 'Institució', 'Institucion'],
    'Center': ['Centre', 'Centrum', 'Zentrum'],
    'Association': ['Asociacion'],
    'Society': ['Société', 'Societa', 'Gesellschaft'],
    'Development': ['Développement'],
    'Consulting': ['Conseil'],
    'Foundation': ['Fundacion', 'Fondation'],
    'European': ['Européen'],
    'Technology': ['Technologie'],
    'Systems': ['Systèmes'],
    'School': ['École', 'Escuela', 'Scuola'],
    'Industrial': ['Industriel', 'Industrie', 'Industrial'],
    'Research': ['Recherche'],
    'UM': ['unité mixte'],
    'Medical Center': ['MC'],
    'Energy': ['Energie', 'Energia', 'Power'],
    'Organization': ['Ograniczona'],
    'Technical University': ['TU', 'Technische Universität', 'Technical Univ', 'Tech Univ'],
    'Limited': ['Ltd']
})
# Coarse organisation categories keyed by regex fragments; inverse_regex_map
# (defined elsewhere) presumably builds the pattern -> category lookup.
# NOTE(review): entries like 'univ*' read as globs, but as regexes '*' means
# "zero or more of the preceding char" — verify how inverse_regex_map uses them.
CATEGORIES = inverse_regex_map({
    'University': ['univ*', 'facult.', 'campus', 'departe?ment'],
    'School': ['scol*', 'school'],
    'Company': ['ltd', 'inc', 'sas', 'gmbh', 'sarl', 'sa', 'ab'],
    'Medical': ['medic*', 'hospi*', 'hopi.*', 'clini*', 'chu', 'ch', 'klinik', 'service'],
    'Research': ['unite', 'unit', 'lab*', 'recherche', 'umr', 'ufr', 'cnrs', 'cea'],
    'Other': ['committee', 'comite', 'agence', 'institute', 'bureau']
})
def lowerOrNot(token, keepAcronyms = False, keepInitialized = False):
    ''' Lower-cases a token, preserving acronyms and initials on request.
        Set keepAcronyms to true in order to improve precision (e.g. a CAT
        scan will not be matched by a kitty). '''
    # Acronym-sized tokens are kept verbatim when requested; the length
    # checks run first so isAcroToken is only consulted when needed.
    keepWhole = (keepAcronyms
                 and MIN_ACRO_SIZE <= len(token) <= MAX_ACRO_SIZE
                 and isAcroToken(token))
    if keepWhole:
        return token
    if keepInitialized:
        # An initial like "A12" followed by a non-letter separator.
        initialMatch = re.search("([A-Z][0-9]+)[^'a-zA-Z].*", token)
        if initialMatch:
            prefix = initialMatch.group(0)
            remainder = token[len(prefix):]
            return prefix + lowerOrNot(remainder, keepAcronyms, keepInitialized)
    return token.lower()
# A map from alt variant to main variant
# (loaded from the GRID synonyms resource file — file I/O at import time).
SYNMAP = fileToVariantMap('resource/grid_synonyms')
# Cache of GRID records; starts empty and is presumably populated later —
# confirm against the rest of the module.
GRID_DATA = dict()
| [
11748,
269,
21370,
11,
814,
8019,
11,
1257,
310,
10141,
11,
18931,
11,
302,
11,
28686,
11,
25064,
11,
2298,
293,
198,
11748,
28000,
9043,
1045,
11,
555,
485,
8189,
198,
6738,
17268,
1330,
4277,
11600,
11,
15034,
198,
198,
6738,
34669,... | 2.803791 | 3,746 |
from data_importers.ems_importers import BaseHalaroseCsvImporter
| [
6738,
1366,
62,
320,
1819,
1010,
13,
5232,
62,
320,
1819,
1010,
1330,
7308,
40202,
283,
577,
34,
21370,
3546,
26634,
628
] | 3 | 22 |
# This file is based on the ghp-import package released under
# the Tumbolia Public License.
# Tumbolia Public License
# Copyright 2013, Paul Davis <paul.joseph.davis@gmail.com>
# Copying and distribution of this file, with or without modification, are
# permitted in any medium without royalty provided the copyright notice and
# this notice are preserved.
# TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
# 0. opan saurce LOL
from __future__ import unicode_literals
import os
import subprocess as sp
import sys
import time
import unicodedata
from six import binary_type, text_type
| [
2,
770,
2393,
318,
1912,
319,
262,
308,
24831,
12,
11748,
5301,
2716,
739,
198,
2,
262,
309,
2178,
22703,
5094,
13789,
13,
198,
198,
2,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
22... | 3.20297 | 202 |
#!/usr/bin/env python3
import sys
import io
import getopt
from .S.language import SProgram
from .S.interp import StopProgram, SInterp
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
11748,
25064,
198,
11748,
33245,
198,
11748,
651,
8738,
198,
198,
6738,
764,
50,
13,
16129,
1330,
311,
15167,
198,
6738,
764,
50,
13,
3849,
79,
1330,
13707,
15167,
11,
311,
9492,
... | 3.068182 | 44 |
from sys import prefix
from flask import Blueprint
from app.api.v1 import v1_bp
from app.api.v2 import v2_bp
# Parent blueprint aggregating all API versions; its own URL prefix is
# chosen wherever api_bp is registered on the application.
api_bp = Blueprint('api', __name__)
# Mount the versioned sub-blueprints under /v1 and /v2 of this blueprint.
api_bp.register_blueprint(v1_bp, url_prefix='/v1')
api_bp.register_blueprint(v2_bp, url_prefix='/v2')
| [
6738,
25064,
1330,
21231,
198,
6738,
42903,
1330,
39932,
198,
6738,
598,
13,
15042,
13,
85,
16,
1330,
410,
16,
62,
46583,
198,
6738,
598,
13,
15042,
13,
85,
17,
1330,
410,
17,
62,
46583,
198,
198,
15042,
62,
46583,
796,
39932,
10786... | 2.56701 | 97 |
#!/usr/bin/env python3
# Copyright (C) Alibaba Group Holding Limited.
""" TAdaConv. """
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.modules.utils import _triple
class RouteFuncMLP(nn.Module):
    """
    The routing function for generating the calibration weights.
    """
    def __init__(self, c_in, ratio, kernels, bn_eps=1e-5, bn_mmt=0.1):
        """
        Args:
            c_in (int): number of input channels.
            ratio (int): reduction ratio for the routing function.
            kernels (list): temporal kernel size of the stacked 1D convolutions
            bn_eps (float): epsilon for the BatchNorm layer.
            bn_mmt (float): momentum for the BatchNorm layer.
        """
        super(RouteFuncMLP, self).__init__()
        self.c_in = c_in
        # Spatial-only average pooling: `None` keeps the temporal dimension.
        self.avgpool = nn.AdaptiveAvgPool3d((None,1,1))
        # Global pooling over all of T, H, W.
        self.globalpool = nn.AdaptiveAvgPool3d(1)
        # 1x1x1 projection (same channel count in and out).
        self.g = nn.Conv3d(
            in_channels=c_in,
            out_channels=c_in,
            kernel_size=1,
            padding=0,
        )
        # Temporal conv reducing channels by `ratio`; padding keeps T length.
        self.a = nn.Conv3d(
            in_channels=c_in,
            out_channels=int(c_in//ratio),
            kernel_size=[kernels[0],1,1],
            padding=[kernels[0]//2,0,0],
        )
        self.bn = nn.BatchNorm3d(int(c_in//ratio), eps=bn_eps, momentum=bn_mmt)
        self.relu = nn.ReLU(inplace=True)
        # Temporal conv expanding back to c_in channels; no bias (see below).
        self.b = nn.Conv3d(
            in_channels=int(c_in//ratio),
            out_channels=c_in,
            kernel_size=[kernels[1],1,1],
            padding=[kernels[1]//2,0,0],
            bias=False
        )
        # Marker presumably read by an external weight-init routine so this
        # layer's zero init is not overwritten — confirm against the trainer.
        self.b.skip_init=True
        self.b.weight.data.zero_() # to make sure the initial values
                                   # for the output is 1.
class TAdaConv2d(nn.Module):
    """
    Performs temporally adaptive 2D convolution.
    Currently, only application on 5D tensors is supported, which makes TAdaConv2d
    essentially a 3D convolution with temporal kernel size of 1.
    """
    def forward(self, x, alpha):
        """
        Args:
            x (tensor): feature to perform convolution on.
            alpha (tensor): calibration weight for the base weights.
                W_t = alpha_t * W_b
        """
        # self.weight is indexed with 6 dims here, so it presumably has shape
        # (1, 1, c_out, c_in, kh, kw); it is created in __init__ (not shown)
        # — confirm. self.cal_dim / self.stride / self.padding / self.dilation
        # / self.groups / self.bias are likewise set in __init__.
        _, _, c_out, c_in, kh, kw = self.weight.size()
        b, c_in, t, h, w = x.size()
        # Fold batch and time into the channel dim so a single grouped conv2d
        # applies a different calibrated kernel per (sample, frame).
        x = x.permute(0,2,1,3,4).reshape(1,-1,h,w)
        if self.cal_dim == "cin":
            # w_alpha: B, C, T, H(1), W(1) -> B, T, C, H(1), W(1) -> B, T, 1, C, H(1), W(1)
            # corresponding to calibrating the input channel
            weight = (alpha.permute(0,2,1,3,4).unsqueeze(2) * self.weight).reshape(-1, c_in//self.groups, kh, kw)
        elif self.cal_dim == "cout":
            # w_alpha: B, C, T, H(1), W(1) -> B, T, C, H(1), W(1) -> B, T, C, 1, H(1), W(1)
            # corresponding to calibrating the output channel
            weight = (alpha.permute(0,2,1,3,4).unsqueeze(3) * self.weight).reshape(-1, c_in//self.groups, kh, kw)
        bias = None
        if self.bias is not None:
            # in the official implementation of TAda2D,
            # there is no bias term in the convs
            # hence the performance with bias is not validated
            bias = self.bias.repeat(b, t, 1).reshape(-1)
        # groups = self.groups * b * t gives each (sample, frame) its own
        # calibrated filter bank within the single folded conv2d call.
        output = F.conv2d(
            x, weight=weight, bias=bias, stride=self.stride[1:], padding=self.padding[1:],
            dilation=self.dilation[1:], groups=self.groups * b * t)
        # Unfold back to (B, C_out, T, H', W').
        output = output.view(b, t, c_out, output.size(-2), output.size(-1)).permute(0,2,1,3,4)
        return output
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
15069,
357,
34,
8,
41992,
4912,
31703,
15302,
13,
220,
198,
198,
37811,
309,
2782,
64,
3103,
85,
13,
37227,
198,
198,
11748,
10688,
198,
11748,
28034,
198,
11748,
28034,
13,
2... | 1.968768 | 1,761 |
from pathlib import Path
import torch
import torch.nn as nn
from AbstractModel import AbstractModel
from Attention import Attention
from Tokenizer import Tokenizer
from StopWords import StopWords
'''
'''
| [
6738,
3108,
8019,
1330,
10644,
198,
198,
11748,
28034,
198,
11748,
28034,
13,
20471,
355,
299,
77,
198,
198,
6738,
27741,
17633,
1330,
27741,
17633,
198,
6738,
47406,
1330,
47406,
198,
198,
6738,
29130,
7509,
1330,
29130,
7509,
198,
6738,... | 3.907407 | 54 |
"""Add notes field for tasks
Revision ID: 707bac2b6bd1
Revises: 58ea8596e2db
Create Date: 2017-09-10 11:31:03.741908
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '707bac2b6bd1'       # identifier of this migration
down_revision = '58ea8596e2db'  # migration this one upgrades from
branch_labels = None            # no named branch
depends_on = None               # no cross-branch dependency
| [
37811,
4550,
4710,
2214,
329,
8861,
198,
198,
18009,
1166,
4522,
25,
767,
2998,
65,
330,
17,
65,
21,
17457,
16,
198,
18009,
2696,
25,
7618,
18213,
23,
45734,
68,
17,
9945,
198,
16447,
7536,
25,
2177,
12,
2931,
12,
940,
1367,
25,
3... | 2.488 | 125 |
from distutils.core import setup
import sys
import comet
# Runtime dependencies are pinned in requirements.txt, one per line.
with open('requirements.txt') as f:
    required = f.read().splitlines()
# Package metadata is sourced from the comet package's dunder attributes.
setup(
    name="Comet",
    description=comet.__description__,
    author=comet.__author__,
    author_email=comet.__contact__,
    version=comet.__version__,
    url=comet.__url__,
    packages=[
        'comet',
        'comet.handler',
        'comet.handler.test',
        'comet.log',
        'comet.log.test',
        'comet.plugins',
        'comet.plugins.test',
        'comet.protocol',
        'comet.protocol.test',
        'comet.service',
        'comet.service.test',
        'comet.utility',
        'comet.utility.test',
        'comet.validator',
        'comet.validator.test',
        'twisted'
    ],
    # Command-line VOEvent sender installed onto PATH.
    scripts=['scripts/comet-sendvo'],
    package_data={
        'comet': ['schema/*.xsd'],
        'comet.handler.test': [
            'test_spawn.sh',
            'test_spawn_failure.sh',
            'test_spawn_output.sh',
            'test_spawn_stdout.sh'
        ],
        # Twisted discovers the service plugin via its dropin file.
        'twisted': ['plugins/comet_plugin.py']
    },
    install_requires=required
)
| [
6738,
1233,
26791,
13,
7295,
1330,
9058,
198,
11748,
25064,
198,
11748,
31733,
198,
198,
4480,
1280,
10786,
8897,
18883,
13,
14116,
11537,
355,
277,
25,
198,
220,
220,
220,
2672,
796,
277,
13,
961,
22446,
35312,
6615,
3419,
198,
198,
... | 1.973404 | 564 |
from sys import stdin

# Binary search on the answer: find the smallest time T such that the n
# machines together finish at least t items, where machine i completes one
# item every li[i] time units (per-machine output capped at 1e9).
n, t = map(int, stdin.readline().split())
li = [int(c) for c in stdin.readline().split()]

lo, hi = 1, 10 ** 18
while lo < hi:
    mid = (lo + hi) // 2
    produced = sum(min(mid // li[i], 10 ** 9) for i in range(n))
    if produced >= t:
        hi = mid        # mid is feasible; answer is mid or smaller
    else:
        lo = mid + 1    # mid is infeasible; answer is strictly larger
print(lo)
| [
6738,
25064,
1330,
14367,
259,
201,
198,
201,
198,
77,
11,
256,
796,
3975,
7,
600,
11,
14367,
259,
13,
961,
1370,
22446,
35312,
28955,
201,
198,
4528,
796,
685,
600,
7,
66,
8,
329,
269,
287,
14367,
259,
13,
961,
1370,
22446,
35312... | 1.827027 | 185 |
"""
pygments.lexers.functional
~~~~~~~~~~~~~~~~~~~~~~~~~~
Just export lexer classes previously contained in this module.
:copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexers.lisp import SchemeLexer, CommonLispLexer, RacketLexer, \
NewLispLexer, ShenLexer
from pygments.lexers.haskell import HaskellLexer, LiterateHaskellLexer, \
KokaLexer
from pygments.lexers.theorem import CoqLexer
from pygments.lexers.erlang import ErlangLexer, ErlangShellLexer, \
ElixirConsoleLexer, ElixirLexer
from pygments.lexers.ml import SMLLexer, OcamlLexer, OpaLexer
__all__ = []  # deliberately empty: this module only re-exports lexers kept here for backwards compatibility
| [
37811,
201,
198,
220,
220,
220,
12972,
11726,
13,
2588,
364,
13,
45124,
201,
198,
220,
220,
220,
220,
27156,
15116,
4907,
201,
198,
201,
198,
220,
220,
220,
2329,
10784,
31191,
263,
6097,
4271,
7763,
287,
428,
8265,
13,
201,
198,
20... | 2.743083 | 253 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import urllib
from openstack import exceptions
from openstack import resource
| [
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
345,
743,
198,
198,
2,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
921,
743,
7330,
198,
2,
257,
4866,
286,
262,
13789,
379,
19... | 3.89441 | 161 |
# -*- encoding: utf-8 *-*
import os
import re
import sys
from glob import glob
from distutils.command.build import build
from distutils.core import Command
import textwrap
# Refuse to run on interpreters older than the minimum supported version.
min_python = (3, 4)
my_python = sys.version_info
if my_python < min_python:
    print("Borg requires Python %d.%d or later" % min_python)
    sys.exit(1)
# Are we building on ReadTheDocs?
on_rtd = os.environ.get('READTHEDOCS')
# msgpack pure python data corruption was fixed in 0.4.6.
# Also, we might use some rather recent API features.
install_requires = ['msgpack-python>=0.4.6', ]
extras_require = {
    # llfuse 0.40 (tested, proven, ok), needs FUSE version >= 2.8.0
    # llfuse 0.41 (tested shortly, looks ok), needs FUSE version >= 2.8.0
    # llfuse 0.41.1 (tested shortly, looks ok), needs FUSE version >= 2.8.0
    # llfuse 0.42 (tested shortly, looks ok), needs FUSE version >= 2.8.0
    # llfuse 1.0 (tested shortly, looks ok), needs FUSE version >= 2.8.0
    # llfuse 2.0 will break API
    'fuse': ['llfuse<2.0', ],
}
if sys.platform.startswith('freebsd'):
    # while llfuse 1.0 is the latest llfuse release right now,
    # llfuse 0.41.1 is the latest release that actually builds on freebsd:
    extras_require['fuse'] = ['llfuse==0.41.1', ]
from setuptools import setup, find_packages, Extension
from setuptools.command.sdist import sdist
# Cython sources for the compiled extension modules.
compress_source = 'src/borg/compress.pyx'
crypto_source = 'src/borg/crypto.pyx'
chunker_source = 'src/borg/chunker.pyx'
hashindex_source = 'src/borg/hashindex.pyx'
platform_posix_source = 'src/borg/platform/posix.pyx'
platform_linux_source = 'src/borg/platform/linux.pyx'
platform_darwin_source = 'src/borg/platform/darwin.pyx'
platform_freebsd_source = 'src/borg/platform/freebsd.pyx'
cython_sources = [
    compress_source,
    crypto_source,
    chunker_source,
    hashindex_source,
    platform_posix_source,
    platform_linux_source,
    platform_freebsd_source,
    platform_darwin_source,
]
try:
    from Cython.Distutils import build_ext
    import Cython.Compiler.Main as cython_compiler
except ImportError:
    # Cython is unavailable: fall back to the pre-generated C files, which
    # ship in release tarballs but not in a git checkout.
    compress_source = compress_source.replace('.pyx', '.c')
    crypto_source = crypto_source.replace('.pyx', '.c')
    chunker_source = chunker_source.replace('.pyx', '.c')
    hashindex_source = hashindex_source.replace('.pyx', '.c')
    platform_posix_source = platform_posix_source.replace('.pyx', '.c')
    platform_linux_source = platform_linux_source.replace('.pyx', '.c')
    platform_freebsd_source = platform_freebsd_source.replace('.pyx', '.c')
    platform_darwin_source = platform_darwin_source.replace('.pyx', '.c')
    from distutils.command.build_ext import build_ext
    # On ReadTheDocs nothing is compiled, so missing C files are fine there.
    if not on_rtd and not all(os.path.exists(path) for path in [
        compress_source, crypto_source, chunker_source, hashindex_source,
        platform_posix_source, platform_linux_source, platform_freebsd_source, platform_darwin_source]):
        raise ImportError('The GIT version of Borg needs Cython. Install Cython or use a released version.')
include_dirs = []
library_dirs = []
# Candidate OpenSSL install prefixes, probed in order by detect_openssl
# (defined elsewhere in this file); BORG_OPENSSL_PREFIX wins if set.
possible_openssl_prefixes = ['/usr', '/usr/local', '/usr/local/opt/openssl', '/usr/local/ssl', '/usr/local/openssl',
                             '/usr/local/borg', '/opt/local', '/opt/pkg', ]
if os.environ.get('BORG_OPENSSL_PREFIX'):
    possible_openssl_prefixes.insert(0, os.environ.get('BORG_OPENSSL_PREFIX'))
ssl_prefix = detect_openssl(possible_openssl_prefixes)
if not ssl_prefix:
    raise Exception('Unable to find OpenSSL >= 1.0 headers. (Looked here: {})'.format(', '.join(possible_openssl_prefixes)))
include_dirs.append(os.path.join(ssl_prefix, 'include'))
library_dirs.append(os.path.join(ssl_prefix, 'lib'))
# Same probing scheme for liblz4; only required when not building docs.
possible_lz4_prefixes = ['/usr', '/usr/local', '/usr/local/opt/lz4', '/usr/local/lz4',
                         '/usr/local/borg', '/opt/local', '/opt/pkg', ]
if os.environ.get('BORG_LZ4_PREFIX'):
    possible_lz4_prefixes.insert(0, os.environ.get('BORG_LZ4_PREFIX'))
lz4_prefix = detect_lz4(possible_lz4_prefixes)
if lz4_prefix:
    include_dirs.append(os.path.join(lz4_prefix, 'include'))
    library_dirs.append(os.path.join(lz4_prefix, 'lib'))
elif not on_rtd:
    raise Exception('Unable to find LZ4 headers. (Looked here: {})'.format(', '.join(possible_lz4_prefixes)))
# The README doubles as the PyPI long description.
with open('README.rst', 'r') as fd:
    long_description = fd.read()
# build_api, build_usage and Sdist are custom commands presumably defined
# earlier in this file (not visible here) — confirm before refactoring.
cmdclass = {
    'build_ext': build_ext,
    'build_api': build_api,
    'build_usage': build_usage,
    'sdist': Sdist
}
ext_modules = []
# No extensions are compiled on ReadTheDocs (docs-only build).
if not on_rtd:
    ext_modules += [
    Extension('borg.compress', [compress_source], libraries=['lz4'], include_dirs=include_dirs, library_dirs=library_dirs),
    Extension('borg.crypto', [crypto_source], libraries=['crypto'], include_dirs=include_dirs, library_dirs=library_dirs),
    Extension('borg.chunker', [chunker_source]),
    Extension('borg.hashindex', [hashindex_source])
]
    # Platform-specific extensions on top of the common ones.
    if sys.platform.startswith(('linux', 'freebsd', 'darwin')):
        ext_modules.append(Extension('borg.platform.posix', [platform_posix_source]))
    if sys.platform == 'linux':
        ext_modules.append(Extension('borg.platform.linux', [platform_linux_source], libraries=['acl']))
    elif sys.platform.startswith('freebsd'):
        ext_modules.append(Extension('borg.platform.freebsd', [platform_freebsd_source]))
    elif sys.platform == 'darwin':
        ext_modules.append(Extension('borg.platform.darwin', [platform_darwin_source]))
setup(
    name='borgbackup',
    # Version is derived from git tags via setuptools_scm and written to
    # src/borg/_version.py at build time.
    use_scm_version={
        'write_to': 'src/borg/_version.py',
    },
    author='The Borg Collective (see AUTHORS file)',
    author_email='borgbackup@python.org',
    url='https://borgbackup.readthedocs.io/',
    description='Deduplicated, encrypted, authenticated and compressed backups',
    long_description=long_description,
    license='BSD',
    platforms=['Linux', 'MacOS X', 'FreeBSD', 'OpenBSD', 'NetBSD', ],
    classifiers=[
        'Development Status :: 4 - Beta',
        'Environment :: Console',
        'Intended Audience :: System Administrators',
        'License :: OSI Approved :: BSD License',
        'Operating System :: POSIX :: BSD :: FreeBSD',
        'Operating System :: POSIX :: BSD :: OpenBSD',
        'Operating System :: POSIX :: BSD :: NetBSD',
        'Operating System :: MacOS :: MacOS X',
        'Operating System :: POSIX :: Linux',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Topic :: Security :: Cryptography',
        'Topic :: System :: Archiving :: Backup',
    ],
    packages=find_packages('src'),
    package_dir={'': 'src'},
    include_package_data=True,
    zip_safe=False,
    # borgfs is the same entry point; behavior presumably switches on
    # argv[0] inside archiver:main — confirm.
    entry_points={
        'console_scripts': [
            'borg = borg.archiver:main',
            'borgfs = borg.archiver:main',
        ]
    },
    cmdclass=cmdclass,
    ext_modules=ext_modules,
    setup_requires=['setuptools_scm>=1.7'],
    install_requires=install_requires,
    extras_require=extras_require,
)
| [
2,
532,
9,
12,
21004,
25,
3384,
69,
12,
23,
1635,
12,
9,
198,
11748,
28686,
198,
11748,
302,
198,
11748,
25064,
198,
6738,
15095,
1330,
15095,
198,
198,
6738,
1233,
26791,
13,
21812,
13,
11249,
1330,
1382,
198,
6738,
1233,
26791,
13... | 2.529708 | 2,777 |
# License: Apache-2.0
from ..transformers.transformer import Transformer
from typing import List, Dict, Union
import numpy as np
import pandas as pd
import databricks.koalas as ks
class _BaseImputer(Transformer):
    """Base imputer transformer class.
    Parameters
    ----------
    strategy : str
        Imputation strategy. The possible values are:
        * constant
        * most_frequent (only for the FloatImputer class)
        * mean (only for the FloatImputer class)
        * median (only for the FloatImputer class)
    value (Union[float, str, None]): Imputation value, default to None.
        used for `strategy=constant`.
    columns: List[str], default to None.
        List of columns.
    """
    def transform(
        self, X: Union[pd.DataFrame, ks.DataFrame]
    ) -> Union[pd.DataFrame, ks.DataFrame]:
        """Transform the dataframe `X`.
        Parameters
        ----------
        X : Union[pd.DataFrame, ks.DataFrame].
            Input dataframe.
        Returns
        -------
        Union[pd.DataFrame, ks.DataFrame]
            Transformed dataframe.
        """
        # self.statistics (column -> fill value) is presumably computed in a
        # subclass fit() via compute_statistics below — confirm.
        self.check_dataframe(X)
        if isinstance(X, pd.DataFrame):
            # pandas can fill all columns in a single fillna call.
            return X.fillna(self.statistics)
        # koalas: fill column by column.
        for col, val in self.statistics.items():
            X[col] = X[col].fillna(val)
        return X
    @staticmethod
    def compute_statistics(
        X: Union[pd.DataFrame, ks.DataFrame], columns: List[str],
        strategy: str,
        value: Union[float, int, str, None]
    ) -> Dict[str, Union[float, int, str]]:
        """Compute the imputation values.
        Parameters
        ----------
        X : Union[pd.DataFrame, ks.DataFrame]
            Dataframe used to compute the imputation values.
        columns : List[str]
            Columns to consider.
        strategy : str
            Imputation strategy.
        value : Union[float, int, str, None]
            Value used for imputation.
        Returns
        -------
        Dict[str, Union[float, int, str]]
            Imputation value mapping.
        """
        if strategy == 'mean':
            statistics = X[columns].astype(np.float64).mean().to_dict()
        elif strategy == 'median':
            statistics = X[columns].astype(np.float64).median().to_dict()
        elif strategy == 'most_frequent':
            # value_counts sorts by frequency, so index[0] is the mode.
            values = [
                X[c].value_counts().index.to_numpy()[0]
                for c in columns
            ]
            statistics = dict(zip(columns, values))
        else:  # strategy == 'constant'
            values = len(columns) * [value]
            statistics = dict(zip(columns, values))
        # A NaN statistic means the column had no usable data at all.
        if pd.Series(statistics).isnull().sum():
            raise ValueError(
                '''Some columns contains only NaN values and the
                imputation values cannot be calculated.
                Remove these columns
                before performing the imputation
                (e.g. with `gators.data_cleaning.drop_high_nan_ratio()`).''')
        return statistics
| [
2,
13789,
25,
24843,
12,
17,
13,
15,
198,
6738,
11485,
35636,
364,
13,
7645,
16354,
1330,
3602,
16354,
198,
6738,
19720,
1330,
7343,
11,
360,
713,
11,
4479,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67,
1... | 2.204924 | 1,381 |
# Generated by Django 3.1.5 on 2021-01-11 19:54
from django.db import migrations
import django_db_views.migration_functions
import django_db_views.operations
| [
2,
2980,
515,
416,
37770,
513,
13,
16,
13,
20,
319,
33448,
12,
486,
12,
1157,
678,
25,
4051,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
198,
11748,
42625,
14208,
62,
9945,
62,
33571,
13,
76,
4254,
62,
12543,
2733,
1... | 2.909091 | 55 |
# Created byMartin.cz
# Copyright (c) Martin Strohalm. All rights reserved.
from .. enums import *
from .. properties import *
from . graphics import Graphics
from . frame import Frame, FrameProperty
from . path import Path
class Layout(Graphics):
"""
Layout represents an experimental simple table-like layout manager tool.
You can define custom layout by specifying rows and columns of fixed or
relative size. Any pero.Graphics can then be inserted into specific cell.
When the layout is drawn the cells are arranged according to available space
and specific settings. The exact position of each cell is available via its
'frame' property.
Each cell graphics is drawn on temporarily modified canvas with specific
drawing region (view) applied. This allows to use relative coordinates
within the graphics. The exact position of each cell content is available
via its 'content' property.
Properties:
x: int or float
Specifies the x-coordinate of the top-left corner
y: int or float
Specifies the y-coordinate of the top-left corner
width: int, float or UNDEF
Specifies the full layout width. If set to UNDEF the full area of
given canvas is used.
height: int, float or UNDEF
Specifies the full layout height. If set to UNDEF the full area of
given canvas is used.
padding: int, float or tuple
Specifies the inner space of the layout as a single value or values
for individual sides starting from top.
spacing: int or float
Specifies the space in-between individual cells.
fill properties:
Includes pero.FillProperties to specify the background fill.
rows: (pero.Row,) (read-only)
Gets rows definitions.
cols: (pero.Column,) (read-only)
Gets columns definitions.
cells: (pero.Cell,) (read-only)
Gets cells definitions.
"""
x = NumProperty(0)
y = NumProperty(0)
width = NumProperty(UNDEF)
height = NumProperty(UNDEF)
padding = QuadProperty(0)
spacing = NumProperty(0)
fill = Include(FillProperties, fill_color="w")
    def __init__(self, **overrides):
        """Initializes a new instance of Layout."""
        
        super().__init__(**overrides)
        
        # Row/column/cell definitions plus a row-major grid of cells
        # (populated elsewhere; _grid is indexed as _grid[row][col]).
        self._rows = []
        self._cols = []
        self._cells = []
        self._grid = []
    @property
    def rows(self):
        """
        Gets rows definitions.
        Returns:
            (pero.Row,)
                Tuple of rows definitions.
        """
        # Return a tuple copy so callers cannot mutate the internal list.
        return tuple(self._rows)
    @property
    def cols(self):
        """
        Gets columns definitions.
        Returns:
            (pero.Column,)
                Tuple of columns definitions.
        """
        # Return a tuple copy so callers cannot mutate the internal list.
        return tuple(self._cols)
    @property
    def cells(self):
        """
        Gets cells definition.
        Returns:
            (pero.Cell,)
                Tuple of cells definitions.
        """
        # Return a tuple copy so callers cannot mutate the internal list.
        return tuple(self._cells)
def get_cell(self, row, col):
"""
Gets the cell at specified layout grid position.
Args:
row: int
Row index of requested cell.
col: int
Column index of requested cell.
Returns:
pero.Cell or None
Corresponding cell or None.
"""
return self._grid[row][col]
def get_cell_below(self, x, y):
"""
Gets the cell for which given coordinates fall into its bounding box.
Args:
x: int or float
X-coordinate in logical units.
y: int or float
Y-coordinate in logical units.
Returns:
pero.Cell or None
Corresponding cell or None.
"""
for cell in self._cells:
if cell.frame and cell.frame.contains(x, y):
return cell
return None
    def draw(self, canvas, source=UNDEF, **overrides):
        """Uses given canvas to draw layout cells."""
        # check if visible
        if not self.is_visible(source, overrides):
            return
        # get properties
        tag = self.get_property('tag', source, overrides)
        x = self.get_property('x', source, overrides)
        y = self.get_property('y', source, overrides)
        width = self.get_property('width', source, overrides)
        height = self.get_property('height', source, overrides)
        # get size from canvas (UNDEF means "use full available area")
        if width is UNDEF:
            width = canvas.viewport.width if canvas else 0
        if height is UNDEF:
            height = canvas.viewport.height if canvas else 0
        # arrange cells (recomputes every cell's frame before drawing)
        self.arrange(canvas, source, **overrides)
        # start drawing group
        canvas.group(tag, "layout")
        # set pen and brush (no outline, background fill only)
        canvas.line_width = 0
        canvas.set_brush_by(self, source=source, overrides=overrides)
        # draw background
        canvas.draw_rect(x, y, width, height)
        # draw cells in z-order so higher z_index cells paint on top
        for cell in sorted(self._cells, key=lambda c: c.z_index):
            cell.draw(canvas)
        # end drawing group
        canvas.ungroup()
def arrange(self, canvas=None, source=UNDEF, **overrides):
    """
    Calculates and sets cells frames.

    Args:
        canvas: pero.Canvas or None
            Canvas used to determine default width/height when the
            corresponding properties are not set.
        source: any
            Data source for dynamic properties.
        overrides: str:any pairs
            Specific property value overrides.
    """
    # get properties
    x = self.get_property('x', source, overrides)
    y = self.get_property('y', source, overrides)
    width = self.get_property('width', source, overrides)
    height = self.get_property('height', source, overrides)
    padding = self.get_property('padding', source, overrides)
    spacing = self.get_property('spacing', source, overrides)
    # get size from canvas if not specified explicitly
    if width is UNDEF:
        width = canvas.viewport.width if canvas else 0
    if height is UNDEF:
        height = canvas.viewport.height if canvas else 0
    # apply padding and spacing (padding order: top, right, bottom, left)
    padding = padding or (0, 0, 0, 0)
    x += padding[3]
    y += padding[0]
    width -= spacing * (len(self._cols) - 1) + padding[1] + padding[3]
    height -= spacing * (len(self._rows) - 1) + padding[0] + padding[2]
    # arrange rows and cols (spacing already removed from available size)
    heights = self._arrange_rows(height, spacing)
    widths = self._arrange_cols(width, spacing)
    # arrange cells: each frame covers its spanned rows/cols plus the
    # spacing swallowed between the spanned tracks
    for cell in self._cells:
        r, rs = cell.row, cell.row_span
        c, cs = cell.col, cell.col_span
        cell.frame = Frame(
            x = x + sum(widths[0:c]) + spacing * c,
            y = y + sum(heights[0:r]) + spacing * r,
            width = sum(widths[c:c+cs]) + spacing * (cs-1),
            height = sum(heights[r:r+rs]) + spacing * (rs-1))
        cell.arrange(canvas)
def add(self, graphics, row, col, **overrides):
    """
    Adds graphics to specified layout cell. Additional rows and columns are
    added if necessary with relative size of 1.

    Args:
        graphics: pero.Graphics
            Graphics to be added.
        row: int
            Index of the row into which the graphics should be added.
        col: int
            Index of the column into which the graphics should be added.
        row_span: int
            Number of rows the graphics should span.
        col_span: int
            Number of columns the graphics should span.
        width: int, float or UNDEF
            Minimum width of the content area. If not specified, the content
            may use whole available width.
        height: int, float or UNDEF
            Minimum height of the content area. If not specified, the
            content may use whole available height.
        padding: int, float or tuple
            Inner empty space of the cell as a single value or values for
            individual sides starting from top.
        h_expand: bool
            If set to True the cell content can use full available width
            even if the 'width' property is set.
        v_expand: bool
            If set to True the cell content can use full available height
            even if the 'height' property is set.
        h_align: str
            Horizontal alignment of the cell content as any item from the
            pero.POSITION_LRC enum. Note that for this to have any effect
            the 'width' property must be specified.
        v_align: str
            Vertical alignment of the cell content as any item from the
            pero.POSITION_TBC enum. Note that for this to have any effect
            the 'height' property must be specified.
        line properties:
            All the pero.LineProperties to specify the cell background
            outline.
        fill properties:
            All the pero.FillProperties to specify the cell background
            fill.

    Raises:
        TypeError: If 'graphics' is not a pero.Graphics instance.
        ValueError: If any targeted grid position is already occupied.
    """
    # check type
    if not isinstance(graphics, Graphics):
        message = "Graphics must be of type pero.Graphics! -> %s" % type(graphics)
        raise TypeError(message)
    # init cell
    cell = Cell(
        graphics = graphics,
        row = row,
        col = col,
        **overrides)
    # get used rows and columns (all indices covered by the spans)
    rows = cell.rows
    cols = cell.cols
    # add rows and columns if necessary (each with relative size of 1)
    while len(self._rows) <= max(rows):
        self.add_row(1, True)
    while len(self._cols) <= max(cols):
        self.add_col(1, True)
    # check if cells are empty
    for r in rows:
        for c in cols:
            if self._grid[r][c] is not None:
                message = "Specified cell (%s,%s) is already used!" % (r, c)
                raise ValueError(message)
    # register cell
    self._cells.append(cell)
    # add cell to grid (a spanning cell occupies every covered slot)
    for r in rows:
        for c in cols:
            self._grid[r][c] = cell
def add_row(self, height=1, relative=True):
    """
    Adds additional row at the end of layout with specified absolute or
    relative height.

    Args:
        height: int or float
            Absolute or relative height of the row.
        relative: bool
            If set to True, specified height is considered as relative
            portion of total available space after filling all fixed rows.
    """
    # register the new row definition
    self._rows.append(Row(height=height, relative=relative))
    # grow the grid by one empty row spanning all current columns
    self._grid.append([None] * len(self._cols))
def add_col(self, width=1, relative=True):
    """
    Adds additional column at the end of layout with specified absolute or
    relative width.

    Args:
        width: int or float
            Absolute or relative width of the column.
        relative: bool
            If set to True, specified width is considered as relative
            portion of total available space after filling all fixed
            columns.
    """
    # register the new column definition
    self._cols.append(Column(width=width, relative=relative))
    # append an empty slot to every existing grid row
    for grid_row in self._grid:
        grid_row.append(None)
def _arrange_rows(self, available, spacing):
    """
    Calculates final size of each row.

    Args:
        available: int or float
            Total height available for rows (inter-row spacing and outer
            padding already removed by the caller).
        spacing: int or float
            Space between adjacent rows. (Not used here; the caller already
            subtracted it from 'available'.)

    Returns:
        list of int/float
            Final height for each row.
    """
    # init buffers
    heights = [0]*len(self._rows)
    minima = [0]*len(self._rows)
    dirty = False
    # get sums of fixed heights and of relative weights
    fix_sum = sum(r.height for r in self._rows if not r.relative)
    rel_sum = sum(r.height for r in self._rows if r.relative)
    # calc naive heights and minima
    for i, row in enumerate(self._rows):
        # set fixed row
        if not row.relative:
            heights[i] = row.height
            minima[i] = row.height
        # set relative row: proportional share of the remaining space
        else:
            heights[i] = max(0, (available - fix_sum) * row.height / rel_sum)
            # minimum is driven by the tallest single-row cell in this row
            cells = [c for c in self._grid[i] if c and c.height and c.row_span == 1]
            minima[i] = max(c.height for c in cells) if cells else 0
        # check minimum height
        dirty = dirty or heights[i] < minima[i]
    # force minima: iteratively pin each undersized row to its minimum
    # (treating it as fixed from then on) and redistribute what remains
    # among the other relative rows; restart until stable
    while dirty:
        dirty = False
        for i, row in enumerate(self._rows):
            if heights[i] < minima[i]:
                heights[i] = minima[i]
                fix_sum += minima[i]
                rel_sum -= row.height
                dirty = True
                break
            # NOTE(review): if rel_sum reaches 0 while relative rows remain
            # above their minima, this recomputation divides by zero --
            # presumably prevented by the inputs; verify.
            if heights[i] > minima[i] and row.relative:
                heights[i] = max(0, (available - fix_sum) * row.height / rel_sum)
    return heights
def _arrange_cols(self, available, spacing):
    """
    Calculates final size of each column.

    Args:
        available: int or float
            Total width available for columns (inter-column spacing and
            outer padding already removed by the caller).
        spacing: int or float
            Space between adjacent columns. (Not used here; the caller
            already subtracted it from 'available'.)

    Returns:
        list of int/float
            Final width for each column.
    """
    # init buffers
    widths = [0]*len(self._cols)
    minima = [0]*len(self._cols)
    dirty = False
    # get sums of fixed widths and of relative weights
    fix_sum = sum(c.width for c in self._cols if not c.relative)
    rel_sum = sum(c.width for c in self._cols if c.relative)
    # calc naive widths and minima
    for i, col in enumerate(self._cols):
        # set fixed col
        if not col.relative:
            widths[i] = col.width
            minima[i] = col.width
        # set relative col: proportional share of the remaining space
        else:
            widths[i] = max(0, (available - fix_sum) * col.width / rel_sum)
            # minimum is driven by the widest single-column cell in this column
            cells = [self._grid[r][i] for r in range(len(self._rows))]
            cells = [c for c in cells if c and c.width and c.col_span == 1]
            minima[i] = max(c.width for c in cells) if cells else 0
        # check minimum width
        dirty = dirty or widths[i] < minima[i]
    # force minima: iteratively pin each undersized column to its minimum
    # (treating it as fixed from then on) and redistribute what remains
    # among the other relative columns; restart until stable
    while dirty:
        dirty = False
        for i, col in enumerate(self._cols):
            if widths[i] < minima[i]:
                widths[i] = minima[i]
                fix_sum += minima[i]
                rel_sum -= col.width
                dirty = True
                break
            # NOTE(review): if rel_sum reaches 0 while relative columns
            # remain above their minima, this recomputation divides by zero
            # -- presumably prevented by the inputs; verify.
            if widths[i] > minima[i] and col.relative:
                widths[i] = max(0, (available - fix_sum) * col.width / rel_sum)
    return widths
class Row(PropertySet):
    """
    Layout row definition. This is typically created automatically by layout
    manager but the properties can be later changed if needed.

    Properties:
        height: int or float
            Specifies absolute or relative height of the row.
        relative: bool
            Specifies whether the row height is considered as relative portion
            of total available space after filling all fixed rows.
    """
    # absolute size in logical units when 'relative' is False,
    # otherwise a weight used to split the remaining space
    height = NumProperty(UNDEF, dynamic=False)
    relative = BoolProperty(False, dynamic=False)
class Column(PropertySet):
    """
    Layout column definition. This is typically created automatically by layout
    manager but the properties can be later changed if needed.

    Properties:
        width: int or float
            Specifies absolute or relative width of the column.
        relative: bool
            Specifies whether the column width is considered as relative portion
            of total available space after filling all fixed columns.
    """
    # absolute size in logical units when 'relative' is False,
    # otherwise a weight used to split the remaining space
    width = NumProperty(UNDEF, dynamic=False)
    relative = BoolProperty(False, dynamic=False)
class Cell(Graphics):
    """
    Layout cell definition. This is typically created automatically by layout
    manager but some of the properties can be later changed if needed.

    Properties:
        graphics: pero.Graphics
            Specifies the graphics to drawn.
        row: int
            Specifies the index of the row in which the cell sits. This value
            cannot be changed once set.
        col: int
            Specifies the index of the column in which the cell sits. This value
            cannot be changed once set.
        row_span: int
            Specifies the number of rows the cell spans. This value cannot be
            changed once set.
        col_span: int
            Specifies the number of columns the cell spans. This value cannot be
            changed once set.
        clip: bool
            Specifies whether the content overflow should be clipped.
        width: int, float or UNDEF
            Specifies the minimum width of the content area. If not specified,
            the content may use whole available width.
        height: int, float or UNDEF
            Specifies the minimum height of the content area. If not specified,
            the content may use whole available height.
        padding: int, float or tuple
            Specifies the inner empty space of the cell as a single value or
            values for individual sides starting from top.
        h_expand: bool
            Specifies whether the cell content can use full available width even
            if the 'width' property is set.
        v_expand: bool
            Specifies whether the cell content can use full available height
            even if the 'height' property is set.
        h_align: str
            Specifies the horizontal alignment of the cell content as any item
            from the pero.POSITION_LRC enum. Note that for this to have any
            effect the 'width' property must be specified and 'h_expand' must be
            disabled.
        v_align: str
            Specifies vertical alignment of the cell content as any item from
            the pero.POSITION_TBC enum. Note that for this to have any effect
            the 'height' property must be specified and 'v_expand' must be
            disabled.
        line properties:
            Includes pero.LineProperties to specify the cell background
            outline.
        fill properties:
            Includes pero.FillProperties to specify the cell background fill.
        frame: pero.Frame
            Specifies the cell frame. This value is typically calculated and set
            automatically by the layout manager.
        content: pero.Frame
            Specifies the cell content frame. This value is typically calculated
            and set automatically by the layout manager.
    """
    graphics = Property(UNDEF, types=(Graphics,), dynamic=False)
    row = IntProperty(UNDEF, dynamic=False)
    col = IntProperty(UNDEF, dynamic=False)
    row_span = IntProperty(1, dynamic=False)
    col_span = IntProperty(1, dynamic=False)
    width = NumProperty(UNDEF, dynamic=False)
    height = NumProperty(UNDEF, dynamic=False)
    padding = QuadProperty(0, dynamic=False)
    clip = BoolProperty(True)
    h_expand = BoolProperty(True)
    v_expand = BoolProperty(True)
    h_align = EnumProperty(POS_CENTER, enum=POSITION_LRC, dynamic=False)
    v_align = EnumProperty(POS_CENTER, enum=POSITION_TBC, dynamic=False)
    outline = Include(LineProperties, line_color=None)
    fill = Include(FillProperties, fill_color=None)
    frame = FrameProperty(UNDEF, dynamic=False)
    content = FrameProperty(UNDEF, dynamic=False)
    def __init__(self, **overrides):
        """Initializes a new instance of layout Cell."""
        super().__init__(**overrides)
        # NOTE(review): _content_origin is not read anywhere within this
        # class -- verify whether it is still needed.
        self._content_origin = (0, 0)
        # lock properties describing the grid position; the layout grid
        # would get out of sync if these changed after registration
        self.lock_property('row')
        self.lock_property('col')
        self.lock_property('row_span')
        self.lock_property('col_span')
    @property
    def rows(self):
        """
        Gets indices of all the rows this cell spans.

        Returns:
            (int,)
                Tuple of rows indices.
        """
        return tuple(self.row+s for s in range(self.row_span))
    @property
    def cols(self):
        """
        Gets indices of all the columns this cell spans.

        Returns:
            (int,)
                Tuple of columns indices.
        """
        return tuple(self.col+s for s in range(self.col_span))
    def draw(self, canvas, source=UNDEF, **overrides):
        """Uses given canvas to draw cell graphics."""
        # check if visible
        if not self.is_visible(source, overrides):
            return
        # get properties
        tag = self.get_property('tag', source, overrides)
        graphics = self.get_property('graphics', source, overrides)
        frame = self.get_property('frame', source, overrides)
        content = self.get_property('content', source, overrides)
        clip = self.get_property('clip', source, overrides)
        # set pen and brush
        canvas.set_pen_by(self, source=source, overrides=overrides)
        canvas.set_brush_by(self, source=source, overrides=overrides)
        # start drawing group
        canvas.group(tag, "layout_cell")
        # draw background across the whole cell frame
        canvas.draw_rect(*frame.rect)
        # set clipping to the content area
        if clip:
            canvas.clip(Path().rect(*content.rect))
        # draw graphics inside a view shifted to the content area
        if graphics:
            with canvas.view(*content.rect, relative=True):
                graphics.draw(canvas)
        # revert clipping
        if clip:
            canvas.unclip()
        # end drawing group
        canvas.ungroup()
    def arrange(self, canvas=None, source=UNDEF, **overrides):
        """Calculates and sets content frame."""
        # get properties
        frame = self.get_property('frame', source, overrides)
        width = self.get_property('width', source, overrides)
        height = self.get_property('height', source, overrides)
        padding = self.get_property('padding', source, overrides)
        h_expand = self.get_property('h_expand', source, overrides)
        v_expand = self.get_property('v_expand', source, overrides)
        h_align = self.get_property('h_align', source, overrides)
        v_align = self.get_property('v_align', source, overrides)
        clip = self.get_property('clip', source, overrides)
        # get coords (padding order: top, right, bottom, left)
        padding = padding or (0, 0, 0, 0)
        width = width or 0
        height = height or 0
        # expand to the full padded frame unless a fixed size is requested
        if not width or h_expand:
            width = max(width, frame.width - padding[1] - padding[3])
        if not height or v_expand:
            height = max(height, frame.height - padding[0] - padding[2])
        # align horizontally within the frame
        if h_align == POS_CENTER:
            x = frame.cx + padding[3] - padding[1] - .5*width
        elif h_align == POS_RIGHT:
            x = frame.x2 - width - padding[1]
        else:
            x = frame.x + padding[3]
        # align vertically within the frame
        if v_align == POS_CENTER:
            y = frame.cy + padding[0] - padding[2] - .5*height
        elif v_align == POS_BOTTOM:
            y = frame.y2 - height - padding[2]
        else:
            y = frame.y + padding[0]
        # apply clipping so the content never exceeds the padded frame
        if clip:
            x = max(x, frame.x + padding[3])
            y = max(y, frame.y + padding[0])
            width = min(width, frame.width - padding[1] - padding[3])
            height = min(height, frame.height - padding[0] - padding[2])
        # check flipped content
        width = max(0, width)
        height = max(0, height)
        # set content frame
        self.content = Frame(x, y, width, height)
    def to_content(self, x, y):
        """
        Recalculates given position from the layout coordinate system into the
        cell content system.

        Args:
            x: float
                X-coordinate to convert.
            y: float
                Y-coordinate to convert.

        Returns:
            (float, float)
                X and y coordinates within cell content coordinate system.
        """
        # NOTE(review): this adds the content origin while to_layout
        # subtracts it -- the naming suggests the opposite mapping;
        # verify against callers.
        return x + self.content.x, y + self.content.y
    def to_layout(self, x, y):
        """
        Recalculates given position from the cell content coordinate system into
        the layout system.

        Args:
            x: float
                X-coordinate to convert.
            y: float
                Y-coordinate to convert.

        Returns:
            (float, float)
                X and y coordinates within layout coordinate system.
        """
        return x - self.content.x, y - self.content.y
| [
2,
220,
15622,
416,
24778,
13,
26691,
198,
2,
220,
15069,
357,
66,
8,
5780,
30183,
14201,
76,
13,
1439,
2489,
10395,
13,
198,
198,
6738,
11485,
551,
5700,
1330,
1635,
198,
6738,
11485,
6608,
1330,
1635,
198,
6738,
764,
9382,
1330,
1... | 2.077967 | 12,531 |
import os
import boto3
import json
# Connect to DynamoDB using the default boto3 session/credentials chain.
dynamodb = boto3.resource('dynamodb')
# Table() is lazy -- no request is made until an attribute is accessed.
table = dynamodb.Table('dev-dataeng-meetup-table')
# Accessing creation_date_time triggers the DescribeTable call.
print(table.creation_date_time)
| [
11748,
28686,
198,
11748,
275,
2069,
18,
198,
11748,
33918,
198,
198,
67,
4989,
375,
65,
796,
275,
2069,
18,
13,
31092,
10786,
67,
4989,
375,
65,
11537,
198,
11487,
796,
6382,
375,
65,
13,
10962,
10786,
7959,
12,
7890,
1516,
12,
477... | 2.633333 | 60 |
from django.db import models, connection, Error
import hashlib
from django.contrib.auth.models import AbstractBaseUser
| [
6738,
42625,
14208,
13,
9945,
1330,
4981,
11,
4637,
11,
13047,
198,
11748,
12234,
8019,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
27530,
1330,
27741,
14881,
12982,
628,
628
] | 3.8125 | 32 |
#!/usr/bin/env python
#
# License: BSD
# https://raw.github.com/yujinrobot/kobuki/hydro-devel/kobuki_testsuite/LICENSE
#
##############################################################################
# Imports
##############################################################################
import threading
import rospy
import math
from geometry_msgs.msg import Twist
from std_msgs.msg import String
##############################################################################
# Classes
##############################################################################
'''
implements a rotating motion.
'''
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
220,
220,
220,
220,
220,
220,
220,
198,
2,
13789,
25,
347,
10305,
198,
2,
220,
220,
3740,
1378,
1831,
13,
12567,
13,
785,
14,
24767,
18594,
305,
13645,
14,
74,
672,
11308,
14,
... | 4.282759 | 145 |
import cv2
import numpy as np
import tensorflow as tf
from keras.models import load_model
import os
from PIL import Image, ImageOps
# load class labels, one label per line
f = open("./converted_keras/l.txt", "r")
labels = f.read().splitlines()
f.close()
# shape = (224, 224)
gpus = tf.config.experimental.list_physical_devices('GPU')
if gpus:
    try:
        # Currently, memory growth needs to be the same across GPUs
        for gpu in gpus:
            tf.config.experimental.set_memory_growth(gpu, True)
        logical_gpus = tf.config.experimental.list_logical_devices('GPU')
        print(len(gpus), "Physical GPUs,", len(logical_gpus), "Logical GPUs")
    except RuntimeError as e:
        # Memory growth must be set before GPUs have been initialized
        print(e)
# load the trained Keras model exported from Teachable Machine
model = load_model("./converted_keras/t.h5")
print(model.summary())
# default webcam; reused single-image input batch for the model
camera = cv2.VideoCapture(0)
data = np.ndarray(shape=(1, 224, 224, 3), dtype=np.float32)
while True:
    # NOTE(review): the success flag 't' is never checked -- 'frame' may be
    # None if the capture fails; verify before relying on this loop.
    (t, frame) = camera.read()
    # mirror the image for a more natural preview
    frame = cv2.flip(frame, 1)
    # region of interest in the top-left corner
    roi = frame[0:250, 0:250]
    # gray = cv2.cvtColor(roi, cv2.COLOR_BGR2GRAY)
    # gray = cv2.GaussianBlur(roi, (7, 7), 0)
    # image = ImageOps.fit(roi, (224, 224), Image.ANTIALIAS)
    # image_array = np.asarray(image)
    gray = cv2.resize(roi, (224, 224))
    # scale pixel values to roughly [-1, 1]
    normalized_image_array = (gray.astype(np.float32) / 127.0) - 1
    # res = model.predict(gray.reshape(1, 224, 224, 3))
    data[0] = normalized_image_array
    # prediction = np.argmax(res, axis=-1)
    res = model.predict(data)
    p = np.argmax(res, axis=-1)
    # print(res)
    # char = prediction[0]+65
    # map the winning class index to its label text
    char = labels[p[0]]
    """ char -= 1
if char > 80:
char += 1
if char == 64:
char = 'Space'
else:
char = chr(char) """
    # draw the ROI box and the predicted label on the preview
    cv2.rectangle(frame, (0, 0), (250, 250), (0, 255, 0), 2)
    cv2.putText(frame, char, (300, 50), cv2.FONT_HERSHEY_SIMPLEX,
                1, (225, 0, 0), 2, cv2.LINE_AA)
    cv2.namedWindow('frame', cv2.WINDOW_NORMAL)
    cv2.imshow('frame', frame)
    # exit on ESC (key code 27)
    keypress = cv2.waitKey(1)
    if keypress == 27:
        break
camera.release()
cv2.destroyAllWindows()
| [
11748,
269,
85,
17,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
11192,
273,
11125,
355,
48700,
198,
6738,
41927,
292,
13,
27530,
1330,
3440,
62,
19849,
198,
11748,
28686,
198,
6738,
350,
4146,
1330,
7412,
11,
7412,
41472,
198,
198,
... | 2.230193 | 934 |
# coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: v1.20.7
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kubernetes.client.configuration import Configuration
class IoCertManagerAcmeV1ChallengeSpecSolverHttp01IngressPodTemplateSpec(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # attribute name -> declared OpenAPI type
    openapi_types = {
        'affinity': 'IoCertManagerAcmeV1ChallengeSpecSolverHttp01IngressPodTemplateSpecAffinity',
        'node_selector': 'dict(str, str)',
        'priority_class_name': 'str',
        'service_account_name': 'str',
        'tolerations': 'list[ComCoreosMonitoringV1AlertmanagerSpecTolerations]'
    }
    # attribute name -> JSON key on the wire
    attribute_map = {
        'affinity': 'affinity',
        'node_selector': 'nodeSelector',
        'priority_class_name': 'priorityClassName',
        'service_account_name': 'serviceAccountName',
        'tolerations': 'tolerations'
    }
    def __init__(self, affinity=None, node_selector=None, priority_class_name=None, service_account_name=None, tolerations=None, local_vars_configuration=None): # noqa: E501
        """IoCertManagerAcmeV1ChallengeSpecSolverHttp01IngressPodTemplateSpec - a model defined in OpenAPI""" # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration
        self._affinity = None
        self._node_selector = None
        self._priority_class_name = None
        self._service_account_name = None
        self._tolerations = None
        self.discriminator = None
        # assign only the attributes explicitly provided by the caller
        if affinity is not None:
            self.affinity = affinity
        if node_selector is not None:
            self.node_selector = node_selector
        if priority_class_name is not None:
            self.priority_class_name = priority_class_name
        if service_account_name is not None:
            self.service_account_name = service_account_name
        if tolerations is not None:
            self.tolerations = tolerations
    @property
    def affinity(self):
        """Gets the affinity of this IoCertManagerAcmeV1ChallengeSpecSolverHttp01IngressPodTemplateSpec. # noqa: E501

        :return: The affinity of this IoCertManagerAcmeV1ChallengeSpecSolverHttp01IngressPodTemplateSpec. # noqa: E501
        :rtype: IoCertManagerAcmeV1ChallengeSpecSolverHttp01IngressPodTemplateSpecAffinity
        """
        return self._affinity
    @affinity.setter
    def affinity(self, affinity):
        """Sets the affinity of this IoCertManagerAcmeV1ChallengeSpecSolverHttp01IngressPodTemplateSpec.

        :param affinity: The affinity of this IoCertManagerAcmeV1ChallengeSpecSolverHttp01IngressPodTemplateSpec. # noqa: E501
        :type: IoCertManagerAcmeV1ChallengeSpecSolverHttp01IngressPodTemplateSpecAffinity
        """
        self._affinity = affinity
    @property
    def node_selector(self):
        """Gets the node_selector of this IoCertManagerAcmeV1ChallengeSpecSolverHttp01IngressPodTemplateSpec. # noqa: E501

        NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ # noqa: E501

        :return: The node_selector of this IoCertManagerAcmeV1ChallengeSpecSolverHttp01IngressPodTemplateSpec. # noqa: E501
        :rtype: dict(str, str)
        """
        return self._node_selector
    @node_selector.setter
    def node_selector(self, node_selector):
        """Sets the node_selector of this IoCertManagerAcmeV1ChallengeSpecSolverHttp01IngressPodTemplateSpec.

        NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ # noqa: E501

        :param node_selector: The node_selector of this IoCertManagerAcmeV1ChallengeSpecSolverHttp01IngressPodTemplateSpec. # noqa: E501
        :type: dict(str, str)
        """
        self._node_selector = node_selector
    @property
    def priority_class_name(self):
        """Gets the priority_class_name of this IoCertManagerAcmeV1ChallengeSpecSolverHttp01IngressPodTemplateSpec. # noqa: E501

        If specified, the pod's priorityClassName. # noqa: E501

        :return: The priority_class_name of this IoCertManagerAcmeV1ChallengeSpecSolverHttp01IngressPodTemplateSpec. # noqa: E501
        :rtype: str
        """
        return self._priority_class_name
    @priority_class_name.setter
    def priority_class_name(self, priority_class_name):
        """Sets the priority_class_name of this IoCertManagerAcmeV1ChallengeSpecSolverHttp01IngressPodTemplateSpec.

        If specified, the pod's priorityClassName. # noqa: E501

        :param priority_class_name: The priority_class_name of this IoCertManagerAcmeV1ChallengeSpecSolverHttp01IngressPodTemplateSpec. # noqa: E501
        :type: str
        """
        self._priority_class_name = priority_class_name
    @property
    def service_account_name(self):
        """Gets the service_account_name of this IoCertManagerAcmeV1ChallengeSpecSolverHttp01IngressPodTemplateSpec. # noqa: E501

        If specified, the pod's service account # noqa: E501

        :return: The service_account_name of this IoCertManagerAcmeV1ChallengeSpecSolverHttp01IngressPodTemplateSpec. # noqa: E501
        :rtype: str
        """
        return self._service_account_name
    @service_account_name.setter
    def service_account_name(self, service_account_name):
        """Sets the service_account_name of this IoCertManagerAcmeV1ChallengeSpecSolverHttp01IngressPodTemplateSpec.

        If specified, the pod's service account # noqa: E501

        :param service_account_name: The service_account_name of this IoCertManagerAcmeV1ChallengeSpecSolverHttp01IngressPodTemplateSpec. # noqa: E501
        :type: str
        """
        self._service_account_name = service_account_name
    @property
    def tolerations(self):
        """Gets the tolerations of this IoCertManagerAcmeV1ChallengeSpecSolverHttp01IngressPodTemplateSpec. # noqa: E501

        If specified, the pod's tolerations. # noqa: E501

        :return: The tolerations of this IoCertManagerAcmeV1ChallengeSpecSolverHttp01IngressPodTemplateSpec. # noqa: E501
        :rtype: list[ComCoreosMonitoringV1AlertmanagerSpecTolerations]
        """
        return self._tolerations
    @tolerations.setter
    def tolerations(self, tolerations):
        """Sets the tolerations of this IoCertManagerAcmeV1ChallengeSpecSolverHttp01IngressPodTemplateSpec.

        If specified, the pod's tolerations. # noqa: E501

        :param tolerations: The tolerations of this IoCertManagerAcmeV1ChallengeSpecSolverHttp01IngressPodTemplateSpec. # noqa: E501
        :type: list[ComCoreosMonitoringV1AlertmanagerSpecTolerations]
        """
        self._tolerations = tolerations
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        # recursively serialize nested models, lists and dicts
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, IoCertManagerAcmeV1ChallengeSpecSolverHttp01IngressPodTemplateSpec):
            return False
        return self.to_dict() == other.to_dict()
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, IoCertManagerAcmeV1ChallengeSpecSolverHttp01IngressPodTemplateSpec):
            return True
        return self.to_dict() != other.to_dict()
| [
2,
19617,
25,
3384,
69,
12,
23,
198,
198,
37811,
198,
220,
220,
220,
12554,
527,
3262,
274,
628,
220,
220,
220,
1400,
6764,
2810,
357,
27568,
416,
4946,
15042,
35986,
3740,
1378,
12567,
13,
785,
14,
9654,
499,
270,
10141,
14,
9654,
... | 2.5104 | 3,654 |
from django.db import models
from datetime import datetime
# Create your models here.
| [
6738,
42625,
14208,
13,
9945,
1330,
4981,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
198,
2,
13610,
534,
4981,
994,
13,
628
] | 3.826087 | 23 |
import numpy as np
import pytransform3d.visualizer as pv
import pytransform3d.trajectories as ptr
from pytransform3d.urdf import UrdfTransformManager
from mocap.cleaning import smooth_exponential_coordinates, median_filter
from mocap.dataset_loader import load_kuka_dataset, transpose_dataset
from movement_primitives.visualization import plot_pointcloud, ToggleGeometry
from movement_primitives.promp import ProMP
from gmr import GMM
plot_training_data = False
n_dims = 12
n_weights_per_dim = 10
# available contexts: "panel_width", "clockwise", "counterclockwise", "left_arm", "right_arm"
context_names = ["panel_width", "clockwise", "counterclockwise"]
#pattern = "data/kuka/20200129_peg_in_hole/csv_processed/*/*.csv"
#pattern = "data/kuka/20191213_carry_heavy_load/csv_processed/*/*.csv"
pattern = "data/kuka/20191023_rotate_panel_varying_size/csv_processed/*/*.csv"
# NOTE(review): generate_training_data is neither defined nor imported in
# this file -- presumably it should come from a project module; verify
# before running (this line raises NameError as-is).
weights, Ts, Es, contexts = generate_training_data(
    pattern, n_weights_per_dim, context_names=context_names, verbose=2)
# fit a joint GMM over (context, ProMP weights)
X = np.hstack((contexts, weights))
random_state = np.random.RandomState(0)
gmm = GMM(n_components=5, random_state=random_state)
gmm.from_samples(X)
n_steps = 100
T_query = np.linspace(0, 1, n_steps)
fig = pv.figure(with_key_callbacks=True)
fig.plot_transform(s=0.1)
# load the robot model for visualization
tm = UrdfTransformManager()
with open("kuka_lbr/urdf/kuka_lbr.urdf", "r") as f:
    tm.load_urdf(f.read(), mesh_path="kuka_lbr/urdf/")
fig.plot_graph(tm, "kuka_lbr", show_visuals=True)
for panel_width, color, idx in zip([0.3, 0.4, 0.5], ([1.0, 1.0, 0.0], [0.0, 1.0, 1.0], [1.0, 0.0, 1.0]), range(3)):
    print("panel_width = %.2f, color = %s" % (panel_width, color))
    # condition the joint distribution on the context to obtain a
    # distribution over ProMP weights
    context = np.array([panel_width, 0.0, 1.0])
    conditional_weight_distribution = gmm.condition(np.arange(len(context)), context).to_mvn()
    promp = ProMP(n_dims=n_dims, n_weights_per_dim=n_weights_per_dim)
    promp.from_weight_distribution(
        conditional_weight_distribution.mean,
        conditional_weight_distribution.covariance)
    mean = promp.mean_trajectory(T_query)
    var = promp.var_trajectory(T_query)
    samples = promp.sample_trajectories(T_query, 100, random_state)
    c = [0, 0, 0]
    c[idx] = 1
    # columns [:6] / [6:] hold the exponential coordinates used for the
    # left2base / right2base trajectories below
    fig.plot_trajectory(ptr.pqs_from_transforms(ptr.transforms_from_exponential_coordinates(mean[:, :6])), s=0.05, c=tuple(c))
    fig.plot_trajectory(ptr.pqs_from_transforms(ptr.transforms_from_exponential_coordinates(mean[:, 6:])), s=0.05, c=tuple(c))
    pcl_points = []
    distances = []
    stds = []
    for E in samples:
        P_left = ptr.pqs_from_transforms(ptr.transforms_from_exponential_coordinates(E[:, :6]))
        P_right = ptr.pqs_from_transforms(ptr.transforms_from_exponential_coordinates(E[:, 6:]))
        left2base_ee_pos = P_left[:, :3]
        right2base_ee_pos = P_right[:, :3]
        pcl_points.extend(left2base_ee_pos)
        pcl_points.extend(right2base_ee_pos)
        # per-step distance between the two end-effector positions
        ee_distances = np.linalg.norm(left2base_ee_pos - right2base_ee_pos, axis=1)
        distances.append(np.mean(ee_distances))
        stds.append(np.std(ee_distances))
    print("Mean average distance of end-effectors = %.2f, mean std. dev. = %.3f"
          % (np.mean(distances), np.mean(stds)))
    pcl = plot_pointcloud(fig, pcl_points, color)
    # number keys 1..3 toggle the respective sample point cloud
    key = ord(str((idx + 1) % 10))
    fig.visualizer.register_key_action_callback(key, ToggleGeometry(fig, pcl))
if plot_training_data:
    for E in Es:
        left2base_trajectory = ptr.transforms_from_exponential_coordinates(E[:, :6])
        right2base_trajectory = ptr.transforms_from_exponential_coordinates(E[:, 6:])
        pv.Trajectory(left2base_trajectory, s=0.02).add_artist(fig)
        pv.Trajectory(right2base_trajectory, s=0.02).add_artist(fig)
fig.view_init(azim=0, elev=25)
fig.show()
| [
11748,
299,
32152,
355,
45941,
198,
11748,
12972,
35636,
18,
67,
13,
41464,
7509,
355,
279,
85,
198,
11748,
12972,
35636,
18,
67,
13,
9535,
752,
1749,
355,
50116,
198,
6738,
12972,
35636,
18,
67,
13,
2799,
69,
1330,
471,
4372,
69,
4... | 2.350977 | 1,587 |
#! /usr/bin/env python
"""IOP mail extension for Seaglider Basestation
Author: Sebastian Steinke, 21.03.2017
"""
import sys
import os
from scipy.io import netcdf
import BaseNetCDF
import numpy as np
import time
import BaseOpts
from BaseLog import *
import FileMgr
from CalibConst import getSGCalibrationConstants
import CommLog as CommLog
import Utils as Utils
import Conf
import collections
import Base as Base
import re
import io
# Lightweight record describing one reported glider surfacing/GPS fix.
surface_pos = collections.namedtuple(
    'surface_pos',
    'gps_fix_lon gps_fix_lat gps_fix_time dive_num call_cycle')
class MailContent:
"""Object representing content for automated mail
"""
def dump(self, fo=sys.stdout):
"""Dumps out the structure
"""
#if self.mails != None:
# print >>fo, "Mails: %s" % (self.mails)
if self.glider != None and self.mission != None:
print >>fo, "Summary for Glider: %s, mission: %s" % (self.glider, self.mission)
else:
if self.glider != None:
print >>fo, "Summary for Glider: %s" % (self.glider)
if self.mission != None:
print >>fo, "mission: %s" % (self.mission)
print >>fo, "\n*** Latest communication session: ****"
if self.comm_gps_time != None:
print >>fo, "GPS time: %s" % (self.comm_gps_time)
if self.comm_dive_call_cycle != None:
print >>fo, "Dive and call cycle: %s" % (self.comm_dive_call_cycle)
if self.comm_gps_position != None:
print >>fo, "GPS position: %s" % (self.comm_gps_position)
print >>fo, "\n*** Latest dive: ****"
if self.log_file != None:
print >>fo, "log_file: %s" % (self.log_file)
if self.nc_file != None:
print >>fo, "nc_file: %s" % (self.nc_file)
if self.dive != None:
print >>fo, "Dive: %s" % (self.dive)
if self.call_cycle != None:
print >>fo, "Call_cycle: %s" % (self.call_cycle)
if self.gps_time != None:
print >>fo, "%s" % (self.gps_time)
if self.gps_position != None:
print >>fo, "%s" % (self.gps_position)
if self.target_depth != None:
print >>fo, "Target depth [m]: %s" % (self.target_depth)
if self.max_depth != None:
print >>fo, "Max. depth [m]: %d" % (self.max_depth)
if self.end_dive_reason != None:
print >>fo, "End of dive reason: %s" % (self.end_dive_reason)
if self.target != None and self.target_latLon != None:
print >>fo, "On way to target: %s at %s, %s" % (self.target, self.target_latLon[0], self.target_latLon[1])
if self.distance_target != None:
print >>fo, "Distance to target [m]: %s" % (self.distance_target)
if self.altimeter_ping != None:
print >>fo, "Altimeter ping: %s" % (self.altimeter_ping)
if self.altimeter_bottom_depth != None:
print >>fo, "Altimeter bottom depth: %s" % (self.altimeter_bottom_depth)
if self.error_buffer_overrun != None:
print >>fo, "Error buffer overrun: %s" % (self.error_buffer_overrun)
if self.error_TT8 != None:
print >>fo, "Error TT8: %s" % (self.error_TT8)
if self.error_CFOpeningFiles != None:
print >>fo, "Error CF opening files: %s" % (self.error_CFOpeningFiles)
if self.error_CFWritingFiles != None:
print >>fo, "Error CF writing files: %s" % (self.error_CFWritingFiles)
if self.error_CFClosingFiles != None:
print >>fo, "Error CF closing files: %s" % (self.error_CFClosingFiles)
if self.retries_CFOpeningFiles != None:
print >>fo, "Retries CF opening files: %s" % (self.retries_CFOpeningFiles)
if self.retries_CFWritingFiles != None:
print >>fo, "Retries CF writing files: %s" % (self.retries_CFWritingFiles)
if self.retries_CFClosingFiles != None:
print >>fo, "Retries CF closing files: %s" % (self.retries_CFClosingFiles)
if self.error_pit != None:
print >>fo, "Error pitch: %s" % (self.error_pit)
if self.error_rol != None:
print >>fo, "Error roll: %s" % (self.error_rol)
if self.error_vbd != None:
print >>fo, "Error VBD: %s" % (self.error_vbd)
if self.retries_pit != None:
print >>fo, "Retries pitch: %s" % (self.retries_pit)
if self.retries_rol != None:
print >>fo, "Retries roll: %s" % (self.retries_rol)
if self.retries_vbd != None:
print >>fo, "Retries VBD: %s" % (self.retries_vbd)
if self.error_noGPSFix != None:
print >>fo, "Error no GPS Fix: %s" % (self.error_noGPSFix)
if self.error_sensor_timeout != None:
print >>fo, "Error sensor timeout: %s" % (self.error_sensor_timeout)
print >>fo, "\n*** Critical errors from capture file (if any): ****"
if self.critical_msg != None:
print >>fo, "%s" % (self.critical_msg)
else:
print >>fo, "None"
print >>fo, "\n*** Sensor ranges: ****"
if self.temperature != None:
print >>fo, "Temperature: %.2f - %.2f" % (self.temperature[0], self.temperature[1])
if self.salinity != None:
print >>fo, "Salinity: %.2f - %.2f" % (self.salinity[0], self.salinity[1])
if self.density != None:
print >>fo, "Density: %.2f - %.2f" % (self.density[0], self.density[1])
if self.dis_oxygen != None:
print >>fo, "Dissolved oxygen: %.2f - %.2f" % (self.dis_oxygen[0], self.dis_oxygen[1])
if self.cdom != None:
print >>fo, "CDOM: %.2f - %.2f" % (self.cdom[0], self.cdom[1])
if self.chla != None:
print >>fo, "Chlorophyll a: %.2f - %.2f" % (self.chla[0], self.chla[1])
def fill_from_log(self, logfile):
""" Get information from log file
"""
if os.path.isfile(logfile):
log_info("Reading from log file %s..." % logfile)
head, tail = os.path.split(os.path.abspath(os.path.expanduser(logfile)))
self.log_file = tail
for line in open(logfile, 'r'):
line = line.strip('\n')
# TODO: add $D_GRID (to show if bathy or target depth was used)
if re.search('\$ID,', line):
self.glider = line.split(',')[-1]
if re.search('MISSION', line):
self.mission = line.split(',')[-1]
if re.search('\$DIVE,', line):
self.dive = line.split(',')[-1]
if re.search('_CALLS', line):
self.call_cycle = line.split(',')[-1]
if re.search('TGT_NAME', line):
self.target = line.split(',')[-1]
if re.search('TGT_LATLONG', line):
self.target_latLon = line.split(',')[1:]
if re.search('D_TGT', line):
self.target_depth = line.split(',')[-1]
if re.search('\$ERRORS', line):
# Translate numbers into errors/retries
self.errors = line.split(',')[1:]
str_arr = line.split(',')[1:]
if len(str_arr) != 16:
log_error("Could not read Errors line from log file. Length != 16. Line: %s" % line,'exc')
else:
if str_arr[0] != '0':
self.error_buffer_overrun = str_arr[0]
if str_arr[1] != '0':
self.error_TT8 = str_arr[1]
if str_arr[2] != '0':
self.error_CFOpeningFiles = str_arr[2]
if str_arr[3] != '0':
self.error_CFWritingFiles = str_arr[3]
if str_arr[4] != '0':
self.error_CFClosingFiles = str_arr[4]
if str_arr[5] != '0':
self.retries_CFOpeningFiles = str_arr[5]
if str_arr[6] != '0':
self.retries_CFWritingFiles = str_arr[6]
if str_arr[7] != '0':
self.retries_CFClosingFiles = str_arr[7]
if str_arr[8] != '0':
self.error_pit = str_arr[8]
if str_arr[9] != '0':
self.error_rol = str_arr[9]
if str_arr[10] != '0':
self.error_vbd = str_arr[10]
if str_arr[11] != '0':
self.retries_pit = str_arr[11]
if str_arr[12] != '0':
self.retries_rol = str_arr[12]
if str_arr[13] != '0':
self.retries_vbd = str_arr[13]
if str_arr[14] != '0':
self.error_noGPSFix = str_arr[14]
if str_arr[15] != '0':
self.error_sensor_timeout = str_arr[15]
if re.search('MHEAD_RNG_PITCHd_Wd', line):
# get distance to target
self.distance_target = line.split(',')[2]
if re.search(',end dive', line):
self.end_dive_reason = line.split(',')[-1]
if re.search('\$ALTIM_BOTTOM_PING,', line):
str_arr = line.split(',')
if len(str_arr) == 3:
# ping and response...
self.altimeter_ping = line.split(',')[1]
self.altimeter_bottom_depth = float(line.split(',')[1]) + float(line.split(',')[2])
elif len(str_arr) == 2:
# ping and no response...
self.altimeter_ping = line.split(',')[1]
self.altimeter_bottom_depth = 'no bottom detected'
def fill_from_cap(self, capfile):
""" Get lines with critical messages from capture file
"""
if os.path.isfile(capfile):
log_info("Reading from cap file %s..." % capfile)
for line in open(capfile, 'r'):
line = line.strip('\n')
if re.search(',C,', line):
if self.critical_msg == None:
self.critical_msg = line + '\n'
else:
self.critical_msg = self.critical_msg + line + '\n'
def fill_from_comm(self, commfile, base_opts):
""" Get latest GPS fix from comm.log file
"""
(comm_log, start_post, _, _) = CommLog.process_comm_log(os.path.join(base_opts.mission_dir, 'comm.log'), base_opts)
if(comm_log == None):
log_warning("Could not process comm.log")
surface_positions = []
if(comm_log != None):
for session in comm_log.sessions:
if(session.gps_fix != None and session.gps_fix.isvalid):
surface_positions.append(surface_pos(Utils.ddmm2dd(session.gps_fix.lon),Utils.ddmm2dd(session.gps_fix.lat),time.mktime(session.gps_fix.datetime), session.dive_num, session.call_cycle))
surface_positions = sorted(surface_positions, key=lambda position: position.gps_fix_time)
last_surface_position = surface_positions[-1] if len(surface_positions) else None
if last_surface_position:
self.comm_dive_call_cycle = "Comm dive: %d:%d" % (last_surface_position.dive_num, last_surface_position.call_cycle)
self.comm_gps_position = ("GPS Fix: %.4f, %.4f" % (last_surface_position.gps_fix_lat, last_surface_position.gps_fix_lon))
self.comm_gps_time = ("Fix time " + str(time.strftime("%H:%M:%S %m/%d/%y %Z",time.gmtime(last_surface_position.gps_fix_time))))
return comm_log, last_surface_position
def main(instrument_id=None, base_opts=None, sg_calib_file_name=None, dive_nc_file_names=None, nc_files_created=None,
processed_other_files=None, known_mailer_tags=None, known_ftp_tags=None):
"""App to extract data from different basestation files and send result via mail...
Returns:
0 for success (although there may have been individual errors in
file processing).
Non-zero for critical problems.
Raises:
Any exceptions raised are considered critical errors and not expected
"""
if base_opts is None:
base_opts = BaseOpts.BaseOptions(sys.argv, 'k',
usage="%prog [Options] ")
BaseLogger("iop_mailer", base_opts)
args = BaseOpts.BaseOptions._args
if(not base_opts.mission_dir):
print main.__doc__
return 1
processing_start_time = time.time()
log_info("Started processing " + time.strftime("%H:%M:%S %d %b %Y %Z", time.gmtime(time.time())))
log_info("Config name = %s" % base_opts.config_file_name)
if(not dive_nc_file_names):
dive_nc_file_names = MakeDiveProfiles.collect_nc_perdive_files(base_opts)
content = MailContent()
# Read data from comm.log
comm_log, last_surface_position = content.fill_from_comm(os.path.join(base_opts.mission_dir, 'comm.log'), base_opts)
# Read latest netCDF file
if(dive_nc_file_names and len(dive_nc_file_names) > 0):
dive_nc_file_names.sort()
# Use last dive
dive_nc_file_name = dive_nc_file_names[-1]
content.fill_from_nc(dive_nc_file_name)
if(instrument_id == None):
if(comm_log != None):
instrument_id = comm_log.get_instrument_id()
if(instrument_id < 0 and dive_nc_file_names and len(dive_nc_file_names) > 0):
instrument_id = FileMgr.get_instrument_id(dive_nc_file_names[0])
if(instrument_id < 0):
log_error("Could not get instrument id - bailing out")
return 1
# find log file and read information:
log_name = 'p%03d%04d.log' % (instrument_id, last_surface_position.dive_num)
log_file_name = os.path.join(base_opts.mission_dir, log_name)
if os.path.isfile(log_file_name):
content.fill_from_log(log_file_name)
content.read_configuration(base_opts)
dump_file = os.path.join(base_opts.mission_dir, 'iop_mailer.txt')
try:
fo = open(dump_file, "w")
except:
log_error("Could not open %s" % dump_file)
log_error(traceback.format_exc())
content.dump(fo=fo)
fo.close()
with open(dump_file, 'r') as content_file:
mail_content = content_file.read()
content.send_mail(mail_content, base_opts)
log_info("Finished processing " + time.strftime("%H:%M:%S %d %b %Y %Z", time.gmtime(time.time())))
log_info("Run time %f seconds" % (time.time() - processing_start_time))
return 0
if __name__ == "__main__":
retval = 1
os.environ['TZ'] = 'UTC'
time.tzset()
try:
retval = main()
except Exception:
log_critical("Unhandled exception in main -- exiting")
sys.exit(retval)
| [
2,
0,
1220,
14629,
14,
8800,
14,
24330,
21015,
198,
37811,
40,
3185,
6920,
7552,
329,
1001,
363,
75,
1304,
6455,
27364,
198,
220,
220,
6434,
25,
26190,
15215,
365,
11,
2310,
13,
3070,
13,
5539,
198,
37811,
198,
11748,
25064,
198,
11... | 1.974041 | 7,666 |
#################################################################
###############written by fbb, dkudrow, shillberry########################
#Reads all of the images in a directory, sorts them by Strehl
#Ratio or consecutive order, and stacks the specified top percent
#either aligned to a guide star, correlating in fourier space,
#or not aligning, weighting if requested.
#This version includes:
#detripling for compact binaries, drizzle algorithm for
#subpixel alignment and dynamic guide region.
#Type $python stacked.py -h for usage
####
#last modified 11/14/2011
#################################################################
print 'Initializing stacking sequence...'
import sys,os,time
import pyfits as PF
from numpy import *
from scipy import *
from scipy.interpolate import interp1d
from scipy.fftpack import fft2, ifft2
import threading
import multiprocessing
import Queue
import matplotlib
matplotlib.use('Agg')
from pylab import plt, axvline, savefig, subplot, figure, plot, legend, title, hist, imshow, show
sys.path.append("../LIHSPcommon")
from myutils import readconfig, mygetenv, mymkdir
from myioutils import *
from mysciutils import *
from time import time
print 'Loaded all packages'
DEBUG = False
####################################################################3
####################################################################3
######################################################################
######################################################################
def phase_correct(axis, radius, shifts, r):
'''phase_correct(axis, radius, shifts, r) In the event of a possibly incorrect shift, uses peak detection on the cross-power spectrum to find additional correlations'''
print 'Running peak detect...'
maxtab, maxmat = peakdetect2d(r, radius)
k = 1
while (abs(shifts[0]) > radius or abs(shifts[1]) > radius) \
and k < len(maxtab):
phase = maxtab[k][0]
print "phase = " + str(phase)
### Checks if image has negative shift ###
if phase[0] > axis[0]/2:
shifts[0] = phase[0] - axis[0]
else:
shifts[0] = phase[0]
if phase[1] > axis[1]/2:
shifts[1] = phase[1] - axis[1]
else:
shifts[1] = phase[1]
k+=1
print "...shift in NAXIS1 = %d" %shifts[1]
print "...shift in NAXIS2 = %d" %shifts[0]
if abs(shifts[0]) > radius or abs(axis1_shift) > radius:
print 'No good correlation found. Omitting frame'
_OMIT_ = True
error.write('No good correlation found for ' + fnames[i] + ' ... frame omitted.\n')
return [shifts[0], shifts[1], _OMIT_]
######################################################################
def combine(shifts, images):
'''Parameters: [axis2_shift,axis1_shift] and [image0,image1] to be combined according to appropriate shifts. Shifts correspond to image1 correlated to image0'''
axis2_shift = shifts[0]
axis1_shift = shifts[1]
stack = images[0]
fitspad = images[1]
if axis2_shift >= 0:
if axis2_shift == 0: axis2_shift = -fitspad.shape[0]
if axis1_shift >= 0:
if axis1_shift == 0: axis1_shift = -fitspad.shape[1]
stack[axis2_shift:,axis1_shift:] += \
fitspad[:-axis2_shift,:-axis1_shift]
else: #axis1_shift < 0
stack[axis2_shift:,:-abs(axis1_shift)] += \
fitspad[:-axis2_shift,abs(axis1_shift):]
else: #axis2_shift < 0
if axis1_shift >= 0:
if axis1_shift == 0: axis1_shift = -fitspad.shape[1]
stack[:-abs(axis2_shift),axis1_shift:] += \
fitspad[abs(axis2_shift):,:-axis1_shift]
else: #axis1_shift < 0
stack[:-abs(axis2_shift),:-abs(axis1_shift)] += \
fitspad[abs(axis2_shift):,abs(axis1_shift):]
return stack
######################################################################
def makefft(data, fast):
'''Quick operator to make fast fourier transform (FFT) either using PyFFTW3 (if available) or otherwise scipy.fftpack'''
if fast:
data = np.array(data, complex)
fft = np.zeros(data.shape, complex)
plan = fftw3.Plan(data, fft, 'forward')
plan.execute()
else:
fft = fft2(data)
return fft
######################################################################
queue = Queue.Queue()
######################################################################
######################################################################
#######################################################################
########################################################
########################################################
########################################################
########################################################
if __name__ == '__main__':
import optparse
parser = optparse.OptionParser(usage='makelucky.py -c configfilename', conflict_handler="resolve")
parser.add_option('-c','--configfile' , type="string",
help='full name to configuration file')
parser.add_option('--debug', default=False, action="store_true",
help='debug')
options, args = parser.parse_args()
if len(args)>0 or options.configfile == None:
sys.argv.append('--help')
options, args = parser.parse_args()
print """Usage. Requires:
**name of parameter file conatining :**
Directory containing images
dark file
dark method
Guide star x coordinate
Guide star y coordinate
region dimensions x
percentage of images to be selected (0-100)
lucky: \'lucky\',\'weighted\', \'coadded\', \'corr\'
shift: \'align\' or \'none\'
detripling: \'v\' or \'h\' or \'none\'
minimum separation or cores
core size for detripling (odd integer)
dynamic guide region; 1 or 0
saturation
fftw3: \'y\' to use PyFFTW3 package, \
\'n\' to use normal SciPy FFTpack
"""
sys.exit()
if options.debug:
DEBUG=True
##### DECLARE VARIABLES #####
pars = readconfig(options.configfile)
gsx=pars['x']
gsy=pars['y']
rad=pars['r']
select=pars['sel']
pc = pars['percent']
ps = pars['ps']
shift=pars['align']
detrip=pars['detrip']
minsep=float(pars['separation'])
coresz=pars['core']
follow=pars['centroid']
saturation=float(pars['saturation'])
global fast
fast = pars['fftw3'].startswith('y')
nameroot=pars['spool'][0]
if nameroot.endswith('.fits'):
nameroot = nameroot[:-5]
print nameroot
global outpath
if len(pars['spool'])>1:
outpath = '%s/%s_all'%(mygetenv('SPEEDYOUT'),nameroot)
else:
outpath = '%s/%s'%(mygetenv('SPEEDYOUT'),nameroot)
inpath='%s/unspooled/' %(outpath)
#inpath='/science/fbianco/LIdata/%s/unspooled'%(nameroot)
if len(select[0]) == 1:
print "\nprocessing %s\n\n\n"%select
ret = createstack(inpath,gsx,gsy,rad,select,pc,shift,detrip,minsep,outpath,coresz,follow,ps, saturation, fast)
else:
for sel in select:
createstack(inpath,gsx,gsy,rad,sel,pc,shift,detrip,minsep,outpath,coresz,follow,ps, saturation, fast)
| [
29113,
29113,
2,
198,
7804,
4242,
21017,
15266,
416,
277,
11848,
11,
288,
74,
463,
808,
11,
427,
359,
8396,
14468,
7804,
198,
2,
5569,
82,
477,
286,
262,
4263,
287,
257,
8619,
11,
10524,
606,
416,
9737,
18519,
198,
2,
29665,
952,
... | 2.627234 | 2,798 |
__all__ = ["reservation", "user_info"]
for _import in __all__:
__import__(__package__ + "." + _import)
| [
834,
439,
834,
796,
14631,
411,
13208,
1600,
366,
7220,
62,
10951,
8973,
198,
198,
1640,
4808,
11748,
287,
11593,
439,
834,
25,
198,
220,
220,
220,
11593,
11748,
834,
7,
834,
26495,
834,
1343,
366,
526,
1343,
4808,
11748,
8,
198
] | 2.571429 | 42 |
from .common import load_source_list
import tensorflow as tf
import numpy as np
import ot
import os
import pickle
import argparse
g_centering_trick = False # doesn't change results much
g_sinkhorn_reg = 0.1
| [
6738,
764,
11321,
1330,
3440,
62,
10459,
62,
4868,
198,
198,
11748,
11192,
273,
11125,
355,
48700,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
30972,
198,
11748,
28686,
198,
11748,
2298,
293,
198,
11748,
1822,
29572,
198,
198,
70,
6... | 3.102941 | 68 |
#!/usr/bin/env python
import glob
import re
import sys
# Format generated .htm to have correct syntax and additional styling.
STYLE = """\
<style type=\"text/css\"><!--
body {
font-family: Verdana;
font-size: 10pt;
}
td {
font-family: Verdana;
font-size: 10pt;
}
a {
font-weight: bold
}
--></style>"""
endhead_re = re.compile(r"</title>\s*(</head>)", re.MULTILINE|re.IGNORECASE)
if __name__ == "__main__":
main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
11748,
15095,
198,
11748,
302,
198,
11748,
25064,
198,
198,
2,
18980,
7560,
764,
19211,
284,
423,
3376,
15582,
290,
3224,
35517,
13,
198,
198,
2257,
56,
2538,
796,
37227,
59,
198,... | 2.389474 | 190 |
from microbit import *
P0_MAX = 812
# main program
while True:
reading = pin0.read_analog()
display.clear()
barchart(4, reading, P0_MAX)
if button_a.was_pressed():
pin2.write_digital(0) # off
if button_b.was_pressed():
pin2.write_digital(1) # on
sleep(1000) # 1 second
| [
6738,
4580,
2545,
1330,
1635,
198,
198,
47,
15,
62,
22921,
796,
807,
1065,
198,
220,
220,
220,
220,
220,
220,
220,
220,
198,
2,
1388,
1430,
198,
4514,
6407,
25,
198,
220,
220,
220,
3555,
796,
6757,
15,
13,
961,
62,
272,
11794,
3... | 2.06135 | 163 |
"""
Copyright 2020 Kartik Sharma
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import wrappers
from register.register import FUNNEL
"""Funnel Interface class"""
class Funnel(object):
"""Funnel.
Funnel Class which gets the required Funnel given in
configuration.
"""
def __init__(self, data_path, config, datatype="bbox", training=True):
# pylint: disable=line-too-long
"""__init__.
Args:
data_path: Data path in structured format,please see readme file
for more information.
config: Config passed as dict instance containing all required.
datatype: Dataset type e.g ['bbox','categorical','segmentation'],
bbox - Bounding Box dataset containing object detection
data. i.e x1,y1,x2,y2
categorical - Categorical data i.e categorical
(multi class) or binary (two class) for
Classification problems.
Example:
**********************************************************
from TensorPipe.pipe import Funnel
funnel = Funnel('testdata',config=config,datatype='categorical')
# high performance with parallelism tf.data iterable.
dataset = funnel.dataset(type = 'train')
for data in dataset:
# feed the data to NN or visualize.
print(data[0])
"""
# pylint: enable=line-too-long
if datatype not in wrappers.ALLOWED_TYPES:
raise TypeError(
"datasettype not in ALLOWED_TYPEs, please check\
allowed dataset i.e bbox,classification labels,\
segmentation."
)
_funnel_class = FUNNEL.get(datatype)
self._funnel = _funnel_class(data_path, config, datatype=datatype,training=training)
def dataset(self, type: str = "Train") -> tf.data:
"""dataset.
Dataset function which provides high performance tf.data
iterable, which gives tuple comprising (x - image, y - labels)
Iterate over the provided iterable to for feeding into custom
training loop for pass it to keras model.fit.
Args:
type: Subset data for the current dataset i.e train,val,test.
"""
if type.lower() not in ["train", "val", "test", "validation"]:
raise Exception("Subset Data you asked is not a valid portion")
# high performance tf.data iterable
iterable = self._funnel.dataset(type)
return iterable
| [
37811,
198,
220,
220,
15069,
12131,
32872,
1134,
40196,
628,
220,
220,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
220,
220,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
1378... | 2.344521 | 1,460 |
from dashapp.functions import *
from dashapp.layouts import *
| [
6738,
14470,
1324,
13,
12543,
2733,
1330,
1635,
198,
6738,
14470,
1324,
13,
10724,
5269,
1330,
1635,
198
] | 3.444444 | 18 |
from django.core.cache import cache
from django.test import TestCase as DjangoTestCase
from django.conf import settings
from scheduler import mock
from scheduler.utils import generate_random_name
| [
6738,
42625,
14208,
13,
7295,
13,
23870,
1330,
12940,
198,
6738,
42625,
14208,
13,
9288,
1330,
6208,
20448,
355,
37770,
14402,
20448,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
198,
6738,
6038,
18173,
1330,
15290,
198,
6738,
603... | 3.96 | 50 |
import logging
import pytest
from bluesky import RunEngine
from bluesky.plan_stubs import close_run, open_run, stage, unstage
from ophyd.sim import make_fake_device
from ophyd.status import wait as status_wait
from pcdsdevices.epics_motor import (IMS, PMC100, BeckhoffAxis, EpicsMotor,
EpicsMotorInterface, Motor,
MotorDisabledError, Newport,
PCDSMotorBase, OffsetMotor,
OffsetIMSWithPreset)
logger = logging.getLogger(__name__)
def fake_class_setup(cls):
"""
Make the fake class and modify if needed
"""
FakeClass = make_fake_device(cls)
return FakeClass
def motor_setup(motor):
"""
Set up the motor based on the class
"""
if isinstance(motor, EpicsMotorInterface):
motor.user_readback.sim_put(0)
motor.high_limit_travel.put(100)
motor.low_limit_travel.put(-100)
motor.user_setpoint.sim_set_limits((-100, 100))
if isinstance(motor, PCDSMotorBase):
motor.motor_spg.sim_put(2)
motor.motor_spg.sim_set_enum_strs(['Stop', 'Pause', 'Go'])
if isinstance(motor, IMS):
motor.bit_status.sim_put(0)
motor.part_number.sim_put('PN123')
motor.error_severity.sim_put(0)
motor.reinit_command.sim_put(0)
def fake_motor(cls):
"""
Given a real class, lets get a fake motor
"""
FakeCls = fake_class_setup(cls)
motor = FakeCls('TST:MTR', name='test_motor')
motor_setup(motor)
return motor
# Here I set up fixtures that test each level's overrides
# Test in subclasses too to make sure we didn't break it!
@pytest.fixture(scope='function',
params=[EpicsMotorInterface, PCDSMotorBase, IMS, Newport,
PMC100, BeckhoffAxis])
def fake_epics_motor(request):
"""
Test EpicsMotorInterface and subclasses
"""
return fake_motor(request.param)
@pytest.fixture(scope='function',
params=[PCDSMotorBase, IMS, Newport, PMC100])
def fake_pcds_motor(request):
"""
Test PCDSMotorBase and subclasses
"""
return fake_motor(request.param)
@pytest.fixture(scope='function')
def fake_ims():
"""
Test IMS-specific overrides
"""
return fake_motor(IMS)
@pytest.fixture(scope='function')
def fake_beckhoff():
"""
Test Beckhoff-specific overrides
"""
return fake_motor(BeckhoffAxis)
@pytest.fixture(scope='function')
@pytest.fixture(scope='function')
@pytest.mark.parametrize("cls", [PCDSMotorBase, IMS, Newport, PMC100,
BeckhoffAxis, EpicsMotor])
@pytest.mark.timeout(5)
@pytest.mark.parametrize("cls", [OffsetMotor, OffsetIMSWithPreset])
@pytest.mark.timeout(5)
| [
11748,
18931,
198,
198,
11748,
12972,
9288,
198,
6738,
25570,
2584,
1330,
5660,
13798,
198,
6738,
25570,
2584,
13,
11578,
62,
301,
23161,
1330,
1969,
62,
5143,
11,
1280,
62,
5143,
11,
3800,
11,
555,
14247,
198,
6738,
267,
746,
5173,
1... | 2.236465 | 1,256 |
# import sys, time
# class TextIntonationDecoder:
# color_start = '\u001b' # ansi code start
# color_end = 'm' # ansi code end
# pause_code = '!pause'
# @classmethod
# def get_pause_code(cls):
# return cls.pause_code
# @classmethod
# def parse_color(cls, s:str):
# """Returns sequence of ansi codes and pure strings."""
# parsed = []
# p = ''
# found_ansi = False
# i = 0
# while i < len(s):
# c = s[i]
# if c == cls.color_start:
# if p:
# parsed.append(p)
# p = ''
# while c != cls.color_end and i < len(s):
# c = s[i]
# p += c
# i += 1
# parsed.append(p)
# p = ''
# else:
# while i < len(s):
# if s[i] != cls.color_start:
# p += s[i]
# i += 1
# else:
# break
# parsed.append(p)
# p = ''
# return tuple(parsed)
# @classmethod
# def is_color(cls, s:str):
# return s.startswith(cls.color_start) and s.endswith(cls.color_end)
# @classmethod
# def parse_pause(cls, s:str):
# """Returns tuple of strings."""
# pause = cls.pause_code
# if s.find(pause) > -1:
# parsed = []
# for i, word in enumerate(s.split(pause)):
# if word:
# parsed.append(word)
# parsed.append(pause)
# if not s.endswith(pause): # remove extra !pause at end
# parsed.pop()
# else:
# parsed = (s,)
# return tuple(parsed)
# @classmethod
# def is_pause(cls, s:str):
# return s == cls.pause_code
# @classmethod
# def parse(cls, s:str):
# parsed_pauses = cls.parse_pause(s)
# parsed = []
# for part in parsed_pauses:
# parsed.extend(cls.parse_color(part))
# return tuple(parsed)
# class ProphetView:
# char_pause = 0.02
# word_pause = 0.8
# intonation_decoder = TextIntonationDecoder
# """Defines printing utilities."""
# @classmethod
# def pause(cls, duration):
# time.sleep(duration)
# @classmethod
# def intonated_print(cls, phrase:str, need_cr:bool=True, wpause:float=None, cpause:float=None):
# """Phrase is intonation string."""
# idec = cls.intonation_decoder
# if isinstance(phrase, str):
# if idec.is_pause(phrase) or idec.is_color(phrase):
# raise Exception('Passed color or pause code to print:' + repr(phrase))
# else:
# cls._animated_print_str(phrase, cpause)
# else:
# wpause = cls.word_pause if wpause is None else wpause
# for part in phrase:
# if idec.is_color(part):
# sys.stdout.write(part)
# elif idec.is_pause(part):
# time.sleep(wpause)
# else:
# cls._animated_print_str(part, cpause)
# if need_cr:
# print()
# @classmethod
# def _animated_print_str(cls, s:str, cpause:float=None):
# cpause = cls.char_pause if cpause is None else cpause
# for c in s:
# sys.stdout.write(c)
# sys.stdout.flush()
# time.sleep(cpause)
# @classmethod
# def _clean_stdout(cls, length:int):
# """Removes last length characters from stdout."""
# sys.stdout.write('\b'*length)
# sys.stdout.write(' ' *length)
# sys.stdout.write('\b'*length)
# sys.stdout.flush()
# @classmethod
# def looped_print(cls, prompt:str='', frames=None, duration:float=5.0, fps=5):
# """Prints `<prompt><frame>` where <frame> changes inplace during duration (sec).
# Print of <prompt> does not count to duration.
# frames - must support next(), each item is str
# """
# cls._animated_print_str(prompt)
# pause = 1.0 / fps
# tstart = time.time()
# frame = ''
# written = 0
# while time.time() - tstart < duration:
# frame = next(frames)
# for part in frame:
# if cls.intonation_decoder.is_color(part):
# sys.stdout.write(part)
# else:
# written += sys.stdout.write(part)
# sys.stdout.flush()
# time.sleep(pause)
# cls._clean_stdout(written)
# written = 0
# cls._clean_stdout(len(prompt) + written)
| [
2,
1330,
25064,
11,
640,
628,
198,
2,
1398,
8255,
5317,
261,
341,
10707,
12342,
25,
198,
2,
220,
220,
220,
220,
3124,
62,
9688,
796,
705,
59,
84,
8298,
65,
6,
220,
1303,
9093,
72,
2438,
923,
198,
2,
220,
220,
220,
220,
3124,
6... | 1.771703 | 2,707 |
from __future__ import annotations
from homeassistant.components.climate import (
DOMAIN as PLATFORM_CLIMATE,
ClimateEntity,
)
from homeassistant.components.climate.const import (
PRESET_AWAY, PRESET_COMFORT, PRESET_SLEEP, SUPPORT_PRESET_MODE, SUPPORT_TARGET_TEMPERATURE,
CURRENT_HVAC_HEAT, CURRENT_HVAC_IDLE, CURRENT_HVAC_OFF,
HVAC_MODE_AUTO, HVAC_MODE_HEAT, HVAC_MODE_OFF,
)
from homeassistant.components.number import (
DOMAIN as PLATFORM_NUMBER,
NumberEntity,
)
from homeassistant.components.number.const import (
MODE_BOX,
)
from homeassistant.const import (
DEVICE_CLASS_TEMPERATURE,
TEMP_CELSIUS,
ENTITY_CATEGORY_CONFIG,
)
from ..meross_entity import _MerossEntity
from ..merossclient import const as mc # mEROSS cONST
# mts100 native operating-mode enum values (device protocol)
MTS100_MODE_CUSTOM = 0
MTS100_MODE_COMFORT = 1  # aka 'Heat'
MTS100_MODE_SLEEP = 2  # aka 'Cool'
MTS100_MODE_AWAY = 4  # aka 'Economy'
MTS100_MODE_AUTO = 3

# extra HA preset keys used alongside the imported PRESET_* constants
PRESET_OFF = 'off'
PRESET_CUSTOM = 'custom'
#PRESET_COMFORT = 'heat'
#PRESET_COOL = 'cool'
#PRESET_ECONOMY = 'economy'
PRESET_AUTO = 'auto'

# map mts100 mode enums to HA preset keys
MODE_TO_PRESET_MAP = {
    MTS100_MODE_CUSTOM: PRESET_CUSTOM,
    MTS100_MODE_COMFORT: PRESET_COMFORT,
    MTS100_MODE_SLEEP: PRESET_SLEEP,
    MTS100_MODE_AWAY: PRESET_AWAY,
    MTS100_MODE_AUTO: PRESET_AUTO
}
# reverse map, derived from the forward map so the two always stay in sync
PRESET_TO_MODE_MAP = {preset: mode for mode, preset in MODE_TO_PRESET_MAP.items()}
# when HA requests an HVAC mode we'll map it to a 'preset'
HVAC_TO_PRESET_MAP = {
    HVAC_MODE_OFF: PRESET_OFF,
    HVAC_MODE_HEAT: PRESET_CUSTOM,
    HVAC_MODE_AUTO: PRESET_AUTO
}
# when setting target temp we'll set an appropriate payload key
# for the mts100 depending on current 'preset' mode.
# if mts100 is in any of 'off', 'auto' we just set the 'custom'
# target temp but of course the valve will not follow
# this temp since it's mode is not set to follow a manual set
PRESET_TO_TEMPKEY_MAP = {
    PRESET_OFF: mc.KEY_CUSTOM,
    PRESET_CUSTOM: mc.KEY_CUSTOM,
    PRESET_COMFORT: mc.KEY_COMFORT,
    PRESET_SLEEP: mc.KEY_ECONOMY,
    PRESET_AWAY: mc.KEY_AWAY,
    PRESET_AUTO: mc.KEY_CUSTOM
}
# icons shown for the presets that have a dedicated one
PRESET_TO_ICON_MAP = {
    PRESET_COMFORT: 'mdi:sun-thermometer',
    PRESET_SLEEP: 'mdi:power-sleep',
    PRESET_AWAY: 'mdi:bag-checked',
}
| [
6738,
11593,
37443,
834,
1330,
37647,
198,
198,
6738,
1363,
562,
10167,
13,
5589,
3906,
13,
42570,
1330,
357,
198,
220,
220,
220,
24121,
29833,
355,
9297,
1404,
21389,
62,
5097,
3955,
6158,
11,
198,
220,
220,
220,
13963,
32398,
11,
19... | 2.25588 | 1,063 |
import time
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
import gpflow
from gpflow.utilities import set_trainable
def elbo_fullcov(q_mu,
                 q_sqrt_latent,
                 inducing_inputs,
                 D_idxs,
                 max_idxs,
                 kernel,
                 inputs,
                 indifference_threshold,
                 standard_mvn_samples=None,
                 n_sample=1000):
    """
    Calculates the ELBO for the PBO formulation, using a full covariance matrix.

    :param q_mu: tensor with shape (num_inducing, 1); variational mean of the inducing variables
    :param q_sqrt_latent: tensor with shape (1, num_inducing, num_inducing). Will be forced into lower triangular
        matrix such that q_sqrt @ q_sqrt^T represents the covariance matrix of inducing variables
    :param inducing_inputs: tensor with shape (num_inducing, input_dims)
    :param D_idxs: TensorArray with num_data entries.
        Input data points, that are indices into q_mu and q_var for tf.gather_nd
    :param max_idxs: tensor with shape (num_data,)
        Selection of most preferred input point for each collection of data points, that are indices into
        q_mu and q_var
    :param kernel: gpflow kernel to calculate covariance matrix for KL divergence
    :param inputs: tensor of shape (num_inputs, input_dims) with indices corresponding to that of D_idxs and max_idxs
    :param indifference_threshold: scalar tensor; threshold used by the preference likelihood
    :param standard_mvn_samples: optional (n_sample, num_inputs) tensor of N(0, I) samples. Pass a fixed
        tensor to make the Monte-Carlo ELBO estimate deterministic; fresh samples are drawn when None.
    :param n_sample: number of Monte-Carlo samples drawn when standard_mvn_samples is None
    :return: tensor of shape ()
    """
    Kmm = kernel.K(inducing_inputs)
    logdet_Kmm = tf.linalg.logdet(Kmm)
    invKmm = cholesky_matrix_inverse(Kmm)

    num_data = D_idxs.size()
    num_inducing = tf.shape(inducing_inputs)[0]
    num_input = tf.shape(inputs)[0]

    # BUGFIX: previously standard_mvn_samples was unconditionally re-sampled here,
    # which silently discarded caller-provided (deterministic) samples and made the
    # `if standard_mvn_samples is None` guard below dead code.

    q_sqrt = tf.linalg.band_part(q_sqrt_latent, -1, 0)  # Force into lower triangular
    q_full = q_sqrt @ tf.linalg.matrix_transpose(q_sqrt)  # (1, num_inducing, num_inducing)

    # q(f) = \int p(f|u) q(u) du
    f_mean, f_cov = q_f(q_mu, q_full, inducing_inputs, kernel, inputs, Kmm, invKmm)
    # f_mean: (num_input,)
    # f_cov: (num_input, num_input)

    if standard_mvn_samples is None:
        standard_mvn = tfp.distributions.MultivariateNormalDiag(
            loc=tf.zeros(num_input, dtype=tf.float64),
            scale_diag=tf.ones(num_input, dtype=tf.float64))
        standard_mvn_samples = standard_mvn.sample(n_sample)
        # (n_sample, num_input)

    # Reparameterization: f = mean + L @ eps with L the Cholesky factor of f_cov
    transform_mat = tf.linalg.cholesky(f_cov)
    # (num_input, num_input)
    zero_mean_f_samples = tf.squeeze(transform_mat @ tf.expand_dims(standard_mvn_samples, axis=-1), axis=-1)
    f_samples = zero_mean_f_samples + f_mean
    # (n_sample, num_input)

    # KL[q(u) || p(u)] = KL[q(f) || p(f)] = E_{q(f)} log [q(f) / p(f)], estimated by Monte Carlo
    transform_mat_inv = tf.linalg.triangular_solve(transform_mat, tf.eye(num_input, dtype=tf.float64))
    f_cov_inv = tf.linalg.matrix_transpose(transform_mat_inv) @ transform_mat_inv
    # (num_input, num_input)
    logdet_f_cov = tf.linalg.logdet(f_cov)

    f_cov_prior = kernel.K(inputs)
    f_cov_prior_inv = cholesky_matrix_inverse(f_cov_prior)
    logdet_f_cov_prior = tf.linalg.logdet(f_cov_prior)

    # The (2*pi)^k normalization constants of the two Gaussians cancel and are omitted.
    klterm = tf.reduce_mean(
        -0.5 * (logdet_f_cov
                + zero_mean_f_samples @ f_cov_inv @ tf.linalg.matrix_transpose(zero_mean_f_samples))
        + 0.5 * (logdet_f_cov_prior
                 + f_samples @ f_cov_prior_inv @ tf.linalg.matrix_transpose(f_samples))
    )

    # NOTE(review): `body` is not defined in this scope; the expected-likelihood loop
    # body is presumably defined at module level elsewhere in this file — verify.
    cond = lambda i, _: i < num_data
    _, likelihood = tf.while_loop(
        cond,
        body,
        (0, tf.constant(0.0, dtype=tf.float64)),
        parallel_iterations=30)

    elbo = likelihood - klterm
    return elbo
def cholesky_matrix_inverse(A):
    """
    Invert a symmetric positive-definite matrix via its Cholesky factorization.

    :param A: Symmetric positive-definite matrix, tensor of shape (n, n)
    :return: Inverse of A, tensor of shape (n, n)
    """
    chol = tf.linalg.cholesky(A)
    identity = tf.eye(A.shape[0], dtype=tf.float64)
    chol_inv = tf.linalg.triangular_solve(chol, identity)
    # A = L L^T  =>  A^{-1} = (L^{-1})^T L^{-1}
    return tf.linalg.matrix_transpose(chol_inv) @ chol_inv
def p_f_given_u(inducing_vars, inducing_inputs, kernel, inputs, invKmm_prior):
    """
    Mean and covariance of the conditional distribution p(f|u).

    :param inducing_vars: tensor with shape (nsample, num_inducing, 1)
    :param inducing_inputs: tensor with shape (num_inducing, input_dims)
    :param kernel: gpflow kernel used for the covariance computations
    :param inputs: tensor of shape (num_inputs, input_dims)
    :param invKmm_prior: precomputed inverse of the prior inducing-point covariance,
        shape (num_inducing, num_inducing)
    :return: (tensor of shape (nsample, num_inputs), tensor of shape (num_inputs, num_inputs))
    """
    cross_cov = kernel.K(inputs, inducing_inputs)  # Knm, (n, m)
    projection = cross_cov @ invKmm_prior  # (n, m)
    # Conditional mean for each sample of the inducing variables
    cond_mean = tf.squeeze(projection @ inducing_vars, axis=-1)
    # (nsample, num_inputs)
    prior_cov = kernel.K(inputs)  # Knn, (n, n)
    cond_cov = prior_cov - projection @ tf.transpose(cross_cov)
    # (num_inputs, num_inputs)
    return cond_mean, cond_cov
def q_f(q_mu, q_full, inducing_variables, kernel, inputs, Kmm, Kmm_inv):
    """
    Mean and covariance of the marginal variational distribution
    q(f) = integral of p(f|u) q(u) du.

    :param q_mu: tensor with shape (num_inducing, 1); variational mean of inducing variables
    :param q_full: tensor with shape (1, num_inducing, num_inducing); variational covariance
        of the inducing variables
    :param inducing_variables: tensor with shape (num_inducing, input_dims)
    :param kernel: gpflow kernel to compute covariance matrices
    :param inputs: tensor of shape (num_inputs, input_dims)
    :param Kmm: prior covariance of the inducing points, (num_inducing, num_inducing)
    :param Kmm_inv: precomputed inverse of Kmm
    :return: (tensor of shape (num_inputs,), tensor of shape (num_inputs, num_inputs))
    """
    cross_cov = kernel.K(inputs, inducing_variables)  # Knm, (n, m)
    projection = cross_cov @ Kmm_inv  # (n, m)
    marginal_mean = tf.squeeze(projection @ q_mu, axis=-1)
    prior_cov = kernel.K(inputs)  # Knn, (n, n)
    q_cov = tf.squeeze(q_full, axis=0)  # S, (m, m)
    marginal_cov = prior_cov + (projection @ (q_cov - Kmm) @ tf.linalg.matrix_transpose(projection))
    return marginal_mean, marginal_cov
def populate_dicts(D_vals):
    """
    Assign an integer index to each distinct input point seen in the training data.

    :param D_vals: [k] list of 2-d ndarray [:,d] (num_choices may differ per observation)
    :return: (idx_to_val_dict, val_to_idx_dict) — mutually inverse mappings between
        integer indices and input points represented as tuples
    """
    idx_to_val = {}
    val_to_idx = {}
    # Flatten all observations, convert rows to hashable tuples, deduplicate.
    unique_vals = set(map(tuple, np.concatenate(D_vals, axis=0)))
    for idx, val in enumerate(unique_vals):
        val_to_idx[val] = idx
        idx_to_val[idx] = val
    return idx_to_val, val_to_idx
def val_to_idx(D_vals, max_vals, val_to_idx_dict):
    """
    Converts training data from real values to index format using dictionaries.

    Returns D_idxs (TensorArray with k entries, entry i of shape (num_choices_i,))
    and max_idxs (tensor with shape (k,)):
        max_idxs[i] is the index into D_vals[i] of the most preferred choice of
        observation i, or -1 when max_vals[i] is None.

    :param D_vals: [k] list of ndarray [:,d]
    :param max_vals: [k] list of ndarray [1,d]; entries may be None
    :param val_to_idx_dict: dict mapping input tuples to their integer indices
    """
    k = len(D_vals)
    max_idxs = np.zeros(k, dtype=np.int32)
    for i in range(k):
        if max_vals[i] is not None:
            # Locate the choice matching max_vals[i] (squared distance effectively zero).
            diff = np.sum(np.square(D_vals[i] - max_vals[i]), axis=1)
            max_idxs[i] = np.where(diff < 1e-30)[0]
        else:
            max_idxs[i] = -1
    max_idxs = tf.constant(max_idxs)

    D_idxs = tf.TensorArray(dtype=tf.int32, size=k, name='D_idxs', infer_shape=False, clear_after_read=False)
    # BUGFIX: removed a dead loop that built np.stack([...]) per observation and
    # discarded the result — pure wasted work with no effect.
    cond = lambda i, _: i < k
    body = lambda i, D_idxs: \
        (i+1,
         D_idxs.write(
            i,
            tf.constant([ val_to_idx_dict[tuple(datum)] for datum in D_vals[i] ], dtype=tf.int32)
            )
        )
    _, D_idxs = tf.while_loop(cond, body, (0, D_idxs))

    return D_idxs, max_idxs
def init_inducing_vars(input_dims, num_inducing, obj_low, obj_high):
    """
    Initialize inducing variables. A uniform grid of interior points is built inside
    the hypercube [obj_low, obj_high]^input_dims, then num_inducing points are drawn
    from that grid uniformly at random without replacement.

    :param input_dims: int
    :param num_inducing: int
    :param obj_low: float
    :param obj_high: float
    :return: ndarray of shape (num_inducing, input_dims)
    """
    if input_dims == 1:
        # Interior points of an evenly-spaced partition (endpoints excluded).
        candidates = np.linspace(obj_low, obj_high, num_inducing + 2)
        return np.expand_dims(candidates[1:num_inducing + 1], axis=1)

    # Points per axis chosen so the grid has at least num_inducing points.
    n_per_dim = int(np.ceil(np.sqrt(num_inducing - 1))) + 1
    n_grid = n_per_dim ** input_dims
    axis_points = np.linspace(obj_low, obj_high, n_per_dim + 2)[1:n_per_dim + 1]

    # Enumerate grid points in mixed-radix order (dimension 0 varies fastest).
    grid = np.zeros([n_grid, input_dims])
    for point_idx in range(n_grid):
        remainder = point_idx
        for dim in range(input_dims):
            grid[point_idx, dim] = axis_points[remainder % n_per_dim]
            remainder //= n_per_dim

    chosen = np.random.choice(n_grid, num_inducing, replace=False)
    return np.take(grid, chosen, axis=0)
def train_model_fullcov(X,
                        y,
                        num_inducing,
                        obj_low,
                        obj_high,
                        deterministic=False,
                        n_sample = 1000,
                        lengthscale_lower_bound=gpflow.default_jitter(),
                        num_steps=5000,
                        indifference_threshold=0.0,
                        inducing_vars=None,
                        regularizer_lengthscale_mean_over_range=0.5,
                        regularizer_lengthscale_std_over_range=0.35):
    """
    Trains the full-covariance variational preference model by minimizing the
    negative ELBO with a lengthscale regularizer.

    if indifference_threshold is None:
        indifference_threshold is trained with maximum likelihood estimation
    else:
        indifference_threshold is fixed

    :param X: np array with shape (num_data, num_choices, input_dims). Ordinal data
    :param y: np array with shape (num_data, input_dims). Most preferred input for each set of inputs. Each y value must
        match exactly to one of the choices in its corresponding X entry
    :param num_inducing: number of inducing variables to use
    :param obj_low: float. Floor of possible inducing point value in each dimension
    :param obj_high: float. Ceiling of possible inducing point value in each dimension
    :param deterministic: bool. When True a fixed set of standard-normal samples is used
        for the Monte-Carlo ELBO, making repeated loss evaluations reproducible
    :param n_sample: number of Monte-Carlo samples for the ELBO estimate
    :param lengthscale_lower_bound: lower bound for the RBF lengthscale transform (None to skip)
    :param num_steps: int that specifies how many optimization steps to take when training model
    :param indifference_threshold: float or None (see above)
    :param inducing_vars: optional (num_inducing, input_dims) initial inducing inputs;
        drawn from a uniform grid via init_inducing_vars when None
    :param regularizer_lengthscale_mean_over_range: regularizer mean as a fraction of the input range
    :param regularizer_lengthscale_std_over_range: regularizer std as a fraction of the input range
    :return: dict with keys q_mu, q_sqrt, inputs, u, kernel, indifference_threshold, loss
    :raises ValueError: if optimization hits a numerical error (tf.errors.InvalidArgumentError)
    """
    input_dims = X.shape[2]
    idx_to_val_dict, val_to_idx_dict = populate_dicts(X)
    D_idxs, max_idxs = val_to_idx(X, y, val_to_idx_dict)
    n = len(val_to_idx_dict.keys())
    inputs = np.array([idx_to_val_dict[i] for i in range(n)])
    num_input = inputs.shape[0]

    # Initialize variational parameters
    q_mu = tf.Variable(np.zeros([num_inducing, 1]), name="q_mu", dtype=tf.float64)
    q_sqrt_latent = tf.Variable(np.expand_dims(np.eye(num_inducing), axis=0), name="q_sqrt_latent", dtype=tf.float64)

    # NOTE(review): `lengthscale` (singular) matches older gpflow 2.x; newer releases
    # renamed it to `lengthscales` — verify against the pinned gpflow version.
    kernel = gpflow.kernels.RBF(lengthscale=[1.0 for i in range(input_dims)])
    if lengthscale_lower_bound is not None:
        kernel.lengthscale.transform = gpflow.utilities.bijectors.positive(lower=lengthscale_lower_bound)

    # BUGFIX: restore default initialization of the inducing inputs. This guard was
    # commented out, so calling with the default inducing_vars=None crashed in
    # tf.Variable(None) below.
    if inducing_vars is None:
        inducing_vars = init_inducing_vars(input_dims, num_inducing, obj_low, obj_high)

    # Inducing inputs are trainable but clipped to the objective's hypercube.
    u = tf.Variable(inducing_vars,
                    name="u",
                    dtype=tf.float64,
                    constraint=lambda x: tf.clip_by_value(x, obj_low, obj_high))

    is_threshold_trainable = (indifference_threshold is None)
    if is_threshold_trainable:
        # Learn the threshold by maximum likelihood, constrained non-negative.
        indifference_threshold = tf.Variable(0.1, dtype=tf.float64,
                                             constraint=lambda x: tf.clip_by_value(x,
                                                                                   clip_value_min=0.0,
                                                                                   clip_value_max=np.infty))
    else:
        indifference_threshold = tf.constant(indifference_threshold, dtype=tf.float64)

    if deterministic:
        standard_mvn_samples = tf.constant(np.random.randn(n_sample, num_input), dtype=tf.float64)
    else:
        standard_mvn_samples = None

    # Gaussian regularizer on the lengthscales, parameterized by the input range.
    input_range = obj_high - obj_low
    lengthscale_mean_regularizer = input_range * regularizer_lengthscale_mean_over_range
    lengthscale_std_regularizer = input_range * regularizer_lengthscale_std_over_range

    # BUGFIX: the regularizer term is now computed inside the loss closure. Previously
    # it was evaluated once (a constant snapshot via read_value()), so it contributed
    # no gradient to the lengthscales during optimization.
    neg_elbo = lambda: -elbo_fullcov(q_mu=q_mu,
                                     q_sqrt_latent=q_sqrt_latent,
                                     inducing_inputs=u,
                                     D_idxs=D_idxs,
                                     max_idxs=max_idxs,
                                     kernel=kernel,
                                     inputs=inputs,
                                     indifference_threshold=indifference_threshold,
                                     standard_mvn_samples=standard_mvn_samples,
                                     n_sample=n_sample) \
        + 0.5 * tf.reduce_sum(tf.square((kernel.lengthscale.read_value() - lengthscale_mean_regularizer) / lengthscale_std_regularizer))

    # rho=0.0 makes RMSprop behave like plain (scaled) SGD for the deterministic path.
    optimizer = tf.keras.optimizers.RMSprop(rho=0.0 if deterministic else 0.9)
    print("Optimizer config: ", optimizer.get_config())

    trainable_vars = [u, q_mu, q_sqrt_latent] + list(kernel.trainable_variables)
    if is_threshold_trainable:
        print("Indifference_threshold is trainable.")
        trainable_vars.append(indifference_threshold)

    start_time = time.time()

    lengthscale_init = np.array([lengthscale_mean_regularizer for i in range(input_dims)])
    kernel.lengthscale.assign(lengthscale_init)
    # reduce initial lengthscale if it is too big
    while True:
        is_lengthscale_too_big = False
        try:
            cur_neg_elbo = neg_elbo().numpy()
            is_lengthscale_too_big = (cur_neg_elbo > 1e10)
        except tf.errors.InvalidArgumentError:
            # lengthscale is so big that it causes a numerical error
            is_lengthscale_too_big = True

        if not is_lengthscale_too_big:
            break

        lengthscale_init = np.array(lengthscale_init) * 0.8
        kernel.lengthscale.assign(lengthscale_init)

    print("Initialize lengthscale at {}".format(lengthscale_init))
    print("  Initial negative ELBO: {}".format(cur_neg_elbo))

    try:
        for i in range(num_steps):
            optimizer.minimize(neg_elbo, var_list=trainable_vars)
            if i % 100 == 0:
                print('Negative ELBO at step {}: {} in {:.4f}s'.format(i,
                                                                       neg_elbo().numpy(),
                                                                       time.time() - start_time))
                start_time = time.time()
    except tf.errors.InvalidArgumentError as err:
        # Dump model state for debugging before failing.
        print(err)
        print(q_mu)
        print(q_sqrt_latent)
        print(u)
        print(inputs)
        gpflow.utilities.print_summary(kernel)
        raise ValueError("Optimization failed with a numerical error") from err

    result = {"q_mu": q_mu,
              "q_sqrt": tf.linalg.band_part(q_sqrt_latent, -1, 0),
              "inputs": inputs,
              "u": u,
              "kernel": kernel,
              "indifference_threshold": indifference_threshold,
              "loss": neg_elbo().numpy()}

    return result
def init_SVGP_fullcov(q_mu, q_sqrt, inducing_variables, kernel, likelihood):
    """
    Builds a gpflow SVGP model from the values obtained from train_model.

    :param q_mu: np array or tensor of shape (num_inputs, 1)
    :param q_sqrt: np array or tensor of shape (num_inputs, num_inputs). Lower triangular matrix
    :param inducing_variables: tensor of shape (num_inducing, input_dims)
    :param kernel: gpflow kernel
    :param likelihood: gpflow likelihood
    :return: gpflow.models.SVGP with the learned variational parameters frozen
    """
    svgp = gpflow.models.SVGP(
        kernel=kernel,
        likelihood=likelihood,
        inducing_variable=inducing_variables,
        whiten=False)
    svgp.q_mu.assign(q_mu)
    svgp.q_sqrt.assign(q_sqrt)
    # Freeze the learned parameters so any further optimization over other
    # parameters leaves them unchanged.
    for parameter in (svgp.q_mu, svgp.q_sqrt, svgp.inducing_variable.Z):
        set_trainable(parameter, False)
    return svgp
| [
11748,
640,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
11192,
273,
11125,
355,
48700,
198,
11748,
11192,
273,
11125,
62,
1676,
65,
1799,
355,
256,
46428,
198,
11748,
27809,
11125,
198,
6738,
27809,
11125,
13,
315,
2410,
1330,
900,
... | 2.188674 | 7,929 |