content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
##############################################################################
# insert.py
# https://github.com/DigiLog-N/SynopticDataClient
# Copyright 2020 Canvass Labs, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
from cassandra.cluster import Cluster
cluster = Cluster()
session = cluster.connect('digilog_n')
'''
rows = session.execute('SELECT * FROM digilog_n.obd')
for row in rows:
print(row)
user_lookup_stmt = session.prepare("SELECT * FROM users WHERE user_id=?")
INSERT INTO
users = []
for user_id in user_ids_to_query:
user = session.execute(user_lookup_stmt, [user_id])
users.append(user)
session.execute(
"""
INSERT INTO users (name, credits, user_id)
VALUES (%s, %s, %s)
""",
("John O'Reilly", 42, uuid.uuid1())
)
'''
| [
29113,
29113,
7804,
4242,
2235,
198,
2,
7550,
13,
9078,
198,
2,
3740,
1378,
12567,
13,
785,
14,
19511,
72,
11187,
12,
45,
14,
29934,
8738,
291,
6601,
11792,
198,
2,
15069,
12131,
1680,
85,
562,
23500,
11,
3457,
13,
198,
2,
220,
19... | 3.304136 | 411 |
import pytest
from btreelab.disk import Disk, DiskController
@pytest.fixture()
def dc():
'''disk controller
'''
return DiskController(block_size=124, block_num=8)
| [
198,
11748,
12972,
9288,
220,
198,
6738,
275,
33945,
417,
397,
13,
39531,
1330,
31664,
11,
31664,
22130,
220,
198,
198,
31,
9078,
9288,
13,
69,
9602,
3419,
220,
198,
4299,
30736,
33529,
220,
198,
220,
220,
220,
705,
7061,
39531,
10444... | 2.591549 | 71 |
# Copyright 2017 Google Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from gapit_test_framework import gapit_test, require, require_equal
from gapit_test_framework import require_not_equal, little_endian_bytes_to_int
from gapit_test_framework import GapitTest, get_read_offset_function
from struct_offsets import VulkanStruct, UINT32_T, SIZE_T, POINTER
from struct_offsets import HANDLE, FLOAT, CHAR, ARRAY
from vulkan_constants import *
FRAMEBUFFER_CREATE_INFO = [
("sType", UINT32_T), ("pNext", POINTER), ("flags", UINT32_T),
("renderPass", HANDLE), ("attachmentCount", UINT32_T),
("pAttachments", POINTER), ("width", UINT32_T), ("height", UINT32_T),
("layers", UINT32_T)
]
@gapit_test("vkCreateFramebuffer_test")
| [
2,
15069,
2177,
3012,
3457,
13,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198,
2,
921,
743,
7330,
257... | 3.191214 | 387 |
#
# Copyright (2021) The Delta Lake Project Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from scripts.utils import *
from datetime import datetime
import time
class BenchmarkSpec:
"""
Specifications of a benchmark.
:param format_name: Spark format name
:param maven_artifacts: Maven artifact name in x:y:z format
:param spark_confs: list of spark conf strings in key=value format
:param benchmark_main_class: Name of main Scala class from the JAR to run
:param main_class_args command line args for the main class
"""
class TPCDSDataLoadSpec(BenchmarkSpec):
"""
Specifications of TPC-DS data load process.
Always mixin in this first before the base benchmark class.
"""
class TPCDSBenchmarkSpec(BenchmarkSpec):
"""
Specifications of TPC-DS benchmark
"""
# ============== Delta benchmark specifications ==============
class DeltaBenchmarkSpec(BenchmarkSpec):
"""
Specification of a benchmark using the Delta format
"""
@staticmethod
# ============== General benchmark execution ==============
class Benchmark:
"""
Represents a benchmark that can be run on a remote Spark cluster
:param benchmark_name: A name to be used for uniquely identifying this benchmark.
Added to file names generated by this benchmark.
:param benchmark_spec: Specification of the benchmark. See BenchmarkSpec.
"""
@staticmethod
@staticmethod
@staticmethod
@staticmethod
@staticmethod
| [
2,
198,
2,
15069,
357,
1238,
2481,
8,
383,
16978,
6233,
4935,
46665,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,... | 3.397993 | 598 |
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/display.ipynb (unless otherwise specified).
__all__ = ['encode', 'DiscordEncoder', 'Formatter', 'serialize_content', 'html_content']
# Cell
import discord
# Cell
import json
# Cell
#TODO change the data model for this to something more standard.
# use only strings for the keywords rather than discord objects | [
2,
47044,
7730,
1677,
1137,
11617,
0,
8410,
5626,
48483,
0,
9220,
284,
4370,
25,
299,
1443,
14,
13812,
13,
541,
2047,
65,
357,
25252,
4306,
7368,
737,
198,
198,
834,
439,
834,
796,
37250,
268,
8189,
3256,
705,
15642,
585,
27195,
123... | 3.398148 | 108 |
import string
import base64
from distutils.util import strtobool
from marshmallow.exceptions import ValidationError
from baselayer.app.access import permissions, auth_or_token
from ..base import BaseHandler
from ...models import (
DBSession,
Source,
Comment,
Group,
Candidate,
Filter,
Obj,
User,
UserNotification,
)
| [
11748,
4731,
198,
11748,
2779,
2414,
198,
6738,
1233,
26791,
13,
22602,
1330,
965,
83,
672,
970,
198,
6738,
22397,
42725,
13,
1069,
11755,
1330,
3254,
24765,
12331,
198,
6738,
1615,
417,
2794,
13,
1324,
13,
15526,
1330,
21627,
11,
6284,... | 2.958333 | 120 |
from kafka import KafkaProducer
import json
producer = KafkaProducer(value_serializer=lambda m: json.dumps(m).encode('ascii'), bootstrap_servers=['localhost:9092'])
producer.send('event', {'id': 123, 'email_vendedor': 'asdas@mail.com'})
producer.flush() | [
6738,
479,
1878,
4914,
1330,
46906,
11547,
2189,
198,
11748,
33918,
220,
198,
198,
18230,
2189,
796,
46906,
11547,
2189,
7,
8367,
62,
46911,
7509,
28,
50033,
285,
25,
33918,
13,
67,
8142,
7,
76,
737,
268,
8189,
10786,
292,
979,
72,
... | 2.741935 | 93 |
import os, sys
sys.path.insert(1, os.path.join(os.path.abspath('.'), 'flaskstuff'))
from flask import Flask
app = Flask(__name__)
from app import views
| [
11748,
28686,
11,
25064,
198,
17597,
13,
6978,
13,
28463,
7,
16,
11,
28686,
13,
6978,
13,
22179,
7,
418,
13,
6978,
13,
397,
2777,
776,
10786,
2637,
828,
705,
2704,
2093,
41094,
6,
4008,
198,
6738,
42903,
1330,
46947,
198,
198,
1324,... | 2.732143 | 56 |
from concurrent.futures import ThreadPoolExecutor
from itertools import repeat
from pprint import pprint
import pexpect
import yaml
import logging
logging.basicConfig(
format="%(threadName)s %(name)s %(levelname)s: %(message)s", level=logging.INFO
)
if __name__ == "__main__":
with open("devices.yaml") as f:
devices = yaml.safe_load(f)
r = send_show_to_devices(devices, "sh int desc")
pprint(r, width=120)
| [
6738,
24580,
13,
69,
315,
942,
1330,
14122,
27201,
23002,
38409,
198,
6738,
340,
861,
10141,
1330,
9585,
198,
6738,
279,
4798,
1330,
279,
4798,
198,
198,
11748,
613,
87,
806,
198,
11748,
331,
43695,
198,
11748,
18931,
628,
198,
6404,
... | 2.638554 | 166 |
from django.contrib.auth.models import User
from django import forms
from healthapp.models import UserDoctor, UserPatient, Schedule
| [
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
27530,
1330,
11787,
198,
6738,
42625,
14208,
1330,
5107,
198,
6738,
1535,
1324,
13,
27530,
1330,
11787,
37564,
11,
11787,
12130,
1153,
11,
19281,
628,
628,
198
] | 3.777778 | 36 |
from __future__ import absolute_import
from . import qbatch
from .qbatch import qbatchParser
from .qbatch import qbatchDriver
| [
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
198,
198,
6738,
764,
1330,
10662,
43501,
198,
6738,
764,
80,
43501,
1330,
10662,
43501,
46677,
198,
6738,
764,
80,
43501,
1330,
10662,
43501,
32103,
198
] | 3.735294 | 34 |
__author__ = 'ThanhNam'
# Enter your code for the AdoptionCenter class here
# Be sure to include the __init__, get_name, get_species_count, get_number_of_species, and adopt_pet methods.
class AdoptionCenter:
"""
The AdoptionCenter class stores the important information that a
client would need to know about, such as the different numbers of
species stored, the location, and the name. It also has a method
to adopt a pet.
""" | [
834,
9800,
834,
796,
705,
817,
272,
71,
45,
321,
6,
198,
2,
6062,
534,
2438,
329,
262,
1215,
18076,
23656,
1398,
994,
198,
2,
1355,
1654,
284,
2291,
262,
11593,
15003,
834,
11,
651,
62,
3672,
11,
651,
62,
35448,
62,
9127,
11,
65... | 3.365672 | 134 |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import absolute_import, division, print_function, unicode_literals
from numpy.testing import assert_allclose
from ...utils.testing import requires_data
from ..core import gammapy_extra
from ...datasets import load_poisson_stats_image
@requires_data("gammapy-extra")
def test_gammapy_extra():
"""Try loading a file from gammapy-extra.
"""
assert gammapy_extra.dir.is_dir()
@requires_data("gammapy-extra")
| [
2,
49962,
739,
257,
513,
12,
565,
682,
347,
10305,
3918,
5964,
532,
766,
38559,
24290,
13,
81,
301,
198,
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
11,
7297,
11,
3601,
62,
8818,
11,
28000,
1098,
62,
17201,
874,
198,
6738,
299,
... | 3.10559 | 161 |
import unittest
from gpflow.tf_wraps import vec_to_tri
import tensorflow as tf
import numpy as np
from testing.gpflow_testcase import GPflowTestCase
from gpflow.tf_wraps import vec_to_tri
if __name__ == "__main__":
unittest.main()
| [
11748,
555,
715,
395,
198,
6738,
27809,
11125,
13,
27110,
62,
29988,
862,
1330,
43030,
62,
1462,
62,
28461,
198,
11748,
11192,
273,
11125,
355,
48700,
198,
11748,
299,
32152,
355,
45941,
198,
198,
6738,
4856,
13,
31197,
11125,
62,
9288,... | 2.8 | 85 |
from aiohttp.client import ClientSession
from http import HTTPStatus
from sanic.exceptions import abort
| [
6738,
257,
952,
4023,
13,
16366,
1330,
20985,
36044,
198,
6738,
2638,
1330,
14626,
19580,
198,
6738,
5336,
291,
13,
1069,
11755,
1330,
15614,
628
] | 4.2 | 25 |
from flask import Blueprint, redirect, url_for, request, render_template, flash, g
from flask.ext.login import login_user, logout_user, current_user, login_required
from GUTG_Vote import utilities
from GUTG_Vote.models import User, Game
from GUTG_Vote.forms import LoginForm
from GUTG_Vote.extensions import db
main = Blueprint('main', __name__)
@main.before_request
@main.route('/')
@main.route('/login', methods=['GET', 'POST'])
@main.route('/logout')
@main.route('/<game_id>/vote', methods=['POST'])
@login_required | [
6738,
42903,
1330,
39932,
11,
18941,
11,
19016,
62,
1640,
11,
2581,
11,
8543,
62,
28243,
11,
7644,
11,
308,
198,
6738,
42903,
13,
2302,
13,
38235,
1330,
17594,
62,
7220,
11,
2604,
448,
62,
7220,
11,
1459,
62,
7220,
11,
17594,
62,
... | 2.971751 | 177 |
import discord, dislash, datetime
from dislash import slash_command, SlashInteraction, ContextMenuInteraction
from discord.ext import commands
from src.extras.views import url_button_generator
| [
11748,
36446,
11,
595,
17055,
11,
4818,
8079,
198,
6738,
595,
17055,
1330,
24632,
62,
21812,
11,
26616,
9492,
2673,
11,
30532,
23381,
9492,
2673,
198,
6738,
36446,
13,
2302,
1330,
9729,
198,
6738,
12351,
13,
2302,
8847,
13,
33571,
1330,... | 4.020833 | 48 |
#!/usr/bin/env python3
# Get annotations with context from database.
import sys
import os
import re
from logging import warning, error
from standoff import Textbound
try:
from sqlitedict import SqliteDict
except ImportError:
error('failed to import sqlitedict, try `pip3 install sqlitedict`')
raise
def get_annotation(standoff, id_):
"""Get annotation with given ID from standoff"""
for ln, line in enumerate(standoff.splitlines(), start=1):
fields = line.split('\t')
if fields[0] == id_:
if id_[0] == 'T':
return Textbound.from_standoff(line)
else:
raise NotImplementedError()
if __name__ == '__main__':
sys.exit(main(sys.argv))
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
2,
3497,
37647,
351,
4732,
422,
6831,
13,
198,
198,
11748,
25064,
198,
11748,
28686,
198,
11748,
302,
198,
198,
6738,
18931,
1330,
6509,
11,
4049,
198,
198,
6738,
33379,
1330,... | 2.489933 | 298 |
"""botform URL Configuration
"""
from django.conf.urls import url, include
from rest_framework import routers
from botform import api as form_api
router = routers.DefaultRouter()
router.register(r'forms', form_api.FormsViewSet)
router.register(r'submissions', form_api.SubmissionsViewSet)
urlpatterns = [
url(r'^api/v1/', include(router.urls)),
url(r'^api/v1/forms/(?P<pk>\d+)/details/?$', form_api.grid_details),
url(r'^api/v1/forms/(?P<pk>\d+)/details/submission/?$', form_api.grid_submissions),
url(r'^', include('botform.urls')),
url(r'^accounts/', include('allauth.urls')),
]
| [
37811,
13645,
687,
10289,
28373,
198,
198,
37811,
198,
6738,
42625,
14208,
13,
10414,
13,
6371,
82,
1330,
19016,
11,
2291,
198,
6738,
1334,
62,
30604,
1330,
41144,
198,
198,
6738,
10214,
687,
1330,
40391,
355,
1296,
62,
15042,
198,
198,... | 2.489712 | 243 |
from pymongo import MongoClient
| [
6738,
279,
4948,
25162,
1330,
42591,
11792,
628,
198
] | 3.777778 | 9 |
from operacaoes3 import mais , menos , vezes , divicao , resto , raiz , divicao_f
n1 = int(input('a'))
n2 = int(input('b'))
a = mais(n1,n2)
b = menos(n1,n2)
c = vezes(n1,n2)
d = divicao(n1,n2)
e = resto(n1,n2)
f = raiz(n1,n2)
g = divicao_f(n1,n2)
print(f'{a}')
print(f'{b}')
print(f'{c}')
print(f'{d}')
print(f'{e}')
print(f'{f}')
print(f'{g}')
| [
6738,
1515,
22260,
3028,
18,
1330,
285,
15152,
837,
1450,
418,
837,
1569,
12271,
837,
2659,
3970,
78,
837,
1334,
78,
837,
2179,
528,
837,
2659,
3970,
78,
62,
69,
198,
198,
77,
16,
796,
493,
7,
15414,
10786,
64,
6,
4008,
198,
77,
... | 1.679426 | 209 |
#!/usr/bin/env python
import argparse
import os
import psycopg2
import sys
# Why is it so hard to get python imports working?
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
from scry import scry
if __name__ == "__main__":
main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
11748,
1822,
29572,
198,
11748,
28686,
198,
11748,
17331,
22163,
70,
17,
198,
11748,
25064,
198,
198,
2,
4162,
318,
340,
523,
1327,
284,
651,
21015,
17944,
1762,
30,
198,
17597,
1... | 2.644231 | 104 |
__version__ = '0.1.5'
try:
import pandas
pandas_df_type = pandas.DataFrame
except ImportError:
pandas_df_type = type(None)
try:
import msgpack
has_msgpack = True
except ImportError:
has_msgpack = False
try:
import os
login = os.getlogin()
except OSError:
login = ''
| [
834,
9641,
834,
796,
705,
15,
13,
16,
13,
20,
6,
198,
198,
28311,
25,
198,
220,
220,
220,
1330,
19798,
292,
198,
220,
220,
220,
19798,
292,
62,
7568,
62,
4906,
796,
19798,
292,
13,
6601,
19778,
198,
16341,
17267,
12331,
25,
198,
... | 2.346154 | 130 |
#!/usr/bin/env python3
import argparse
import sys
import os
from pathlib import Path
from Bio import SeqIO
import gzip
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
11748,
1822,
29572,
198,
11748,
25064,
198,
11748,
28686,
198,
6738,
3108,
8019,
1330,
10644,
198,
6738,
16024,
1330,
1001,
80,
9399,
198,
11748,
308,
13344,
628,
198
] | 3.184211 | 38 |
""" Helper to assemble code from a web page. """
import flask
import subprocess
import tempfile
main_html = r"""
<!DOCTYPE html>
<html><head>
<title>Online compiler</title>
<meta name="viewport" content="width=device-width, initial-scale=1">
<link rel="stylesheet" href="http://www.w3schools.com/lib/w3.css">
<script src="https://ajax.googleapis.com/ajax/libs/jquery/3.1.1/jquery.min.js"></script>
<script>
function do_compile() {
source = $("#source").val()
$.post("compile", { source: source },
function(data, status) {
$("#result").text(data.replace("\\n", "<br>", "g"));
});
}
</script>
</head>
<body>
<div class="w3-container w3-teal"><h1>Online assembler</h1></div>
<div class="w3-container"><textarea id="source">mov rax,rbx</textarea></div>
<div class="w3-container">
<button class="w3-btn" onclick="do_compile()">Compile</button>
</div>
<div class="w3-container"><p id="result"></p></div>
<div class="w3-container w3-teal"><p>By Windel Bouwman 2016</p></div>
</body></html>
"""
app = flask.Flask(__name__)
@app.route('/')
@app.route('/compile', methods=['POST'])
if __name__ == '__main__':
app.run()
| [
37811,
5053,
525,
284,
25432,
2438,
422,
257,
3992,
2443,
13,
37227,
198,
198,
11748,
42903,
198,
11748,
850,
14681,
198,
11748,
20218,
7753,
198,
198,
12417,
62,
6494,
796,
374,
37811,
198,
27,
0,
18227,
4177,
56,
11401,
27711,
29,
1... | 2.555305 | 443 |
if __name__ == "__main__":
a = input("first number:")
b = input("second number:")
print(', '.join(swap(a, b)))
| [
628,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
257,
796,
5128,
7203,
11085,
1271,
25,
4943,
198,
220,
275,
796,
5128,
7203,
12227,
1271,
25,
4943,
198,
220,
3601,
7,
3256,
45302,
22179,
7,
2032,
499,
7,... | 2.5 | 48 |
import os
import sys
import time
import math
import inspect
import copy
import logging
import numpy as np
import cv2
import torch
from torch.autograd import Variable
import torch.nn.functional as F
from .DaSiamRPN_net import SiamRPNvot, SiamRPNBIG, SiamRPNotb
from .run_SiamRPN import generate_anchor, tracker_eval
from .DaSiamRPN_utils import get_subwindow_tracking
class DaSiamRPNParams:
"""
:param int model: 0: SiamRPNvot 1: SiamRPNBIG 2: SiamRPNotb,
:param str windowing: to penalize large displacements [cosine/uniform]
:param int exemplar_size: input z size
:param int instance_size: input x size (search region)
:param float context_amount: context amount for the exemplar
:param bool adaptive: adaptive change search region
:param int score_size: size of score map
:param int anchor_num: number of anchors
"""
class DaSiamRPN:
"""
:type params: DaSiamRPNParams
:type logger: logging.RootLogger
:type states: list[dict]
"""
def __init__(self, params, logger, target_id=0,
label='generic', confidence=1.0):
"""
:type params: DaSiamRPNParams
:type logger: logging.RootLogger | None
:type target_id: int
:rtype: None
"""
# self.tf_graph = tf.Graph()
# avoid printing TF debugging information
self._params = params
self._logger = logger
self.target_id = target_id
self.label = label
self.confidence = confidence
self.cumulative_confidence = confidence
if self._logger is None:
self._logger = logging.getLogger()
self._logger.setLevel(logging.INFO)
# self.logger.handlers[0].setFormatter(logging.Formatter(
# '%(levelname)s::%(module)s::%(funcName)s::%(lineno)s : %(message)s'))
self.anchor = []
# self.params.update(cfg={})
self.associated_frames = 1
self.unassociated_frames = 0
self.associated = 0
# self.is_initialized = 0
self.bbox = None
self.gpu_id = self._params.gpu_id
self.pretrained_wts_dir = self._params.pretrained_wts_dir
if self._params.rel_path:
self.pretrained_wts_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), self.pretrained_wts_dir)
self.net = None
self.score_sz = self._params.score_size
self.final_score_sz = self._params.score_size
if self._params.update_location == 0:
self._logger.info('Location updating is disabled')
self.state = None
def initialize(self, init_frame, init_bbox):
"""
:param np.ndarray init_frame:
:param np.ndarray | list | tuple init_bbox:
:return:
"""
if self.net is None:
if self._params.model == 0:
net = SiamRPNvot()
net.load_state_dict(torch.load(os.path.join(self.pretrained_wts_dir, 'SiamRPNVOT.model')))
# self._logger.info('Using SiamRPNVOT model')
elif self._params.model == 1:
net = SiamRPNBIG()
net.load_state_dict(torch.load(os.path.join(self.pretrained_wts_dir, 'SiamRPNBIG.model')))
# self._logger.info('Using SiamRPNBIG model')
elif self._params.model == 2:
net = SiamRPNotb()
net.load_state_dict(torch.load(os.path.join(self.pretrained_wts_dir, 'SiamRPNOTB.model')))
# self._logger.info('Using SiamRPNOTB model')
else:
raise IOError('Invalid model_type: {}'.format(self._params.model))
net.eval().cuda(self.gpu_id)
self.net = net
cx, cy, target_w, target_h = init_bbox
target_pos = np.array([cx, cy])
target_sz = np.array([target_w, target_h])
self._params.update(self.net.cfg)
state = dict()
state['im_h'] = init_frame.shape[0]
state['im_w'] = init_frame.shape[1]
if self._params.adaptive:
if ((target_sz[0] * target_sz[1]) / float(state['im_h'] * state['im_w'])) < 0.004:
self._params.instance_size = 287 # small object big search region
else:
self._params.instance_size = 271
self._params.score_size = (
self._params.instance_size - self._params.exemplar_size) / self._params.total_stride + 1
self.anchor = generate_anchor(self._params.total_stride, self._params.scales, self._params.ratios,
int(self._params.score_size))
avg_chans = np.mean(init_frame, axis=(0, 1))
wc_z = target_sz[0] + self._params.context_amount * sum(target_sz)
hc_z = target_sz[1] + self._params.context_amount * sum(target_sz)
s_z = round(np.sqrt(wc_z * hc_z))
# initialize the exemplar
z_crop = get_subwindow_tracking(init_frame, target_pos, self._params.exemplar_size, s_z, avg_chans)
z = Variable(z_crop.unsqueeze(0))
self.net.temple(z.cuda(self.gpu_id))
if self._params.windowing == 'cosine':
window = np.outer(np.hanning(self.score_sz), np.hanning(self.score_sz))
elif self._params.windowing == 'uniform':
window = np.ones((self.score_sz, self.score_sz))
else:
raise IOError('Invalid windowing type: {}'.format(self._params.windowing))
window = np.tile(window.flatten(), self._params.anchor_num)
# state['p'] = self.params
pos_x, pos_y = target_pos
target_w, target_h = target_sz
xmin, ymin = pos_x - target_w / 2, pos_y - target_h / 2
xmax, ymax = xmin + target_w, ymin + target_h
bbox = [xmin, ymin, target_w, target_h]
state['net'] = self.net
state['avg_chans'] = avg_chans
state['window'] = window
state['target_pos'] = target_pos
state['target_sz'] = target_sz
self.bbox = [xmin, ymin, xmax, ymax]
self.state = state
| [
11748,
28686,
198,
11748,
25064,
198,
11748,
640,
198,
11748,
10688,
198,
11748,
10104,
198,
11748,
4866,
198,
11748,
18931,
198,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
269,
85,
17,
198,
11748,
28034,
198,
6738,
28034,
13,
2306,
... | 2.128286 | 2,853 |
import random
from collections import OrderedDict
from string import ascii_letters, digits
from django import forms
from django.contrib.auth.models import User, Group
from django.contrib.sites.models import Site
from django.core.mail import send_mail
from django.template import loader
from .models import UserProfile
# vim: set ts=4 sw=4 et:
| [
11748,
4738,
201,
198,
6738,
17268,
1330,
14230,
1068,
35,
713,
201,
198,
6738,
4731,
1330,
355,
979,
72,
62,
15653,
11,
19561,
201,
198,
201,
198,
6738,
42625,
14208,
1330,
5107,
201,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439... | 3.09322 | 118 |
import numpy as np
# hidden layer activation function
# derivate of hidden layer activation function for gradient descent
# output layer activation function
# cost function
# derivative of cost function for gradient descent
| [
11748,
299,
32152,
355,
45941,
201,
198,
201,
198,
2,
7104,
7679,
14916,
2163,
201,
198,
201,
198,
2,
16124,
378,
286,
7104,
7679,
14916,
2163,
329,
31312,
18598,
201,
198,
201,
198,
2,
5072,
7679,
14916,
2163,
201,
198,
201,
198,
2... | 4.12069 | 58 |
l_in = lasagne.layers.InputLayer((None, 784))
l_out = lasagne.layers.DenseLayer(l_in,
num_units=10,
nonlinearity=lasagne.nonlinearities.softmax)
X_sym = T.matrix()
y_sym = T.ivector()
output = lasagne.layers.get_output(l_out, X_sym)
pred = output.argmax(-1)
loss = T.mean(lasagne.objectives.categorical_crossentropy(output, y_sym))
acc = T.mean(T.eq(pred, y_sym))
params = lasagne.layers.get_all_params(l_out)
grad = T.grad(loss, params)
updates = lasagne.updates.adam(grad, params, learning_rate=0.001)
f_train = theano.function([X_sym, y_sym], [loss, acc], updates=updates)
f_val = theano.function([X_sym, y_sym], [loss, acc])
f_predict = theano.function([X_sym], pred)
BATCH_SIZE = 64
N_BATCHES = len(X_train) // BATCH_SIZE
N_VAL_BATCHES = len(X_val) // BATCH_SIZE
for epoch in range(10):
train_loss = 0
train_acc = 0
for _ in range(N_BATCHES):
X, y = next(train_batches)
loss, acc = f_train(X, y)
train_loss += loss
train_acc += acc
train_loss /= N_BATCHES
train_acc /= N_BATCHES
val_loss = 0
val_acc = 0
for _ in range(N_VAL_BATCHES):
X, y = next(val_batches)
loss, acc = f_val(X, y)
val_loss += loss
val_acc += acc
val_loss /= N_VAL_BATCHES
val_acc /= N_VAL_BATCHES
print('Epoch {}, Train (val) loss {:.03f} ({:.03f}) ratio {:.03f}'.format(
epoch, train_loss, val_loss, val_loss/train_loss))
print('Train (val) accuracy {:.03f} ({:.03f})'.format(train_acc, val_acc))
weights = l_out.W.get_value()
plt.figure(figsize=(12,3))
for i in range(10):
plt.subplot(1, 10, i+1)
plt.imshow(weights[:,i].reshape((28, 28)), cmap='gray', interpolation='nearest')
plt.axis('off') | [
75,
62,
259,
796,
39990,
21080,
13,
75,
6962,
13,
20560,
49925,
19510,
14202,
11,
767,
5705,
4008,
198,
75,
62,
448,
796,
39990,
21080,
13,
75,
6962,
13,
35,
1072,
49925,
7,
75,
62,
259,
11,
198,
220,
220,
220,
220,
220,
220,
22... | 2.065517 | 870 |
'''
Created on Sep 8, 2016
@author: nicolas
'''
import re
import fnmatch
from functools import reduce
from lemoncheesecake.reporting import load_report
from lemoncheesecake.reporting.reportdir import DEFAULT_REPORT_DIR_NAME
from lemoncheesecake.reporting.report import Result, TestResult, Step, Log, Check, Attachment, Url
from lemoncheesecake.testtree import BaseTest, BaseSuite
from lemoncheesecake.suite import Test
from lemoncheesecake.exceptions import UserError
_NEGATION_FLAGS = "-^~"
| [
7061,
6,
198,
41972,
319,
8621,
807,
11,
1584,
198,
198,
31,
9800,
25,
9200,
12456,
198,
7061,
6,
198,
198,
11748,
302,
198,
11748,
24714,
15699,
198,
6738,
1257,
310,
10141,
1330,
4646,
198,
198,
6738,
18873,
2395,
274,
46557,
13,
... | 3.259494 | 158 |
STATS = [
{
"num_node_expansions": 653,
"plan_length": 167,
"search_time": 0.52,
"total_time": 0.52
},
{
"num_node_expansions": 978,
"plan_length": 167,
"search_time": 0.86,
"total_time": 0.86
},
{
"num_node_expansions": 1087,
"plan_length": 194,
"search_time": 15.85,
"total_time": 15.85
},
{
"num_node_expansions": 923,
"plan_length": 198,
"search_time": 15.21,
"total_time": 15.21
},
{
"num_node_expansions": 667,
"plan_length": 142,
"search_time": 13.94,
"total_time": 13.94
},
{
"num_node_expansions": 581,
"plan_length": 156,
"search_time": 11.54,
"total_time": 11.54
},
{
"num_node_expansions": 505,
"plan_length": 134,
"search_time": 2.79,
"total_time": 2.79
},
{
"num_node_expansions": 953,
"plan_length": 165,
"search_time": 6.22,
"total_time": 6.22
},
{
"num_node_expansions": 792,
"plan_length": 163,
"search_time": 0.33,
"total_time": 0.33
},
{
"num_node_expansions": 554,
"plan_length": 160,
"search_time": 0.27,
"total_time": 0.27
},
{
"num_node_expansions": 706,
"plan_length": 156,
"search_time": 2.44,
"total_time": 2.44
},
{
"num_node_expansions": 620,
"plan_length": 138,
"search_time": 1.65,
"total_time": 1.65
},
{
"num_node_expansions": 661,
"plan_length": 169,
"search_time": 0.28,
"total_time": 0.28
},
{
"num_node_expansions": 774,
"plan_length": 178,
"search_time": 0.4,
"total_time": 0.4
},
{
"num_node_expansions": 615,
"plan_length": 171,
"search_time": 0.53,
"total_time": 0.53
},
{
"num_node_expansions": 516,
"plan_length": 134,
"search_time": 0.71,
"total_time": 0.71
},
{
"num_node_expansions": 1077,
"plan_length": 221,
"search_time": 0.58,
"total_time": 0.58
},
{
"num_node_expansions": 1029,
"plan_length": 213,
"search_time": 0.62,
"total_time": 0.62
},
{
"num_node_expansions": 753,
"plan_length": 173,
"search_time": 0.47,
"total_time": 0.47
},
{
"num_node_expansions": 814,
"plan_length": 210,
"search_time": 0.5,
"total_time": 0.5
},
{
"num_node_expansions": 569,
"plan_length": 134,
"search_time": 3.06,
"total_time": 3.06
},
{
"num_node_expansions": 899,
"plan_length": 176,
"search_time": 5.84,
"total_time": 5.84
},
{
"num_node_expansions": 531,
"plan_length": 144,
"search_time": 3.15,
"total_time": 3.15
},
{
"num_node_expansions": 631,
"plan_length": 164,
"search_time": 3.74,
"total_time": 3.74
},
{
"num_node_expansions": 479,
"plan_length": 138,
"search_time": 0.11,
"total_time": 0.11
},
{
"num_node_expansions": 941,
"plan_length": 148,
"search_time": 0.22,
"total_time": 0.22
},
{
"num_node_expansions": 1023,
"plan_length": 197,
"search_time": 9.46,
"total_time": 9.46
},
{
"num_node_expansions": 1152,
"plan_length": 196,
"search_time": 12.7,
"total_time": 12.7
},
{
"num_node_expansions": 629,
"plan_length": 147,
"search_time": 4.14,
"total_time": 4.14
},
{
"num_node_expansions": 697,
"plan_length": 160,
"search_time": 2.82,
"total_time": 2.82
},
{
"num_node_expansions": 646,
"plan_length": 158,
"search_time": 3.74,
"total_time": 3.74
},
{
"num_node_expansions": 741,
"plan_length": 152,
"search_time": 4.56,
"total_time": 4.56
},
{
"num_node_expansions": 486,
"plan_length": 136,
"search_time": 1.77,
"total_time": 1.77
},
{
"num_node_expansions": 602,
"plan_length": 146,
"search_time": 3.22,
"total_time": 3.22
},
{
"num_node_expansions": 774,
"plan_length": 186,
"search_time": 1.56,
"total_time": 1.56
},
{
"num_node_expansions": 1512,
"plan_length": 209,
"search_time": 4.48,
"total_time": 4.48
},
{
"num_node_expansions": 791,
"plan_length": 180,
"search_time": 14.5,
"total_time": 14.5
},
{
"num_node_expansions": 1019,
"plan_length": 211,
"search_time": 18.59,
"total_time": 18.59
},
{
"num_node_expansions": 450,
"plan_length": 133,
"search_time": 2.75,
"total_time": 2.75
},
{
"num_node_expansions": 526,
"plan_length": 135,
"search_time": 3.02,
"total_time": 3.02
},
{
"num_node_expansions": 1329,
"plan_length": 182,
"search_time": 8.07,
"total_time": 8.07
},
{
"num_node_expansions": 655,
"plan_length": 134,
"search_time": 3.8,
"total_time": 3.8
},
{
"num_node_expansions": 636,
"plan_length": 159,
"search_time": 7.13,
"total_time": 7.13
},
{
"num_node_expansions": 1403,
"plan_length": 196,
"search_time": 16.16,
"total_time": 16.16
},
{
"num_node_expansions": 664,
"plan_length": 175,
"search_time": 4.18,
"total_time": 4.18
},
{
"num_node_expansions": 760,
"plan_length": 150,
"search_time": 6.37,
"total_time": 6.37
},
{
"num_node_expansions": 593,
"plan_length": 163,
"search_time": 9.42,
"total_time": 9.42
},
{
"num_node_expansions": 1043,
"plan_length": 179,
"search_time": 16.75,
"total_time": 16.75
},
{
"num_node_expansions": 390,
"plan_length": 103,
"search_time": 0.46,
"total_time": 0.46
},
{
"num_node_expansions": 419,
"plan_length": 120,
"search_time": 0.55,
"total_time": 0.55
},
{
"num_node_expansions": 606,
"plan_length": 160,
"search_time": 13.41,
"total_time": 13.41
},
{
"num_node_expansions": 905,
"plan_length": 213,
"search_time": 29.84,
"total_time": 29.84
},
{
"num_node_expansions": 525,
"plan_length": 146,
"search_time": 0.31,
"total_time": 0.31
},
{
"num_node_expansions": 522,
"plan_length": 147,
"search_time": 0.32,
"total_time": 0.32
},
{
"num_node_expansions": 652,
"plan_length": 165,
"search_time": 10.19,
"total_time": 10.19
},
{
"num_node_expansions": 1188,
"plan_length": 178,
"search_time": 13.24,
"total_time": 13.24
},
{
"num_node_expansions": 450,
"plan_length": 136,
"search_time": 1.48,
"total_time": 1.48
},
{
"num_node_expansions": 1179,
"plan_length": 209,
"search_time": 3.44,
"total_time": 3.44
},
{
"num_node_expansions": 834,
"plan_length": 204,
"search_time": 20.08,
"total_time": 20.08
},
{
"num_node_expansions": 1133,
"plan_length": 187,
"search_time": 15.61,
"total_time": 15.61
},
{
"num_node_expansions": 777,
"plan_length": 181,
"search_time": 13.35,
"total_time": 13.35
},
{
"num_node_expansions": 591,
"plan_length": 136,
"search_time": 2.59,
"total_time": 2.59
},
{
"num_node_expansions": 580,
"plan_length": 143,
"search_time": 2.89,
"total_time": 2.89
},
{
"num_node_expansions": 977,
"plan_length": 173,
"search_time": 8.97,
"total_time": 8.97
},
{
"num_node_expansions": 694,
"plan_length": 167,
"search_time": 8.22,
"total_time": 8.22
},
{
"num_node_expansions": 861,
"plan_length": 188,
"search_time": 1.14,
"total_time": 1.14
},
{
"num_node_expansions": 790,
"plan_length": 160,
"search_time": 0.93,
"total_time": 0.93
},
{
"num_node_expansions": 841,
"plan_length": 188,
"search_time": 5.61,
"total_time": 5.61
},
{
"num_node_expansions": 436,
"plan_length": 128,
"search_time": 2.46,
"total_time": 2.46
},
{
"num_node_expansions": 550,
"plan_length": 127,
"search_time": 0.03,
"total_time": 0.03
},
{
"num_node_expansions": 434,
"plan_length": 134,
"search_time": 0.03,
"total_time": 0.03
},
{
"num_node_expansions": 958,
"plan_length": 195,
"search_time": 9.09,
"total_time": 9.09
},
{
"num_node_expansions": 658,
"plan_length": 174,
"search_time": 6.01,
"total_time": 6.01
},
{
"num_node_expansions": 370,
"plan_length": 126,
"search_time": 0.06,
"total_time": 0.06
},
{
"num_node_expansions": 440,
"plan_length": 119,
"search_time": 0.08,
"total_time": 0.08
},
{
"num_node_expansions": 648,
"plan_length": 168,
"search_time": 8.1,
"total_time": 8.1
},
{
"num_node_expansions": 832,
"plan_length": 178,
"search_time": 10.9,
"total_time": 10.9
},
{
"num_node_expansions": 355,
"plan_length": 116,
"search_time": 0.7,
"total_time": 0.7
},
{
"num_node_expansions": 495,
"plan_length": 123,
"search_time": 0.86,
"total_time": 0.86
},
{
"num_node_expansions": 612,
"plan_length": 148,
"search_time": 4.23,
"total_time": 4.23
},
{
"num_node_expansions": 1067,
"plan_length": 174,
"search_time": 6.3,
"total_time": 6.3
},
{
"num_node_expansions": 821,
"plan_length": 185,
"search_time": 3.0,
"total_time": 3.0
},
{
"num_node_expansions": 625,
"plan_length": 153,
"search_time": 2.98,
"total_time": 2.98
},
{
"num_node_expansions": 304,
"plan_length": 99,
"search_time": 0.16,
"total_time": 0.16
},
{
"num_node_expansions": 477,
"plan_length": 133,
"search_time": 0.4,
"total_time": 0.4
},
{
"num_node_expansions": 651,
"plan_length": 160,
"search_time": 0.18,
"total_time": 0.18
},
{
"num_node_expansions": 594,
"plan_length": 147,
"search_time": 0.17,
"total_time": 0.17
},
{
"num_node_expansions": 524,
"plan_length": 134,
"search_time": 5.3,
"total_time": 5.3
},
{
"num_node_expansions": 400,
"plan_length": 127,
"search_time": 4.95,
"total_time": 4.95
},
{
"num_node_expansions": 825,
"plan_length": 185,
"search_time": 6.37,
"total_time": 6.37
},
{
"num_node_expansions": 613,
"plan_length": 156,
"search_time": 4.57,
"total_time": 4.57
},
{
"num_node_expansions": 427,
"plan_length": 121,
"search_time": 0.09,
"total_time": 0.09
},
{
"num_node_expansions": 362,
"plan_length": 116,
"search_time": 0.07,
"total_time": 0.07
},
{
"num_node_expansions": 459,
"plan_length": 119,
"search_time": 0.75,
"total_time": 0.75
},
{
"num_node_expansions": 501,
"plan_length": 132,
"search_time": 0.86,
"total_time": 0.86
},
{
"num_node_expansions": 697,
"plan_length": 156,
"search_time": 4.24,
"total_time": 4.24
},
{
"num_node_expansions": 1024,
"plan_length": 162,
"search_time": 7.13,
"total_time": 7.13
},
{
"num_node_expansions": 501,
"plan_length": 122,
"search_time": 4.67,
"total_time": 4.67
},
{
"num_node_expansions": 577,
"plan_length": 126,
"search_time": 5.56,
"total_time": 5.56
},
{
"num_node_expansions": 633,
"plan_length": 152,
"search_time": 17.98,
"total_time": 17.98
},
{
"num_node_expansions": 833,
"plan_length": 186,
"search_time": 24.85,
"total_time": 24.85
},
{
"num_node_expansions": 996,
"plan_length": 183,
"search_time": 4.05,
"total_time": 4.05
},
{
"num_node_expansions": 1246,
"plan_length": 206,
"search_time": 5.39,
"total_time": 5.39
},
{
"num_node_expansions": 466,
"plan_length": 137,
"search_time": 2.03,
"total_time": 2.03
},
{
"num_node_expansions": 530,
"plan_length": 142,
"search_time": 2.28,
"total_time": 2.28
},
{
"num_node_expansions": 923,
"plan_length": 189,
"search_time": 19.77,
"total_time": 19.77
},
{
"num_node_expansions": 799,
"plan_length": 167,
"search_time": 16.16,
"total_time": 16.16
},
{
"num_node_expansions": 651,
"plan_length": 173,
"search_time": 1.38,
"total_time": 1.38
},
{
"num_node_expansions": 590,
"plan_length": 159,
"search_time": 0.94,
"total_time": 0.94
},
{
"num_node_expansions": 542,
"plan_length": 155,
"search_time": 0.07,
"total_time": 0.07
},
{
"num_node_expansions": 418,
"plan_length": 130,
"search_time": 0.05,
"total_time": 0.05
},
{
"num_node_expansions": 881,
"plan_length": 182,
"search_time": 11.01,
"total_time": 11.01
},
{
"num_node_expansions": 1256,
"plan_length": 205,
"search_time": 15.58,
"total_time": 15.58
},
{
"num_node_expansions": 612,
"plan_length": 146,
"search_time": 2.92,
"total_time": 2.92
},
{
"num_node_expansions": 567,
"plan_length": 145,
"search_time": 2.43,
"total_time": 2.43
},
{
"num_node_expansions": 655,
"plan_length": 152,
"search_time": 9.25,
"total_time": 9.25
},
{
"num_node_expansions": 499,
"plan_length": 133,
"search_time": 7.5,
"total_time": 7.5
},
{
"num_node_expansions": 500,
"plan_length": 137,
"search_time": 0.3,
"total_time": 0.3
},
{
"num_node_expansions": 869,
"plan_length": 156,
"search_time": 0.47,
"total_time": 0.47
},
{
"num_node_expansions": 522,
"plan_length": 161,
"search_time": 0.06,
"total_time": 0.06
},
{
"num_node_expansions": 712,
"plan_length": 181,
"search_time": 0.07,
"total_time": 0.07
},
{
"num_node_expansions": 708,
"plan_length": 142,
"search_time": 4.46,
"total_time": 4.46
},
{
"num_node_expansions": 642,
"plan_length": 163,
"search_time": 5.26,
"total_time": 5.26
},
{
"num_node_expansions": 426,
"plan_length": 134,
"search_time": 0.11,
"total_time": 0.11
},
{
"num_node_expansions": 471,
"plan_length": 129,
"search_time": 0.14,
"total_time": 0.14
},
{
"num_node_expansions": 520,
"plan_length": 135,
"search_time": 1.65,
"total_time": 1.65
},
{
"num_node_expansions": 666,
"plan_length": 144,
"search_time": 3.02,
"total_time": 3.02
},
{
"num_node_expansions": 563,
"plan_length": 159,
"search_time": 2.27,
"total_time": 2.27
},
{
"num_node_expansions": 566,
"plan_length": 162,
"search_time": 2.06,
"total_time": 2.06
},
{
"num_node_expansions": 836,
"plan_length": 203,
"search_time": 16.69,
"total_time": 16.69
},
{
"num_node_expansions": 604,
"plan_length": 145,
"search_time": 1.25,
"total_time": 1.25
},
{
"num_node_expansions": 506,
"plan_length": 124,
"search_time": 0.99,
"total_time": 0.99
},
{
"num_node_expansions": 851,
"plan_length": 203,
"search_time": 1.15,
"total_time": 1.15
},
{
"num_node_expansions": 603,
"plan_length": 166,
"search_time": 0.76,
"total_time": 0.76
},
{
"num_node_expansions": 497,
"plan_length": 118,
"search_time": 0.3,
"total_time": 0.3
},
{
"num_node_expansions": 590,
"plan_length": 117,
"search_time": 0.32,
"total_time": 0.32
},
{
"num_node_expansions": 409,
"plan_length": 129,
"search_time": 0.08,
"total_time": 0.08
},
{
"num_node_expansions": 669,
"plan_length": 165,
"search_time": 0.12,
"total_time": 0.12
},
{
"num_node_expansions": 786,
"plan_length": 161,
"search_time": 18.85,
"total_time": 18.85
},
{
"num_node_expansions": 474,
"plan_length": 144,
"search_time": 10.09,
"total_time": 10.09
},
{
"num_node_expansions": 579,
"plan_length": 165,
"search_time": 1.18,
"total_time": 1.18
},
{
"num_node_expansions": 620,
"plan_length": 160,
"search_time": 1.01,
"total_time": 1.01
},
{
"num_node_expansions": 1523,
"plan_length": 221,
"search_time": 25.37,
"total_time": 25.37
},
{
"num_node_expansions": 961,
"plan_length": 207,
"search_time": 18.62,
"total_time": 18.62
},
{
"num_node_expansions": 444,
"plan_length": 127,
"search_time": 3.93,
"total_time": 3.93
},
{
"num_node_expansions": 464,
"plan_length": 127,
"search_time": 4.01,
"total_time": 4.01
},
{
"num_node_expansions": 773,
"plan_length": 194,
"search_time": 0.78,
"total_time": 0.78
},
{
"num_node_expansions": 676,
"plan_length": 161,
"search_time": 0.83,
"total_time": 0.83
},
{
"num_node_expansions": 414,
"plan_length": 127,
"search_time": 0.39,
"total_time": 0.39
},
{
"num_node_expansions": 623,
"plan_length": 165,
"search_time": 0.66,
"total_time": 0.66
},
{
"num_node_expansions": 703,
"plan_length": 163,
"search_time": 1.06,
"total_time": 1.06
},
{
"num_node_expansions": 785,
"plan_length": 176,
"search_time": 1.02,
"total_time": 1.02
},
{
"num_node_expansions": 986,
"plan_length": 167,
"search_time": 15.72,
"total_time": 15.72
},
{
"num_node_expansions": 955,
"plan_length": 205,
"search_time": 12.55,
"total_time": 12.55
},
{
"num_node_expansions": 417,
"plan_length": 118,
"search_time": 0.05,
"total_time": 0.05
},
{
"num_node_expansions": 521,
"plan_length": 141,
"search_time": 0.06,
"total_time": 0.06
},
{
"num_node_expansions": 815,
"plan_length": 182,
"search_time": 26.55,
"total_time": 26.55
}
]
num_timeouts = 15
num_timeouts = 0
num_problems = 172
| [
2257,
33586,
796,
685,
198,
220,
220,
220,
1391,
198,
220,
220,
220,
220,
220,
220,
220,
366,
22510,
62,
17440,
62,
11201,
504,
507,
1298,
718,
4310,
11,
198,
220,
220,
220,
220,
220,
220,
220,
366,
11578,
62,
13664,
1298,
26118,
... | 1.715698 | 12,237 |
import re
import ast
import operator
def literal_eval(node_or_string):
"""
Safely evaluate an expression node or a string containing a Python
expression. The string or node provided may only consist of the following
Python literal structures: strings, numbers, tuples, lists, dicts, booleans,
and None.
"""
_safe_names = {
'None': None,
'True': True,
'False': False,
'dict': dict,
'list': list,
'sorted': sorted
}
if isinstance(node_or_string, str):
node_or_string = parse(node_or_string, mode='eval')
if isinstance(node_or_string, ast.Expression):
node_or_string = node_or_string.body
return _convert(node_or_string)
if __name__ == '__main__':
signatures = '''
(1, 2, 3) more
(key='value') more
(**dict(key='value')) more
(*[1, 2, 3]) more
{:class => "code", :id => "message"} Hello
(class_='before %s after' % 'middle') hello
(data-crud=dict(id=34, url='/api')) crud goes here
(u'unicode!', b'bytes!')
(' '.join(['hello', 'there'])) after
([i for i in 'hello'])
'''.strip().splitlines()
for sig in signatures:
print sig
args, remaining = parse_args(sig[1:], {'(':')', '{':'}'}[sig[0]])
for key, source, root in args:
try:
value = literal_eval(root)
print '%s: %r' % (key, value)
except ValueError as e:
print '%s -> %s' % (key, e)
print repr(remaining), 'remains'
print
| [
11748,
302,
198,
11748,
6468,
198,
11748,
10088,
628,
628,
198,
4299,
18875,
62,
18206,
7,
17440,
62,
273,
62,
8841,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
19978,
306,
13446,
281,
5408,
10139,
393,
257,
4731,
7268,
257,
... | 2.178771 | 716 |
from collections import OrderedDict
from sage.all import (operator, flatten, PolynomialRing, SR, QQ, ZZ, RR, sage, oo)
from vu_common import (pause, get_logger,is_iterable, is_str, is_empty)
is_sage_expr = lambda x: isinstance(x, sage.symbolic.expression.Expression)
is_sage_real = lambda x: isinstance(x, sage.rings.real_mpfr.RealLiteral)
is_sage_int = lambda x: isinstance(x, sage.rings.integer.Integer)
is_sage_num = lambda x: is_sage_real(x) or is_sage_int(x)
def is_sage_inf(x):
"""
Example:
sage: is_sage_inf(oo)
True
sage: is_sage_inf(-oo)
True
sage: is_sage_inf(oo+3)
True
sage: is_sage_inf(oo-3)
True
sage: is_sage_inf(SR(-oo))
True
sage: is_sage_inf(x)
False
sage: is_sage_inf(x+3)
False
sage: is_sage_inf(8)
False
"""
try:
return x.is_infinity()
except AttributeError:
return x == oo or x == -oo
is_sage_int_inf = lambda x: is_sage_int(x) or is_sage_inf(x)
to_sage_int = lambda x: x if is_sage_int(x) else ZZ(x)
def is_sage_symbol(s):
"""
sage: assert is_sage_symbol(x)
sage: assert not is_sage_symbol(x+1)
sage: assert not is_sage_symbol(1)
"""
try:
return s.is_symbol()
except AttributeError:
return False
def is_sage_rel(f, rel=None):
"""
sage: assert not is_sage_rel(7.2)
sage: assert not is_sage_rel(x)
sage: assert not is_sage_rel(x+7)
sage: assert is_sage_rel(x==3,operator.eq)
sage: assert is_sage_rel(x<=3,operator.le)
sage: assert not is_sage_rel(x<=3,operator.lt)
sage: assert not is_sage_rel(x+3,operator.lt)
sage: y = var('y')
sage: assert is_sage_rel(x+y<=3)
"""
try:
if not f.is_relational():
return False
if rel is None:
return True
else:
return f.operator() == rel
except AttributeError:
return False
is_sage_eq = lambda f: is_sage_rel(f, operator.eq)
def get_vars(ps):
"""
Returns a list of uniq variables from a list of properties
Examples:
sage: var('a b c x')
(a, b, c, x)
sage: assert [a, b, c, x] == get_vars([x^(a*b) + a**2+b+2==0, c**2-b==100, b**2 + c**2 + a**3>= 1])
sage: assert get_vars(a**2+b+5*c+2==0) == [a, b, c]
sage: assert get_vars(x+x^2) == [x]
sage: assert get_vars([3]) == []
sage: assert get_vars((3,'x + c',x+b)) == [b, x]
"""
ps = ps if is_iterable(ps) else [ps]
vs = flatten([p.variables() for p in ps if is_sage_expr(p)])
return sorted(set(vs), key=str)
def get_coefs_terms(p, base_ring = QQ, as_dict=False):
"""
Returns the Coefs and Terms of a given expression
Examples:
sage: assert get_coefs_terms(x) == ([1], [x])
sage: assert get_coefs_terms(x,as_dict=True) == {x: 1}
sage: var('a b c')
(a, b, c)
sage: assert get_coefs_terms(a**2+b+5*c+2==0) == ([1, 1, 5, 2], [a^2, b, c, 1])
sage: assert get_coefs_terms(a**2+b+5*c+2==0, as_dict=True) == {b: 1, 1: 2, a^2: 1, c: 5}
sage: assert get_coefs_terms(10/3*a**2+3*b+5*c+2) == ([10/3, 3, 5, 2], [a^2, b, c, 1])
sage: assert get_coefs_terms(10/3*a**2+3*b+5*c+2, as_dict=True) == {b: 3, 1: 2, a^2: 10/3, c: 5}
sage: assert get_coefs_terms(a+b<=3, as_dict=True) == {1: -3, b: 1, a: 1}
sage: assert all(is_sage_int(v) for v in get_coefs_terms(a+b<=3, as_dict=True, base_ring=ZZ).values())
#sage 6.2 breaks this
#sage: assert get_coefs_terms(a - b <= oo) == ([1, -1, -infinity], [a, b, 1])
sage: assert get_coefs_terms(SR(7), as_dict=True) == {1: 7}
sage: assert get_coefs_terms(SR(3))==([3], [1])
sage: assert get_coefs_terms(SR(oo))==([+Infinity], [1])
sage: assert get_coefs_terms(SR(-oo)) == ([-Infinity], [1])
sage: assert get_coefs_terms(a + b <= .9,base_ring=ZZ) == ([1, 1, -0.900000000000000], [a, b, 1])
sage: assert is_sage_int(get_coefs_terms(SR(7),base_ring=ZZ,as_dict=True).values()[0])
"""
use_wrong_base_ring = False
if is_sage_rel(p):
p = mk_rhs_0(p).lhs()
if p.is_integer() or p.is_real():
ts = [SR(1)]
cs = [p if p.is_infinity() else base_ring(p)]
else:
ss = get_vars(p)
assert not is_empty(ss), (p,ss)
mk_pr = lambda b, p: PolynomialRing(b, ss, None if len(ss) >= 2 else 1)(p)
try:
pr_p = mk_pr(base_ring, p)
except TypeError:
if base_ring == RR:
#if cannot do over RR then return None
return None
else:
#otherwise, try with RR
try:
pr_p = mk_pr(RR,p)
use_wrong_base_ring = True
except Exception as msg:
return None
cs = pr_p.coefficients()
ts = map(SR, pr_p.monomials())
if use_wrong_base_ring:
ts = [SR(1) if bool(t.is_one()) else t for t in ts]
cs_ = []
for c in cs:
if c == oo:
cs_.append(oo)
elif c == -oo:
cs_.append(-oo)
else:
try:
cs_.append(base_ring(c))
except ValueError:
cs_.append(c)
except TypeError:
cs_.append(c)
cs = cs_
assert all(is_sage_expr(t) for t in ts), ts
if as_dict:
d = OrderedDict()
for t,c in zip(ts,cs):
d[t] = c
return d
else:
return cs,ts
def mk_rhs_0(p):
"""
sage: var('x,y')
(x, y)
sage: mk_rhs_0(x - y >= 3)
x - y - 3 >= 0
sage: mk_rhs_0(x - y - 3 >= 0)
x - y - 3 >= 0
sage: mk_rhs_0(0 <= x - y - 3)
-x + y + 3 <= 0
sage: mk_rhs_0(0 == x)
-x == 0
sage: mk_rhs_0(10 == -x)
x + 10 == 0
#Sage 5.11 broke all these (i.e., broke lhs.add(..,hold=))
# sage: mk_rhs_0(x <= oo)
# x - Infinity <= 0
# sage: mk_rhs_0(x <= -oo)
# x + +Infinity <= 0
# sage: mk_rhs_0(x >= oo)
# x - Infinity >= 0
# sage: mk_rhs_0(oo >= x)
# +Infinity - x >= 0
sage: mk_rhs_0(x - y - 3)
Traceback (most recent call last):
...
AssertionError: x - y - 3
"""
assert is_sage_rel(p), p
rhs = p.rhs()
lhs = p.lhs()
if not rhs.is_zero():
lhs = lhs.add(-rhs, hold=(rhs.is_infinity() or lhs.is_infinity()))
rhs = 0
p = p.operator()(lhs, rhs)
return p
# def myreduce(op, ls):
# """
# Apply operator op to list of arguments
# Note, it seems the above arguments are *enough*, no need to implement for (-,div) etc because the function that calls this will break x - y to myreduce(op,[x,-y]) or x / y to myreduce(op,[x,1/y]) and 1/y => mul(1,y^{-1})
# sage: assert myreduce(operator.add, [x,x]) == 2*x
# sage: assert myreduce(operator.add, [3,x]) == x + 3
# sage: myreduce(operator.le, [3,x])
# 3 <= x
# sage: assert myreduce(operator.pow,[3,x]) == 3^x
# """
# if __debug__:
# assert len(ls) >= 2, ls
# assert op in [operator.add,operator.mul,
# operator.pow,operator.eq,operator.ne,
# operator.le,operator.lt,operator.ge,operator.gt], op
# return reduce(lambda a, b: op(a,b), ls[1:], ls[0])
# def mk_expr(expr, d, ring_typ=ZZ):
# """
# Make a new expression like expr but with all vars in expr replaced
# with those in dictionary d. Used when subs() is not applicable
# sage: y = var('y')
# sage: lp = MixedIntegerLinearProgram()
# sage: s0 = lp['s0']
# sage: s1 = lp['s1']
# sage: d = {x:s0,y:s1}
# sage: mk_expr(x+y+3, d)
# 3 + x_0 + x_1
# sage: mk_expr(x+y+3<=8,d)
# 3 + x_0 + x_1 <= 8
# sage: mk_expr(x==y+5,d)
# x_0 == 5 + x_1
# """
# def retval(expr):
# if is_sage_symbol(expr): #symbol, e.g. x
# return d[expr]
# else: #const , e.g. 3
# return ring_typ(expr)
# try:
# oprs = expr.operands()
# except AttributeError:
# #e.g. const 3, .5
# return retval(expr)
# if is_empty(oprs): #symbol
# return retval(expr)
# else:
# oprs = [mk_expr(o,d) for o in oprs]
# print oprs
# rs = myreduce(expr.operator(), oprs)
# return rs
if __name__ == "__main__":
import doctest
doctest.testmod()
| [
6738,
17268,
1330,
14230,
1068,
35,
713,
198,
6738,
35021,
13,
439,
1330,
357,
46616,
11,
27172,
268,
11,
12280,
26601,
498,
39687,
11,
16808,
11,
1195,
48,
11,
1168,
57,
11,
26067,
11,
35021,
11,
267,
78,
8,
198,
6738,
410,
84,
6... | 1.88061 | 4,523 |
from .read import read_axivity, read_geneactiv
from .gt3x_convert import read_gt3x
__all__ = ("read_axivity", "read_geneactiv", "read_gt3x")
| [
6738,
764,
961,
1330,
1100,
62,
897,
3458,
11,
1100,
62,
70,
1734,
15791,
198,
6738,
764,
13655,
18,
87,
62,
1102,
1851,
1330,
1100,
62,
13655,
18,
87,
198,
198,
834,
439,
834,
796,
5855,
961,
62,
897,
3458,
1600,
366,
961,
62,
... | 2.535714 | 56 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
import django.contrib.gis.db.models.fields
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
4981,
11,
15720,
602,
198,
6738,
42625,
14208,
13,
10414,
1330,
... | 3 | 61 |
import setuptools
import json
with open("README.rst", "r") as fh:
long_description = fh.read()
with open('pipeline_description.json', 'r') as fh:
pipeline = json.load(fh)
name = pipeline['GeneratedBy'][0]['Name']
description = pipeline['Name']
version = pipeline['GeneratedBy'][0]['Version']
url = pipeline['GeneratedBy'][0]['CodeURL']
author = pipeline['GeneratedBy'][0]['Author']
author_email = pipeline['GeneratedBy'][0]['AuthorEmail']
setuptools.setup(
name=name,
version=version,
author=author,
author_email=author_email,
description=description,
long_description=long_description,
long_description_content_type="text/x-rst",
url=url,
packages=setuptools.find_packages(),
include_package_data=True,
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
entry_points={'console_scripts': [
'{{cookiecutter.app_name}}={{cookiecutter.app_name}}.run:main'
]},
install_requires=[
"snakebids>={{cookiecutter.snakebids_version}}",
"snakemake"
],
python_requires='>=3.7'
)
| [
11748,
900,
37623,
10141,
198,
11748,
33918,
198,
198,
4480,
1280,
7203,
15675,
11682,
13,
81,
301,
1600,
366,
81,
4943,
355,
277,
71,
25,
198,
220,
220,
220,
890,
62,
11213,
796,
277,
71,
13,
961,
3419,
198,
198,
4480,
1280,
10786,... | 2.50104 | 481 |
from pymooCFD.setupOpt import checkpointFile, dataDir, nCP, archDir, \
preProcDir, cluster
from pymooCFD.util.sysTools import removeDir #, makeDir, emptyDir
from pymooCFD.setupCFD import runCase
import numpy as np
import time
import os
import tarfile
from dask.distributed import Client
from sys import exit
# def getGen(checkpointFile=checkpointFile):
# try:
# loadCP(checkpointFile=checkpointFile)
# except FileNotFoundError as err:
# print(err)
# return 0
# def popGen(gen, checkpointFile=checkpointFile):
# '''
# Parameters
# ----------
# gen : int
# generation you wish to get population from
# checkpointFile : str, optional
# checkpoint file path where Algorithm object was saved using numpy.save().
# The default is checkpointFile (defined in beginning of setupOpt.py).
# Returns
# -------
# pop :
# Contains StaticProblem object with population of individuals from
# generation <gen>.
# Notes
# -----
# - development needed to handle constraints
# '''
# alg = loadCP(checkpointFile=checkpointFile)
# X = alg.callback.data['var'][gen]
# F = alg.callback.data['obj'][gen]
# from pymoo.model.evaluator import Evaluator
# from pymoo.model.population import Population
# from pymoo.model.problem import StaticProblem
# # now the population object with all its attributes is created (CV, feasible, ...)
# pop = Population.new("X", X)
# pop = Evaluator().eval(StaticProblem(problem, F=F), pop) # , G=G), pop)
# return pop, alg
# def loadTxt(fileX, fileF, fileG=None):
# print(f'Loading population from files {fileX} and {fileF}...')
# X = np.loadtxt(fileX)
# F = np.loadtxt(fileF)
# # F = np.loadtxt(f'{dataDir}/{fileF}')
# if fileG is not None:
# # G = np.loadtxt(f'{dataDir}/{fileG}')
# G = np.loadtxt(fileG)
# else:
# G = None
# from pymoo.model.evaluator import Evaluator
# from pymoo.model.population import Population
# from pymoo.model.problem import StaticProblem
# # now the population object with all its attributes is created (CV, feasible, ...)
# pop = Population.new("X", X)
# pop = Evaluator().eval(StaticProblem(problem, F=F, G=G), pop)
# from pymooCFD.setupOpt import pop_size
# # from pymoo.algorithms.so_genetic_algorithm import GA
# # # the algorithm is now called with the population - biased initialization
# # algorithm = GA(pop_size=pop_size, sampling=pop)
# from pymoo.algorithms.nsga2 import NSGA2
# algorithm = NSGA2(pop_size=pop_size, sampling=pop)
# return algorithm
# def restartGen(gen, checkpointFile=checkpointFile):
# pop, alg = popGen(gen, checkpointFile=checkpointFile)
# alg.sampling()
# # from pymoo.algorithms.so_genetic_algorithm import GA
# # the algorithm is now called with the population - biased initialization
# # algorithm = GA(pop_size=100, sampling=pop)
# from pymoo.optimize import minimize
# from pymooCFD.setupOpt import problem
# res = minimize(problem,
# alg,
# ('n_gen', 10),
# seed=1,
# verbose=True)
# return res
# def loadTxt():
# try:
# print('Loading from text files')
# X = np.loadtxt('var.txt')
# F = np.loadtxt('obj.txt')
# except OSError as err:
# print(err)
# print('Failed to load text files')
# print('Data loading failed returning "None, None"...')
# return None, None
# def archive(dirName, archName = 'archive.tar.gz'):
# with tarfile.open(archName, 'a') as tar:
# tar.add(dirName)
# compressDir('../../dump')
# print('creating archive')
# out = tarfile.open('example.tar.gz', mode='a')
# try:
# print('adding README.txt')
# out.add('../dump')
# finally:
# print('closing tar archive')
# out.close()
#
# print('Contents of archived file:')
# t = tarfile.open('example.tar.gz', 'r')
# for member in t.getmembers():
# print(member.name)
| [
6738,
279,
4948,
2238,
22495,
35,
13,
40406,
27871,
1330,
26954,
8979,
11,
1366,
35277,
11,
299,
8697,
11,
3934,
35277,
11,
3467,
198,
220,
220,
220,
662,
2964,
66,
35277,
11,
13946,
198,
6738,
279,
4948,
2238,
22495,
35,
13,
22602,
... | 2.421517 | 1,701 |
from scipy import signal
import matplotlib.pyplot as plt
import numpy as np
t = np.linspace(1, 201, 200, endpoint=False)
sig = np.cos(2 * np.pi * 7 * t) + signal.gausspulse(t - 0.4, fc=2)
widths = np.arange(1, 31)
cwtmatr = signal.cwt(sig, signal.ricker, widths)
plt.imshow(cwtmatr, extent=[1, 201, 31, 1], cmap='PRGn', aspect='auto',
vmax=abs(cwtmatr).max(), vmin=-abs(cwtmatr).max())
plt.show()
| [
6738,
629,
541,
88,
1330,
6737,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
299,
32152,
355,
45941,
198,
198,
83,
796,
45941,
13,
21602,
10223,
7,
16,
11,
580,
11,
939,
11,
36123,
28,
25101,
8,
198,
8... | 2.164021 | 189 |
description = 'minimal NICOS startup setup'
group = 'lowlevel'
sysconfig = dict(
cache = 'tofhw.toftof.frm2:14869',
)
| [
11213,
796,
705,
1084,
4402,
45593,
2640,
13693,
9058,
6,
198,
198,
8094,
796,
705,
9319,
5715,
6,
198,
198,
17597,
11250,
796,
8633,
7,
198,
220,
220,
220,
12940,
796,
705,
1462,
69,
36599,
13,
1462,
701,
1659,
13,
8310,
76,
17,
... | 2.48 | 50 |
from valhalla.extract import DataExtractor
from sklearn.pipeline import Pipeline
from ._transform import FeatureConcat | [
6738,
1188,
41911,
13,
2302,
974,
1330,
6060,
11627,
40450,
198,
6738,
1341,
35720,
13,
79,
541,
4470,
1330,
37709,
198,
6738,
47540,
35636,
1330,
27018,
3103,
9246
] | 4.214286 | 28 |
"""Guessing Game Visualization
You do not need to understand any of the code in this file.
"""
# This section avoids asking for user input.
import lab01
lab01.LOWER = 1
lab01.UPPER = 100
lab01.prompt_for_number = prompt_for_number
lab01.is_correct = is_correct
lab01.is_too_high = is_too_high
# This section runs an algorithm many times.
from collections import defaultdict
import sys
import webbrowser
def get_frequency(algorithm_name, runs=1000):
"""Collect frequencies and plot them."""
if not hasattr(lab01, algorithm_name):
raise ValueError('invalid guessing algorithm ({0})'.format(algorithm_name))
algorithm = getattr(lab01, algorithm_name)
counts = defaultdict(int)
for i in range(runs):
num_guesses = algorithm()
counts[num_guesses] += 1
most_guesses = max(counts)
if most_guesses == 1:
raise ValueError('num_guesses was always 1. Make sure your functions '
'are returning the correct number of guesses!')
xs = range(1, most_guesses+1)
ys = [sum(counts[i] for i in range(1, x+1)) for x in xs]
if algorithm_name == 'guess_binary':
x_axis_string = '|'.join(map(str, xs))
y_axis_string = ','.join(map(str, ys))
chxp = ','.join(map(str, range(int(100 / 2 / most_guesses)+1, 100, int(100 / most_guesses))))
data_string = 'chd=t:{0}&chxl=0:|{1}|2:|Max number of guesses|3:|Frequency|&chxp=0,{3}|2,50|3,{2}'.format(y_axis_string, x_axis_string, runs/2, chxp)
else:
step = max(most_guesses // 10, 1)
x_axis_string = '|'.join(map(str, range(0, most_guesses+1, step)))
y_axis_string = ','.join(map(str, ys))
data_string = 'chd=t:{0}&chxl=0:|{1}|2:|Max number of guesses|3:|Frequency|&chxp=0,0|2,50|3,{2}'.format(y_axis_string, x_axis_string, runs/2)
url = 'http://chart.googleapis.com/chart?cht=bvg&chtt={0}&chxt=x,y,x,y&chs=500x500&{1}&chds=a&chco=3072F3&chbh=a&chm=s,000000,0,-1,5|s,000000,1,-1,5&chdlp=l'.format(algorithm_name, data_string)
webbrowser.open_new(url)
if __name__ == "__main__":
file_name, algorithm_name = sys.argv
get_frequency(algorithm_name) | [
37811,
8205,
27289,
3776,
15612,
1634,
198,
198,
1639,
466,
407,
761,
284,
1833,
597,
286,
262,
2438,
287,
428,
2393,
13,
198,
37811,
198,
198,
2,
770,
2665,
30940,
4737,
329,
2836,
5128,
13,
198,
198,
11748,
2248,
486,
198,
23912,
... | 2.302674 | 935 |
import pandas as pd
name = 'drop-column'
if __name__ == "__main__":
data = [['tom', 10], ['nick', 15], ['juli', 15]]
df = pd.DataFrame(data, columns = ['Name', 'Age'])
args = {
'--columns':[
'Age',
'Name'
]
}
operator(df, args) | [
11748,
19798,
292,
355,
279,
67,
198,
198,
3672,
796,
705,
14781,
12,
28665,
6,
198,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
220,
198,
220,
220,
220,
1366,
796,
16410,
6,
39532,
3256,
838,
4357,
37250,
17172,
3... | 1.94702 | 151 |
# Generated by Django 2.0.7 on 2018-11-02 22:15
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
362,
13,
15,
13,
22,
319,
2864,
12,
1157,
12,
2999,
2534,
25,
1314,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
198
] | 2.8125 | 32 |
import torch
import torch.nn as nn
import torch.nn.functional as F
try:
from torch.hub import load_state_dict_from_url
except ImportError:
from torch.utils.model_zoo import load_url as load_state_dict_from_url
import torchvision.models.resnet as torch_resnet
from torchvision.models.resnet import BasicBlock, Bottleneck
model_urls = {'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
'resnext50_32x4d': 'https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth',
'resnext101_32x8d': 'https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth',
'wide_resnet50_2': 'https://download.pytorch.org/models/wide_resnet50_2-95faca4d.pth',
'wide_resnet101_2': 'https://download.pytorch.org/models/wide_resnet101_2-32ee1156.pth',
}
def resnet152(pretrained=False, progress=True, **kwargs):
r"""ResNet-152 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet152', Bottleneck, [3, 8, 36, 3], pretrained, progress,
**kwargs)
def resnext50_32x4d(pretrained=False, progress=True, **kwargs):
r"""ResNeXt-50 32x4d model from
`"Aggregated Residual Transformation for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
kwargs['groups'] = 32
kwargs['width_per_group'] = 4
return _resnet('resnext50_32x4d', Bottleneck, [3, 4, 6, 3],
pretrained, progress, **kwargs)
def resnext101_32x8d(pretrained=False, progress=True, **kwargs):
r"""ResNeXt-101 32x8d model from
`"Aggregated Residual Transformation for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
kwargs['groups'] = 32
kwargs['width_per_group'] = 8
return _resnet('resnext101_32x8d', Bottleneck, [3, 4, 23, 3],
pretrained, progress, **kwargs)
def wide_resnet50_2(pretrained=False, progress=True, **kwargs):
r"""Wide ResNet-50-2 model from
`"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_
The model is the same as ResNet except for the bottleneck number of channels
which is twice larger in every block. The number of channels in outer 1x1
convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048
channels, and in Wide ResNet-50-2 has 2048-1024-2048.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
kwargs['width_per_group'] = 64 * 2
return _resnet('wide_resnet50_2', Bottleneck, [3, 4, 6, 3],
pretrained, progress, **kwargs)
def wide_resnet101_2(pretrained=False, progress=True, **kwargs):
r"""Wide ResNet-101-2 model from
`"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_
The model is the same as ResNet except for the bottleneck number of channels
which is twice larger in every block. The number of channels in outer 1x1
convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048
channels, and in Wide ResNet-50-2 has 2048-1024-2048.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
kwargs['width_per_group'] = 64 * 2
return _resnet('wide_resnet101_2', Bottleneck, [3, 4, 23, 3],
pretrained, progress, **kwargs)
| [
11748,
28034,
198,
11748,
28034,
13,
20471,
355,
299,
77,
198,
11748,
28034,
13,
20471,
13,
45124,
355,
376,
198,
28311,
25,
198,
220,
220,
220,
422,
28034,
13,
40140,
1330,
3440,
62,
5219,
62,
11600,
62,
6738,
62,
6371,
198,
16341,
... | 2.51841 | 1,711 |
import pandas
from ..schema.schema_base import *
from .datastore_base import DataStore
from .odo_datastore import OdoDataStore
from ..config import config
from functools import lru_cache, partial
from sqlalchemy import Table, MetaData, select
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.automap import automap_base
from sqlalchemy import create_engine
from sqlalchemy.ext.compiler import compiles
from sqlalchemy.sql.expression import Select, and_
from sqlalchemy import sql
import io
import tempfile
import time
import os
import datetime
import ciso8601
import odo
metadatas = {}
########################################################################
for col_type in [dt, delta, num, bool_]:
col_type._storage_target_registry['sqlalchemy'] = col_type._storage_target_registry['pandas'].copy()
@cat.register_check('sqlalchemy')
@cat.register_transform('sqlalchemy')
@id_.register_check('sqlalchemy')
@id_.register_transform('sqlalchemy')
########################################################################
@cat.register_metadata('sqlalchemy')
@id_.register_metadata('sqlalchemy')
@dt.register_metadata('sqlalchemy')
@delta.register_metadata('sqlalchemy')
@big_dt.register_metadata('sqlalchemy')
@num.register_metadata('sqlalchemy')
@bool_.register_metadata('sqlalchemy')
########################################################################
@lru_cache()
sa_type_2_col_type = {
sql.sqltypes.Integer: num,
sql.sqltypes.String: cat,
sql.sqltypes.Date: dt,
sql.sqltypes.DateTime: dt,
sql.sqltypes.Interval: delta,
sql.sqltypes.Numeric: num,
sql.sqltypes.Boolean: bool_
}
########################################################################
| [
11748,
19798,
292,
198,
6738,
11485,
15952,
2611,
13,
15952,
2611,
62,
8692,
1330,
1635,
198,
6738,
764,
19608,
459,
382,
62,
8692,
1330,
6060,
22658,
198,
6738,
764,
24313,
62,
19608,
459,
382,
1330,
440,
4598,
6601,
22658,
198,
6738,
... | 3.317919 | 519 |
"""
Fast R-CNN:
data =
{'data': [num_images, c, h, w],
'rois': [num_rois, 5]}
label =
{'label': [num_rois],
'bbox_target': [num_rois, 4 * num_classes],
'bbox_weight': [num_rois, 4 * num_classes]}
roidb extended format [image_index]
['image', 'height', 'width', 'flipped',
'boxes', 'gt_classes', 'gt_overlaps', 'max_classes', 'max_overlaps', 'bbox_targets']
"""
import numpy as np
import numpy.random as npr
from bbox.bbox_regression import expand_bbox_regression_targets
from bbox.bbox_transform import bbox_overlaps, bbox_transform
from utils.image import get_image, tensor_vstack
def get_rcnn_testbatch(roidb, cfg):
"""
return a dict of testbatch
:param roidb: ['image', 'flipped'] + ['boxes']
:return: data, label, im_info
"""
# assert len(roidb) == 1, 'Single batch only'
imgs, roidb = get_image(roidb, cfg)
im_array = imgs
im_info = [np.array([roidb[i]['im_info']], dtype=np.float32) for i in range(len(roidb))]
im_rois = [roidb[i]['boxes'] for i in range(len(roidb))]
if cfg.network.ROIDispatch:
data = []
for i in range(len(im_rois)):
w = im_rois[i][:, 2] - im_rois[i][:, 0] + 1
h = im_rois[i][:, 3] - im_rois[i][:, 1] + 1
feat_id = np.clip(np.floor(2 + np.log2(np.sqrt(w * h) / 224)), 0, 3).astype(int)
rois_0 = im_rois[i][np.where(feat_id == 0)]
if len(rois_0) == 0:
rois_0 = np.zeros((1, 4))
rois_1 = im_rois[i][np.where(feat_id == 1)]
if len(rois_1) == 0:
rois_1 = np.zeros((1, 4))
rois_2 = im_rois[i][np.where(feat_id == 2)]
if len(rois_2) == 0:
rois_2 = np.zeros((1, 4))
rois_3 = im_rois[i][np.where(feat_id == 3)]
if len(rois_3) == 0:
rois_3 = np.zeros((1, 4))
# stack batch index
data.append({'data': im_array[i],
'rois_0': np.hstack((0 * np.ones((rois_0.shape[0], 1)), rois_0)),
'rois_1': np.hstack((0 * np.ones((rois_1.shape[0], 1)), rois_1)),
'rois_2': np.hstack((0 * np.ones((rois_2.shape[0], 1)), rois_2)),
'rois_3': np.hstack((0 * np.ones((rois_3.shape[0], 1)), rois_3))})
if cfg.TEST.LEARN_NMS:
data[-1]['im_info'] = im_info[i]
else:
rois = im_rois
rois_array = [np.hstack((0 * np.ones((rois[i].shape[0], 1)), rois[i])) for i in range(len(rois))]
data = []
for i in range(len(roidb)):
data.append({'data': im_array[i],
'rois': rois_array[i]})
if cfg.TEST.LEARN_NMS:
data[-1]['im_info'] = im_info[i]
label = {}
return data, label, im_info
def get_rcnn_batch(roidb, cfg):
"""
return a dict of multiple images
:param roidb: a list of dict, whose length controls batch size
['images', 'flipped'] + ['gt_boxes', 'boxes', 'gt_overlap'] => ['bbox_targets']
:return: data, label
"""
num_images = len(roidb)
imgs, roidb = get_image(roidb, cfg)
im_array = tensor_vstack(imgs)
assert cfg.TRAIN.BATCH_ROIS == -1 or cfg.TRAIN.BATCH_ROIS % cfg.TRAIN.BATCH_IMAGES == 0, \
'BATCHIMAGES {} must divide BATCH_ROIS {}'.format(cfg.TRAIN.BATCH_IMAGES, cfg.TRAIN.BATCH_ROIS)
if cfg.TRAIN.BATCH_ROIS == -1:
rois_per_image = np.sum([iroidb['boxes'].shape[0] for iroidb in roidb])
fg_rois_per_image = rois_per_image
else:
rois_per_image = cfg.TRAIN.BATCH_ROIS / cfg.TRAIN.BATCH_IMAGES
fg_rois_per_image = np.round(cfg.TRAIN.FG_FRACTION * rois_per_image).astype(int)
if cfg.network.ROIDispatch:
rois_array_0 = list()
rois_array_1 = list()
rois_array_2 = list()
rois_array_3 = list()
else:
rois_array = list()
gt_labels_array = list()
labels_array = list()
bbox_targets_array = list()
bbox_weights_array = list()
for im_i in range(num_images):
roi_rec = roidb[im_i]
# infer num_classes from gt_overlaps
num_classes = roi_rec['gt_overlaps'].shape[1]
# label = class RoI has max overlap with
rois = roi_rec['boxes']
labels = roi_rec['max_classes']
overlaps = roi_rec['max_overlaps']
bbox_targets = roi_rec['bbox_targets']
gt_lables = roi_rec['is_gt']
if cfg.TRAIN.BATCH_ROIS == -1:
im_rois, labels_t, bbox_targets, bbox_weights = \
sample_rois_v2(rois, num_classes, cfg, labels=labels, overlaps=overlaps, bbox_targets=bbox_targets,
gt_boxes=None)
assert np.abs(im_rois - rois).max() < 1e-3
assert np.abs(labels_t - labels).max() < 1e-3
else:
im_rois, labels, bbox_targets, bbox_weights, gt_lables = \
sample_rois(rois, fg_rois_per_image, rois_per_image, num_classes, cfg,
labels, overlaps, bbox_targets, gt_lables=gt_lables)
# project im_rois
# do not round roi
if cfg.network.ROIDispatch:
w = im_rois[:, 2] - im_rois[:, 0] + 1
h = im_rois[:, 3] - im_rois[:, 1] + 1
feat_id = np.clip(np.floor(2 + np.log2(np.sqrt(w * h) / 224)), 0, 3).astype(int)
rois_0_idx = np.where(feat_id == 0)[0]
rois_0 = im_rois[rois_0_idx]
if len(rois_0) == 0:
rois_0 = np.zeros((1, 4))
label_0 = -np.ones((1,))
gt_label_0 = -np.ones((1,))
bbox_targets_0 = np.zeros((1, bbox_targets.shape[1]))
bbox_weights_0 = np.zeros((1, bbox_weights.shape[1]))
else:
label_0 = labels[rois_0_idx]
gt_label_0 = gt_lables[rois_0_idx]
bbox_targets_0 = bbox_targets[rois_0_idx]
bbox_weights_0 = bbox_weights[rois_0_idx]
rois_1_idx = np.where(feat_id == 1)[0]
rois_1 = im_rois[rois_1_idx]
if len(rois_1) == 0:
rois_1 = np.zeros((1, 4))
label_1 = -np.ones((1,))
gt_label_1 = -np.ones((1,))
bbox_targets_1 = np.zeros((1, bbox_targets.shape[1]))
bbox_weights_1 = np.zeros((1, bbox_weights.shape[1]))
else:
label_1 = labels[rois_1_idx]
gt_label_1 = gt_lables[rois_1_idx]
bbox_targets_1 = bbox_targets[rois_1_idx]
bbox_weights_1 = bbox_weights[rois_1_idx]
rois_2_idx = np.where(feat_id == 2)
rois_2 = im_rois[rois_2_idx]
if len(rois_2) == 0:
rois_2 = np.zeros((1, 4))
label_2 = -np.ones((1,))
gt_label_2 = -np.ones((1,))
bbox_targets_2 = np.zeros((1, bbox_targets.shape[1]))
bbox_weights_2 = np.zeros((1, bbox_weights.shape[1]))
else:
label_2 = labels[rois_2_idx]
gt_label_2 = gt_lables[rois_2_idx]
bbox_targets_2 = bbox_targets[rois_2_idx]
bbox_weights_2 = bbox_weights[rois_2_idx]
rois_3_idx = np.where(feat_id == 3)
rois_3 = im_rois[rois_3_idx]
if len(rois_3) == 0:
rois_3 = np.zeros((1, 4))
label_3 = -np.ones((1,))
gt_label_3 = -np.ones((1,))
bbox_targets_3 = np.zeros((1, bbox_targets.shape[1]))
bbox_weights_3 = np.zeros((1, bbox_weights.shape[1]))
else:
label_3 = labels[rois_3_idx]
gt_label_3 = gt_lables[rois_3_idx]
bbox_targets_3 = bbox_targets[rois_3_idx]
bbox_weights_3 = bbox_weights[rois_3_idx]
# stack batch index
rois_array_0.append(np.hstack((im_i * np.ones((rois_0.shape[0], 1)), rois_0)))
rois_array_1.append(np.hstack((im_i * np.ones((rois_1.shape[0], 1)), rois_1)))
rois_array_2.append(np.hstack((im_i * np.ones((rois_2.shape[0], 1)), rois_2)))
rois_array_3.append(np.hstack((im_i * np.ones((rois_3.shape[0], 1)), rois_3)))
labels = np.concatenate([label_0, label_1, label_2, label_3], axis=0)
gt_lables = np.concatenate([gt_label_0, gt_label_1, gt_label_2, gt_label_3], axis=0)
bbox_targets = np.concatenate([bbox_targets_0, bbox_targets_1, bbox_targets_2, bbox_targets_3], axis=0)
bbox_weights = np.concatenate([bbox_weights_0, bbox_weights_1, bbox_weights_2, bbox_weights_3], axis=0)
else:
rois = im_rois
batch_index = im_i * np.ones((rois.shape[0], 1))
rois_array_this_image = np.hstack((batch_index, rois))
rois_array.append(rois_array_this_image)
# add labels
gt_labels_array.append(gt_lables)
labels_array.append(labels)
bbox_targets_array.append(bbox_targets)
bbox_weights_array.append(bbox_weights)
gt_labels_array = np.array(gt_labels_array)
nongt_index_array = np.where(gt_labels_array == 0)[1]
labels_array = np.array(labels_array)
bbox_targets_array = np.array(bbox_targets_array)
bbox_weights_array = np.array(bbox_weights_array)
if cfg.network.USE_NONGT_INDEX:
label = {'label': labels_array,
'nongt_index': nongt_index_array,
'bbox_target': bbox_targets_array,
'bbox_weight': bbox_weights_array}
else:
label = {'label': labels_array,
'bbox_target': bbox_targets_array,
'bbox_weight': bbox_weights_array}
if cfg.network.ROIDispatch:
rois_array_0 = np.array(rois_array_0)
rois_array_1 = np.array(rois_array_1)
rois_array_2 = np.array(rois_array_2)
rois_array_3 = np.array(rois_array_3)
# rois_concate = np.concatenate((rois_array_0, rois_array_1, rois_array_2, rois_array_3), axis=1)
# gt_rois_t = rois_concate[:, gt_labels_array[0,:] > 0]
data = {'data': im_array,
'rois_0': rois_array_0,
'rois_1': rois_array_1,
'rois_2': rois_array_2,
'rois_3': rois_array_3}
else:
rois_array = np.array(rois_array)
data = {'data': im_array,
'rois': rois_array}
if cfg.TRAIN.LEARN_NMS:
# im info
im_info = np.array([roidb[0]['im_info']], dtype=np.float32)
# gt_boxes
if roidb[0]['gt_classes'].size > 0:
gt_inds = np.where(roidb[0]['gt_classes'] != 0)[0]
gt_boxes = np.empty((len(gt_inds), 5), dtype=np.float32)
gt_boxes[:, 0:4] = roidb[0]['boxes'][gt_inds, :]
gt_boxes[:, 4] = roidb[0]['gt_classes'][gt_inds]
else:
gt_boxes = np.empty((0, 5), dtype=np.float32)
data['im_info'] = im_info
data['gt_boxes'] = gt_boxes
return data, label
def sample_rois_v2(rois, num_classes, cfg,
labels=None, overlaps=None, bbox_targets=None, gt_boxes=None):
"""
generate random sample of ROIs comprising foreground and background examples
:param rois: all_rois [n, 4]; e2e: [n, 5] with batch_index
:param fg_rois_per_image: foreground roi number
:param rois_per_image: total roi number
:param num_classes: number of classes
:param labels: maybe precomputed
:param overlaps: maybe precomputed (max_overlaps)
:param bbox_targets: maybe precomputed
:param gt_boxes: optional for e2e [n, 5] (x1, y1, x2, y2, cls)
:return: (labels, rois, bbox_targets, bbox_weights)
"""
if labels is None:
overlaps = bbox_overlaps(rois[:, 1:].astype(np.float), gt_boxes[:, :4].astype(np.float))
gt_assignment = overlaps.argmax(axis=1)
overlaps = overlaps.max(axis=1)
labels = gt_boxes[gt_assignment, 4]
# set labels of bg_rois to be 0
bg_ind = np.where(overlaps < cfg.TRAIN.BG_THRESH_HI)[0]
labels[bg_ind] = 0
# load or compute bbox_target
if bbox_targets is not None:
bbox_target_data = bbox_targets
else:
targets = bbox_transform(rois[:, 1:], gt_boxes[gt_assignment, :4])
if cfg.TRAIN.BBOX_NORMALIZATION_PRECOMPUTED:
targets = ((targets - np.array(cfg.TRAIN.BBOX_MEANS))
/ np.array(cfg.TRAIN.BBOX_STDS))
bbox_target_data = np.hstack((labels[:, np.newaxis], targets))
bbox_targets, bbox_weights = \
expand_bbox_regression_targets(bbox_target_data, num_classes, cfg)
return rois, labels, bbox_targets, bbox_weights
def sample_rois(rois, fg_rois_per_image, rois_per_image, num_classes, cfg,
labels=None, overlaps=None, bbox_targets=None, gt_boxes=None, gt_lables=None):
"""
generate random sample of ROIs comprising foreground and background examples
:param rois: all_rois [n, 4]; e2e: [n, 5] with batch_index
:param fg_rois_per_image: foreground roi number
:param rois_per_image: total roi number
:param num_classes: number of classes
:param labels: maybe precomputed
:param overlaps: maybe precomputed (max_overlaps)
:param bbox_targets: maybe precomputed
:param gt_boxes: optional for e2e [n, 5] (x1, y1, x2, y2, cls)
:return: (labels, rois, bbox_targets, bbox_weights)
"""
if labels is None:
overlaps = bbox_overlaps(rois[:, 1:].astype(np.float), gt_boxes[:, :4].astype(np.float))
gt_assignment = overlaps.argmax(axis=1)
overlaps = overlaps.max(axis=1)
labels = gt_boxes[gt_assignment, 4]
# foreground RoI with FG_THRESH overlap
fg_indexes = np.where(overlaps >= cfg.TRAIN.FG_THRESH)[0]
# guard against the case when an image has fewer than fg_rois_per_image foreground RoIs
fg_rois_per_this_image = np.minimum(fg_rois_per_image, fg_indexes.size)
# Sample foreground regions without replacement
if len(fg_indexes) > fg_rois_per_this_image:
fg_indexes = npr.choice(fg_indexes, size=fg_rois_per_this_image, replace=False)
# Select background RoIs as those within [BG_THRESH_LO, BG_THRESH_HI)
bg_indexes = np.where((overlaps < cfg.TRAIN.BG_THRESH_HI) & (overlaps >= cfg.TRAIN.BG_THRESH_LO))[0]
# Compute number of background RoIs to take from this image (guarding against there being fewer than desired)
bg_rois_per_this_image = rois_per_image - fg_rois_per_this_image
bg_rois_per_this_image = np.minimum(bg_rois_per_this_image, bg_indexes.size)
# Sample foreground regions without replacement
if len(bg_indexes) > bg_rois_per_this_image:
bg_indexes = npr.choice(bg_indexes, size=bg_rois_per_this_image, replace=False)
# indexes selected
keep_indexes = np.append(fg_indexes, bg_indexes)
# pad more to ensure a fixed minibatch size
while keep_indexes.shape[0] < rois_per_image:
gap = np.minimum(len(rois), rois_per_image - keep_indexes.shape[0])
gap_indexes = npr.choice(range(len(rois)), size=gap, replace=False)
keep_indexes = np.append(keep_indexes, gap_indexes)
# select gt_labels
gt_lables = gt_lables[keep_indexes]
# select labels
labels = labels[keep_indexes]
# set labels of bg_rois to be 0
bg_ind = np.where(overlaps[keep_indexes] < cfg.TRAIN.BG_THRESH_HI)[0]
labels[bg_ind] = 0
rois = rois[keep_indexes]
# load or compute bbox_target
if bbox_targets is not None:
bbox_target_data = bbox_targets[keep_indexes, :]
else:
targets = bbox_transform(rois[:, 1:], gt_boxes[gt_assignment[keep_indexes], :4])
if cfg.TRAIN.BBOX_NORMALIZATION_PRECOMPUTED:
targets = ((targets - np.array(cfg.TRAIN.BBOX_MEANS))
/ np.array(cfg.TRAIN.BBOX_STDS))
bbox_target_data = np.hstack((labels[:, np.newaxis], targets))
bbox_targets, bbox_weights = \
expand_bbox_regression_targets(bbox_target_data, num_classes, cfg)
return rois, labels, bbox_targets, bbox_weights, gt_lables
| [
37811,
198,
22968,
371,
12,
18474,
25,
198,
7890,
796,
198,
220,
220,
220,
1391,
6,
7890,
10354,
685,
22510,
62,
17566,
11,
269,
11,
289,
11,
266,
4357,
198,
220,
220,
220,
705,
305,
271,
10354,
685,
22510,
62,
305,
271,
11,
642,
... | 1.882739 | 8,528 |
#!/usr/bin/env python
# encoding: utf-8
import os
import sys
from mock import MagicMock, patch
from splunk_eventgen.__main__ import parse_args
from splunk_eventgen.eventgen_core import EventGenerator
FILE_DIR = os.path.dirname(os.path.abspath(__file__))
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
21004,
25,
3384,
69,
12,
23,
198,
198,
11748,
28686,
198,
11748,
25064,
198,
6738,
15290,
1330,
6139,
44,
735,
11,
8529,
198,
198,
6738,
4328,
2954,
62,
15596,
5235,
13,
834,
1241... | 2.836957 | 92 |
""" module to utils methods to file """
import os
import shutil
import logging
from documentstore_migracao import config
logger = logging.getLogger(__name__)
| [
37811,
8265,
284,
3384,
4487,
5050,
284,
2393,
37227,
198,
198,
11748,
28686,
198,
11748,
4423,
346,
198,
11748,
18931,
198,
198,
6738,
3188,
8095,
62,
76,
3692,
330,
5488,
1330,
4566,
198,
198,
6404,
1362,
796,
18931,
13,
1136,
11187,
... | 3.32 | 50 |
from sphinx.domains.changeset import versionlabels, VersionChange
from sphinx.locale import _ # just to suppress warnings
try:
from sphinx.domains.changeset import versionlabel_classes
except ImportError:
# versionlabel_classes doesn't exist in old Sphinx versions.
UPDATE_VERIONLABEL_CLASSES = False
else:
UPDATE_VERIONLABEL_CLASSES = True
labels = ('versionadded', 'versionchanged', 'deprecated', 'versionextended')
| [
6738,
599,
20079,
87,
13,
3438,
1299,
13,
36653,
316,
1330,
2196,
23912,
1424,
11,
10628,
19400,
198,
6738,
599,
20079,
87,
13,
17946,
1000,
1330,
4808,
1303,
655,
284,
18175,
14601,
198,
198,
28311,
25,
198,
220,
220,
220,
422,
599,
... | 3.251852 | 135 |
'''
@author Tian Shi
Please contact tshi@vt.edu
'''
import json
import os
import random
import gensim
import numpy as np
from tqdm import tqdm
def run_word2vec(args):
'''
Run word2vec.
'''
cluster_dir = '../cluster_results'
if not os.path.exists(cluster_dir):
os.mkdir(cluster_dir)
if not os.path.exists('../nats_results'):
os.mkdir('../nats_results')
fp = open(os.path.join(args.data_dir, args.file_train_w2v), 'r')
sentences = []
for line in tqdm(fp):
itm = json.loads(line)
sentences.append(itm['text_uae'].split())
fp.close()
random.shuffle(sentences)
print('-'*50)
print('Number of sentences: {}'.format(len(sentences)))
print('Begin to train word2vec...')
model = gensim.models.Word2Vec(
sentences,
size=args.emb_size,
window=args.window,
min_count=args.min_count,
workers=args.workers)
model.save(os.path.join(cluster_dir, 'w2v_embedding'))
print('Taining Done.')
print('-'*50)
def convert_vectors(args):
'''
convert vectors and vocab.
'''
cluster_dir = '../cluster_results'
file_vocab = 'vocab.txt'
file_wordvec = 'vectors_w2v'
model = gensim.models.Word2Vec.load(
os.path.join(cluster_dir, 'w2v_embedding'))
lexicon = {}
for word in model.wv.vocab:
if word.strip() == '':
continue
lexicon[word] = model.wv[word]
vocab = []
for wd in lexicon:
vocab.append(wd)
vocab = sorted(vocab)
vec = np.zeros([len(lexicon), args.emb_size])
for k, wd in enumerate(vocab):
vec[k] = lexicon[wd]
print('Vocabulary size: {}'.format(vec.shape[0]))
np.save(os.path.join(cluster_dir, file_wordvec), vec)
fout = open(os.path.join(cluster_dir, file_vocab), 'w')
for k, itm in enumerate(vocab):
itm = [itm, str(k)]
fout.write(' '.join(itm) + '\n')
fout.close()
| [
7061,
6,
198,
31,
9800,
20834,
16380,
198,
5492,
2800,
256,
44019,
31,
36540,
13,
15532,
198,
7061,
6,
198,
11748,
33918,
198,
11748,
28686,
198,
11748,
4738,
198,
198,
11748,
308,
641,
320,
198,
11748,
299,
32152,
355,
45941,
198,
67... | 2.13034 | 913 |
from GenerateFolders import generateFolders
from GenerateFiles import generateFiles
from AddictionHelperGenerator import addictionHelperGenerator
import shutil
from Constants import *
generateFolders()
generateFiles()
addictionHelperGenerator()
shutil.make_archive('tobacco_awareness', 'zip', rootdir) | [
6738,
2980,
378,
37,
727,
364,
1330,
7716,
37,
727,
364,
198,
6738,
2980,
378,
25876,
1330,
7716,
25876,
198,
6738,
40187,
47429,
8645,
1352,
1330,
13230,
47429,
8645,
1352,
198,
11748,
4423,
346,
198,
6738,
4757,
1187,
1330,
1635,
198,... | 3.740741 | 81 |
from final_code.fcts_data_formatting import day_to_month, day_to_quarter, import_datasets, time_interval, add_categories, \
HB_to_areas, extract_data, day_to_quarter, month_to_quarter
import numpy as np
import matplotlib.pyplot as plt
data31, data62, operations, diag, covid = import_datasets(['31DayData', '62DayData', 'cancellations_by_board_november_2021', \
'diagnostics_by_board_september_2021', 'covid_2022'])
print(covid)
data31 = time_interval(data31, ['2018Q1', '2020Q1'])
data31 = HB_to_areas(data31)
groupings = {'new_CT':['Breast', 'Cervical'], 'all_reg':['NCA','SCAN','WOSCAN']}
data31 = add_categories(data31, groupings)
print(data31.index.names)
data31.info()
d31 = extract_data(data31, ('all_reg', 'all_reg','new_CT'), ['HB', 'HBT','CancerType'], ['NumberOfEligibleReferrals31DayStandard'])
covid = day_to_quarter(covid)
print(covid)
operations = time_interval(operations, ['201807', '202107'])
operations = HB_to_areas(operations)
print(operations.index.names)
operations.info()
op1, op2 = extract_data(operations, 'NCA', 'HBT', ['TotalOperations', 'TotalCancelled'])
fig, ax = plt.subplots(1, 1, figsize=(8, 4))
ax.plot(op1[0,:],op1[1,:])
every_nth = 4
for n, label in enumerate(ax.xaxis.get_ticklabels()):
if n % every_nth != 0:
label.set_visible(False)
plt.show() | [
6738,
2457,
62,
8189,
13,
69,
310,
82,
62,
7890,
62,
18982,
889,
1330,
1110,
62,
1462,
62,
8424,
11,
1110,
62,
1462,
62,
24385,
11,
1330,
62,
19608,
292,
1039,
11,
640,
62,
3849,
2100,
11,
751,
62,
66,
26129,
11,
3467,
198,
220,... | 2.222045 | 626 |
import sys
| [
11748,
25064,
198
] | 3.666667 | 3 |
from __future__ import absolute_import
| [
171,
119,
123,
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
198
] | 3.333333 | 12 |
"""
GraphSense API
GraphSense API # noqa: E501
The version of the OpenAPI document: 0.5.1
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from graphsense.api_client import ApiClient, Endpoint as _Endpoint
from graphsense.model_utils import ( # noqa: F401
check_allowed_values,
check_validations,
date,
datetime,
file_type,
none_type,
validate_and_convert_types
)
from graphsense.model.address import Address
from graphsense.model.address_tags import AddressTags
from graphsense.model.address_txs import AddressTxs
from graphsense.model.entity import Entity
from graphsense.model.links import Links
from graphsense.model.neighbors import Neighbors
class AddressesApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
| [
37811,
198,
220,
220,
220,
29681,
41166,
7824,
628,
220,
220,
220,
29681,
41166,
7824,
220,
1303,
645,
20402,
25,
412,
33548,
628,
220,
220,
220,
383,
2196,
286,
262,
4946,
17614,
3188,
25,
657,
13,
20,
13,
16,
198,
220,
220,
220,
... | 3.016287 | 307 |
import argparse
import collections
import json
import random
import string
import sys
import types
import bftool
# Default argument capture for the main function
def _get_arguments() -> argparse.Namespace:
"""Default function to prepare the arguments for the `Runner` during it's execution in a terminal
Returns:
- bftool.Arguments with all the configurations provided by the user
"""
argument_parser = argparse.ArgumentParser()
argument_parser.add_argument("-mt", "--max-threads",
help="Maximum number of threads per process", default=1, type=int)
argument_parser.add_argument("-mp", "--max-processes",
help="Maximum number of process to have active at the same time",
default=1, type=int)
argument_parser.add_argument("-w", "--wordlist", help="File wordlist to use"
" based on \"{'argument_1': FILE_PATH, ...}\"",
default="{}")
argument_parser.add_argument("-b", "--bruteforce",
help="Generate a virtual wordlist based on \
rules \"{'argument_1': {'elements': [element_1, ...], 'minlength': INT, 'maxlength': "
"INT, 'string-join': BOOL}, ...}\"",
default="{}")
argument_parser.add_argument("-sf", "--success-function",
help="Function to pass the success result to (default is custom 'print')",
default="lambda output: print(f\"[+] {output}\\n\", end='')")
argument_parser.add_argument("-cf", "--check-function",
help="Function useful to check the output (default is 'lambda output: output')",
default="lambda output: output")
argument_parser.add_argument("-sp", "--script_path", help="Python script to import", default=None, type=str)
argument_parser.add_argument("expression", help="expression that will result in a callable")
return argument_parser.parse_args()
if __name__ == "__main__":
sys.argv[0] = "bftool"
parsed_arguments = _get_arguments()
function_ = import_function(parsed_arguments.expression, parsed_arguments.script_path)
success_function = import_function(parsed_arguments.success_function, parsed_arguments.script_path)
check_function = import_function(parsed_arguments.check_function, parsed_arguments.script_path)
function_arguments = bftool.Arguments(
function_=function_,
files=json.loads(parsed_arguments.wordlist),
bruteforce_rules=json.loads(parsed_arguments.bruteforce),
)
bftool.Pool(
function_,
function_arguments=function_arguments,
check_function=check_function,
success_function=success_function,
max_processes=parsed_arguments.max_processes,
max_threads=parsed_arguments.max_threads
).run()
| [
11748,
1822,
29572,
198,
11748,
17268,
198,
11748,
33918,
198,
11748,
4738,
198,
11748,
4731,
198,
11748,
25064,
198,
11748,
3858,
198,
198,
11748,
275,
701,
970,
628,
198,
2,
15161,
4578,
8006,
329,
262,
1388,
2163,
198,
4299,
4808,
11... | 2.287743 | 1,338 |
from conans import ConanFile, CMake
| [
6738,
369,
504,
1330,
31634,
8979,
11,
327,
12050,
628
] | 3.7 | 10 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging
from pathlib import Path
import sys
import click
import h5py
import yaml
import lynx
import hoover
import pymaster as nmt
from scipy.optimize import minimize
import emcee
import healpy as hp
import matplotlib.pyplot as plt
from matplotlib.patches import Ellipse
import pandas as pd
import numpy as np
from scipy import stats
from lynx import Masking
_logger = logging.getLogger(__name__)
@click.command()
@click.option('-d', '--data_path', 'data_path', required=True,
type=click.Path(exists=True), help='path to data configuration')
@click.option('-m', '--model_path', 'model_path', required=True,
type=click.Path(exists=False), help='path to model configuration')
@click.option('-p', '--mask_path', 'mask_path', required=True,
type=click.Path(exists=False), help='path to masking configuration')
@click.option('--quiet', 'log_level', flag_value=logging.WARNING, default=True)
@click.option('-v', '--verbose', 'log_level', flag_value=logging.INFO)
@click.option('-vv', '--very-verbose', 'log_level', flag_value=logging.DEBUG)
@click.version_option(lynx.__version__)
if __name__ == '__main__':
main() | [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
18931,
198,
6738,
3108,
8019,
1330,
10644,
198,
11748,
25064,
198,
198,
11748,
3904,
198,
198,
11748,
289,
20,
9... | 2.678337 | 457 |
"""
Script for running management commands for the Asteroids Game / AI.
Usage: python manage.py [--help]
"""
from ai.experiment import merge_experiments
import click
import settings
class TransparentGroup(click.Group):
"""
A Click Group class that passes all provided
arguments to its subcommands without processing them.
"""
@click.group(cls=TransparentGroup)
@click.pass_context
@manage.command(short_help='Merges experiments into a new experiment',
context_settings=dict(ignore_unknown_options=True,
allow_extra_args=True,))
@click.argument('parent_dirs', nargs=-1)
@click.argument('output_dir')
@click.pass_context
def merge(ctx, parent_dirs, output_dir):
"""
Merges the best brains of the parent experment directories
into a new directory, and initializes (but does not run)
that experiment:
\b
The settings passed to this command will be used to initialize
and perform the initial evaluation of the merged experiment.
\b
Arguments:
parent_dirs - Directories of parent experiments to merge.
output_dir - Directory to place the merged experiment into.
"""
# Remove all options from the directory arguments
parent_dirs = [x for x in list(parent_dirs) if not x.startswith("--")]
if output_dir.startswith("--"):
output_dir = parent_dirs.pop() if len(parent_dirs) > 0 else ""
# Configure settings, then actually merge the experiments
settings.configure_settings()
merge_experiments(parent_dirs, output_dir)
@manage.command('settings', short_help='View configurable settings')
@click.pass_context
def view_settings(ctx):
"""
View the configurable settings for the other commands.
"""
click.echo(settings.cli_configure_settings.get_help(ctx))
if __name__ == "__main__":
manage()
| [
37811,
198,
7391,
329,
2491,
4542,
9729,
329,
262,
38484,
10994,
3776,
1220,
9552,
13,
198,
198,
28350,
25,
21015,
6687,
13,
9078,
685,
438,
16794,
60,
198,
37811,
198,
198,
6738,
257,
72,
13,
23100,
3681,
1330,
20121,
62,
23100,
6800... | 3.046589 | 601 |
from operator import or_
import sqlalchemy
from fastapi import APIRouter, HTTPException, status
from fastapi.param_functions import Depends
from fastapi.responses import JSONResponse
from fastapi import APIRouter
import fastapi as _fastapi
import sqlalchemy.orm as _orm
from bigfastapi.db.database import get_db
from bigfastapi.schemas import plan_schema, tutorial_schema
from bigfastapi.models import plan_model, tutorial_model, user_models
from uuid import uuid4
from bigfastapi import db, users
from typing import List
from sqlalchemy.exc import IntegrityError
from sqlalchemy import func
import datetime as _dt
app = APIRouter(tags=["Tutorials"])
# SAVE TUTORIAL ENDPOINT
@app.post('/tutorial', response_model=tutorial_schema.TutorialSingleRes)
# GET TUTORIALS - Can be filtered by category, title or both
@app.get('/tutorials', response_model=tutorial_schema.TutorialListRes)
# GET TUTORIALS IN GROUPED OF CATEGORIES- Return result as groups of categories
@app.get('/tutorials/group/categories')
# GET A LIST OF ALL TUTORIAL CATEGORIES
@app.get('/tutorials/categories')
# SEARCH TUTORIAL BY MATCHING KEYWORDS
@app.get('/tutorials/search/{keyword}', response_model=tutorial_schema.TutorialListRes)
# UPDATE TUTORIAL DETAILS
@app.put('/tutorials/{itemId}')
@app.delete('/tutorials/{itemId}/user/{userId}')
# --------------------------------------------------------------------------------------------------#
# HELPER FUNCTIONS SECION
# --------------------------------------------------------------------------------------------------#
# SKIP and OFFSET
# SAVE A NEW TUTORIA
# PAGINATION LOGIC
# RUN QUERY
# BUID CATEGORY LIST
# GENERIC STRUCTURED RESPONSE BUILDER
| [
6738,
10088,
1330,
393,
62,
198,
11748,
44161,
282,
26599,
198,
6738,
3049,
15042,
1330,
3486,
4663,
39605,
11,
14626,
16922,
11,
3722,
198,
6738,
3049,
15042,
13,
17143,
62,
12543,
2733,
1330,
2129,
2412,
198,
6738,
3049,
15042,
13,
16... | 3.105735 | 558 |
from PIL import Image, ImageDraw
w = 7200
h = 3600
i = 1
j = 0
k = 0
c6 = [(255, 153, 204), (255, 255, 153), (153, 255, 153), (153, 204, 255)]
black = (0, 0, 0)
white = (255, 255, 255)
im = Image.new('RGB', (w, h), white)
draw = ImageDraw.Draw(im)
r = open('index_src.dat', 'r')
src = r.read()
r.close
src = src.replace(' ', '')
rows = src.split('\n')
for row in rows:
d = row.split('|')
if len(d) == 6:
if len(d[2]) > 0 and len(d[3]) > 0 and len(d[4]) > 0 and len(d[5]) > 0:
ra0 = int((360 - float(d[2])) * 20)
ra1 = int((360 - float(d[3])) * 20)
ra2 = int((ra0 + ra1) / 2)
de0 = int((90 - float(d[4])) * 20)
de1 = int((90 - float(d[5])) * 20)
de2 = int((de0 + de1) / 2)
if i > 4662:
if de2 < k - 3:
j = 0 if j > 2 else (j + 1)
else:
if de2 > k + 3:
j = 0 if j > 2 else (j + 1)
draw.rectangle((ra0, de0, ra1, de1), fill=c6[j], outline=black)
draw.text((ra2, de2), str(i), fill=black)
k = de2
i = i + 1
im.save('tyc_area.png') | [
6738,
350,
4146,
1330,
7412,
11,
7412,
25302,
201,
198,
86,
796,
767,
2167,
201,
198,
71,
796,
4570,
405,
201,
198,
72,
796,
352,
201,
198,
73,
796,
657,
201,
198,
74,
796,
657,
201,
198,
66,
21,
796,
47527,
13381,
11,
24652,
11... | 1.722944 | 693 |
from docutils import nodes
from docutils.parsers.rst import Directive
| [
198,
6738,
2205,
26791,
1330,
13760,
198,
198,
6738,
2205,
26791,
13,
79,
945,
364,
13,
81,
301,
1330,
34736,
198,
220,
220,
220,
220,
220,
220,
220,
220,
198,
220,
220,
220,
220,
220,
220,
220,
220,
628
] | 2.333333 | 39 |
import imaplib
import email
from email.header import decode_header
import pandas as pd
mails_df = pd.read_csv('mails.csv')
csv_values = mails_df.values
c = 1
with open('mails_with_coupons.csv', 'w', encoding='utf-8') as f:
out_row = 'EMAIL,PASS,COUPONS\n'
f.write(out_row)
for each in csv_values:
user = each[0]
password = each[1]
# Mailbox interaction
M = imaplib.IMAP4_SSL('imap.mail.com')
M.login(user, password)
M.select('Inbox')
typ, data = M.search(None, 'ALL')
ids = data[0]
id_list = ids.split()
# get the most recent email id
latest_email_id = int(id_list[-1])
COUPON_AMOUNT = '15'
# iterate through 15 messages in descending order starting with latest_email_id
# the '-1' dictates reverse looping order
for i in range(latest_email_id, latest_email_id - 15, -1):
typ, data = M.fetch(str(i), '(RFC822)')
for response_part in data:
if isinstance(response_part, tuple):
mail_bytes = response_part[1].decode('UTF-8')
msg = email.message_from_string(mail_bytes)
varSubject = msg['subject']
varFrom = msg['from']
varSubject = decode_header(varSubject)[0][0]
if f'$coupon' in str(varSubject):
print(f'{c} Mail: {user}\n Subject: {varSubject}\n')
with open('mails_with_coupons.csv', 'a') as f:
row = f'{user},{password},"${COUPON_AMOUNT}"\n'
f.write(row)
c += 1
data_frame = pd.read_csv('mails_with_coupons.csv', encoding="utf-8").drop_duplicates(
subset='EMAIL', keep='first', inplace=False)
data_frame.to_csv('mails_with_coupons.csv', index=False, encoding="utf-8")
| [
11748,
545,
64,
489,
571,
201,
198,
11748,
3053,
201,
198,
6738,
3053,
13,
25677,
1330,
36899,
62,
25677,
201,
198,
11748,
19798,
292,
355,
279,
67,
201,
198,
201,
198,
201,
198,
26165,
62,
7568,
796,
279,
67,
13,
961,
62,
40664,
... | 2.036545 | 903 |
from rest_framework import serializers
from django.contrib.auth import authenticate
from rest_framework import exceptions
from Air_PnP.models import * | [
6738,
1334,
62,
30604,
1330,
11389,
11341,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
1330,
8323,
5344,
198,
6738,
1334,
62,
30604,
1330,
13269,
220,
198,
6738,
3701,
62,
47,
77,
47,
13,
27530,
1330,
1635
] | 3.973684 | 38 |
import datetime
import re
"""
Input is supposed to be in the format yyyy-mm-dd
if it is not then return false
""" | [
11748,
4818,
8079,
198,
11748,
302,
628,
198,
37811,
198,
220,
220,
220,
23412,
318,
4385,
284,
307,
287,
262,
5794,
331,
22556,
88,
12,
3020,
12,
1860,
198,
220,
220,
220,
611,
340,
318,
407,
788,
1441,
3991,
198,
37811
] | 3 | 41 |
#! /usr/bin/env python3
# __author__ = "Praneesh Kataru"
# __credits__ = []
# __version__ = "0.1.1"
# __maintainer__ = "Praneesh Kataru"
# __email__ = "pranuvitmsse05@gmail.com"
# __status__ = "Prototype"
import unittest
from pprint import pprint
from qs_backend.dal.user_stock_pref_dal import UserStockPrefDAL
class UserStockPrefSelectTests(unittest.TestCase):
"""
Unit Test Case for Validating ``UserStockPrefs`` table Selects
""" | [
2,
0,
1220,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
2,
220,
220,
11593,
9800,
834,
220,
220,
220,
796,
366,
6836,
1531,
5069,
8595,
11493,
1,
198,
2,
220,
220,
11593,
66,
20696,
834,
220,
220,
796,
17635,
198,
2,
220,
2... | 2.415385 | 195 |
# coding: utf-8
# # Pipeline processing using serial workflows.
#
# This is a serial unrolled version of the predict step
# In[1]:
#get_ipython().run_line_magic('matplotlib', 'inline')
import os
import sys
sys.path.append(os.path.join('..', '..'))
from data_models.parameters import arl_path
from mpi4py import MPI
results_dir = './results/mpi'
#from matplotlib import pylab
#pylab.rcParams['figure.figsize'] = (12.0, 12.0)
#pylab.rcParams['image.cmap'] = 'rainbow'
import numpy
from astropy.coordinates import SkyCoord
from astropy import units as u
from astropy.wcs.utils import pixel_to_skycoord
#from matplotlib import pyplot as plt
from data_models.polarisation import PolarisationFrame
from wrappers.serial.calibration.calibration import solve_gaintable
from wrappers.serial.calibration.operations import apply_gaintable
from wrappers.serial.calibration.calibration_control import create_calibration_controls
from wrappers.serial.visibility.base import create_blockvisibility
from wrappers.serial.visibility.coalesce import convert_blockvisibility_to_visibility
from wrappers.serial.skycomponent.operations import create_skycomponent
from wrappers.serial.image.deconvolution import deconvolve_cube
#from wrappers.serial.image.operations import show_image, export_image_to_fits, qa_image
from wrappers.serial.image.operations import export_image_to_fits, qa_image
from wrappers.serial.visibility.iterators import vis_timeslice_iter
from wrappers.serial.simulation.testing_support import create_low_test_image_from_gleam
from processing_components.simulation.configurations import create_named_configuration
from wrappers.serial.imaging.base import predict_2d, create_image_from_visibility, advise_wide_field
from workflows.serial.imaging.imaging_serial import invert_list_serial_workflow, predict_list_serial_workflow, deconvolve_list_serial_workflow
from workflows.serial.simulation.simulation_serial import simulate_list_serial_workflow, corrupt_list_serial_workflow
from workflows.serial.pipelines.pipeline_serial import continuum_imaging_list_serial_workflow, ical_list_serial_workflow
from workflows.mpi.pipelines.pipeline_mpi import continuum_imaging_list_mpi_workflow, ical_list_mpi_workflow
from workflows.mpi.imaging.imaging_mpi import predict_list_mpi_workflow, invert_list_mpi_workflow, deconvolve_list_mpi_workflow
import time
import pprint
# Uncomment this line if profiling with extrae/paraver toolset
#import pyextrae.mpi as pyextrae
pp = pprint.PrettyPrinter()
import logging
import argparse
log = init_logging()
parser = argparse.ArgumentParser(description='Imaging pipelines in MPI.')
parser.add_argument('--nfreqwin', type=int, nargs='?', default=7,
help='The number of frequency windows')
args = parser.parse_args()
# In[2]:
# ################### Rationale of data distribution: ################### #
# In this version all data resides at rank0 and needs to be distributed #
# at every function when needed. #
# TODO: Pass on the comm parameter!
# vis_list -> rank0 #
# vis_slices, npixel, cellsize -> rep #
# gleam_model -> rank0 (later rep) #
# predicted_vis -> rank0 (later dist) #
# model_list ->rank0 (later rep)
# disrty_list psf_list -> rank0 (later dist)
# continuum_imaging_list -> rank0
# ####################################################################### #
#pylab.rcParams['figure.figsize'] = (12.0, 12.0)
#pylab.rcParams['image.cmap'] = 'Greys'
# Set up MPI
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
size = comm.Get_size()
# We make the visibility. The parameter rmax determines the distance of the furthest antenna/stations used. All over parameters are determined from this number.
# In[3]:
#nfreqwin=7
nfreqwin=args.nfreqwin
ntimes=5
rmax=300.0
frequency=numpy.linspace(1.0e8,1.2e8,nfreqwin)
#ntimes=11
#frequency=numpy.linspace(0.9e8,1.1e8,nfreqwin)
channel_bandwidth=numpy.array(nfreqwin*[frequency[1]-frequency[0]])
times = numpy.linspace(-numpy.pi/3.0, numpy.pi/3.0, ntimes)
#phasecentre=SkyCoord(ra=+30.0 * u.deg, dec=-60.0 * u.deg, frame='icrs', equinox='J2000')
phasecentre=SkyCoord(ra=+0.0 * u.deg, dec=-40.0 * u.deg, frame='icrs', equinox='J2000')
log.info("Starting imaging-pipeline with %d MPI processes nfreqwin %d ntimes %d" %(size,nfreqwin,ntimes))
print("Starting imaging-pipeline with %d MPI processes nfreqwin %d ntimes %d"
%(size,nfreqwin,ntimes),flush=True)
log.debug('%d: frequency len %d frequency list:'%(rank,len(frequency)))
#print(frequency,flush=True)
if rank == 0:
bvis_list=simulate_list_serial_workflow('LOWBD2',
frequency=frequency,
channel_bandwidth=channel_bandwidth,
times=times,
phasecentre=phasecentre,
order='frequency',
rmax=rmax, format='blockvis')
else:
bvis_list=list()
vis_list = [convert_blockvisibility_to_visibility(bv) for bv in bvis_list]
log.debug('%d: %d elements in vis_list' % (rank,len(vis_list)))
#log.handlers[0].flush()
#print(vis_list
# In[4]:
if rank == 0:
wprojection_planes=1
advice_low=advise_wide_field(vis_list[0], guard_band_image=8.0, delA=0.02,
wprojection_planes=wprojection_planes)
advice_high=advise_wide_field(vis_list[-1], guard_band_image=8.0, delA=0.02,
wprojection_planes=wprojection_planes)
vis_slices = advice_low['vis_slices']
npixel=advice_high['npixels2']
cellsize=min(advice_low['cellsize'], advice_high['cellsize'])
else:
vis_slices = 0
npixel = 0
cellsize = 0
(vis_slices,npixel,cellsize) = comm.bcast((vis_slices,npixel,cellsize),root=0)
log.debug('%d: After advice: vis_slices %d npixel %d cellsize %d' % (rank,vis_slices, npixel, cellsize))
# Now make a graph to fill with a model drawn from GLEAM
# In[ ]:
log.info('%d:About to make GLEAM model' %(rank))
sub_frequency = numpy.array_split(frequency, size)
sub_channel_bandwidth = numpy.array_split(channel_bandwidth,size)
sub_gleam_model = [create_low_test_image_from_gleam(npixel=npixel,
frequency=[sub_frequency[rank][f]],
channel_bandwidth=[sub_channel_bandwidth[rank][f]],
cellsize=cellsize,
phasecentre=phasecentre,
polarisation_frame=PolarisationFrame("stokesI"),
flux_limit=1.0,
applybeam=True)
for f, freq in enumerate(sub_frequency[rank])]
# NOTE: We could do an allgather here to avoid bcast of
# each freqw during predict, it would safe time but use more space
gleam_model=comm.gather(sub_gleam_model,root=0)
if rank==0:
gleam_model=numpy.concatenate(gleam_model)
else:
gleam_model=list()
# In[ ]:
original_predict=False
if original_predict:
if rank==0:
log.info('About to run predict to get predicted visibility')
predicted_vislist = predict_list_serial_workflow(vis_list, gleam_model,
context='wstack', vis_slices=vis_slices)
else:
log.info('%d: About to run predict to get predicted visibility'%(rank))
print('%d: About to run predict to get predicted visibility'%(rank),flush=True)
start=time.time()
# All procs call the function but only rank=0 gets the predicted_vislist
predicted_vislist = predict_list_mpi_workflow(vis_list, gleam_model,
context='wstack',
vis_slices=vis_slices)
end=time.time()
#log.info('About to run corrupt to get corrupted visibility')
#corrupted_vislist = corrupt_list_serial_workflow(predicted_vislist, phase_error=1.0)
# Get the LSM. This is currently blank.
# In[ ]:
### I need to scatter vis_list cause worker don't have it
## frequency and channel_bandwidth are replicated and they have already
## been split
log.info('%d: predict finished in %f seconds'%(rank,end-start))
print('%d: predict finished in %f seconds'%(rank,end-start),flush=True)
log.info('%d: About create image from visibility'%(rank))
sub_vis_list= numpy.array_split(vis_list, size)
sub_vis_list=comm.scatter(sub_vis_list,root=0)
sub_model_list = [create_image_from_visibility(sub_vis_list[f],
npixel=npixel,
frequency=[sub_frequency[rank][f]],
channel_bandwidth=[sub_channel_bandwidth[rank][f]],
cellsize=cellsize,
phasecentre=phasecentre,
polarisation_frame=PolarisationFrame("stokesI"))
for f, freq in enumerate(sub_frequency[rank])]
# NOTE: We could do allgather here, if enough memory space
model_list=comm.gather(sub_model_list,root=0)
if rank==0:
#model_list=numpy.concatenate(model_list)
model_list=concat_tuples(model_list)
# In[ ]:
else:
model_list=list()
log.debug('%d model_list len %d' %(rank,len(model_list)))
log.info('%d: About to start invert'%(rank))
print('%d: About to start invert'%(rank),flush=True)
start=time.time()
original_invert=False
if original_invert:
if rank==0:
dirty_list = invert_list_serial_workflow(predicted_vislist, model_list,
context='wstack',
vis_slices=vis_slices, dopsf=False)
psf_list = invert_list_serial_workflow(predicted_vislist, model_list,
context='wstack',
vis_slices=vis_slices, dopsf=True)
else:
dirty_list = invert_list_mpi_workflow(predicted_vislist, model_list,
context='wstack',
vis_slices=vis_slices, dopsf=False)
psf_list = invert_list_mpi_workflow(predicted_vislist, model_list,
context='wstack',
vis_slices=vis_slices, dopsf=True)
# Create and execute graphs to make the dirty image and PSF
# In[ ]:
end=time.time()
log.info('%d: invert finished'%(rank))
print('%d: invert finished in %f seconds'%(rank,end-start),flush=True)
if rank==0:
#print("sumwts",flush=True)
#print(dirty_list[0][1])
log.info('After invert to get dirty image')
dirty = dirty_list[0][0]
#show_image(dirty, cm='Greys', vmax=1.0, vmin=-0.1)
#plt.show()
print(qa_image(dirty))
export_image_to_fits(dirty, '%s/imaging-dirty.fits'
%(results_dir))
log.info('After invert to get PSF')
psf = psf_list[0][0]
#show_image(psf, cm='Greys', vmax=0.1, vmin=-0.01)
#plt.show()
print(qa_image(psf))
export_image_to_fits(psf, '%s/imaging-psf.fits'
%(results_dir))
# Now deconvolve using msclean
# In[ ]:
log.info('%d: About to run deconvolve'%(rank))
print('%d: About to run deconvolve'%(rank),flush=True)
start=time.time()
original_deconv=False
if original_deconv:
if rank==0:
deconvolved,_ = deconvolve_list_serial_workflow(dirty_list, psf_list, model_imagelist=model_list,
deconvolve_facets=8, deconvolve_overlap=16, deconvolve_taper='tukey',
scales=[0, 3, 10],
algorithm='msclean', niter=1000,
fractional_threshold=0.1,
threshold=0.1, gain=0.1, psf_support=64)
else:
print(" types of dirty list",type(dirty_list)," and psf_list",type(psf_list))
deconvolved = deconvolve_list_mpi_workflow(dirty_list, psf_list, model_imagelist=model_list,
deconvolve_facets=8, deconvolve_overlap=16, deconvolve_taper='tukey',
scales=[0, 3, 10],
algorithm='msclean', niter=1000,
fractional_threshold=0.1,
threshold=0.1, gain=0.1, psf_support=64)
#show_image(deconvolved[0], cm='Greys', vmax=0.1, vmin=-0.01)
#plt.show()
end=time.time()
log.info('%d: After deconvolve'%(rank))
print('%d: deconvolve finished in %f sec'%(rank,end-start))
# In[ ]:
log.info('%d: About to run continuum imaging'%(rank))
print('%d: About to run continuum imaging'%(rank),flush=True)
start=time.time()
original_continuumimaging=False
if original_continuumimaging:
if rank==0:
continuum_imaging_list = continuum_imaging_list_serial_workflow(predicted_vislist,
model_imagelist=model_list,
context='wstack', vis_slices=vis_slices,
scales=[0, 3, 10], algorithm='mmclean',
nmoment=3, niter=1000,
fractional_threshold=0.1,
threshold=0.1, nmajor=5, gain=0.25,
deconvolve_facets = 8, deconvolve_overlap=16,
deconvolve_taper='tukey', psf_support=64)
else:
continuum_imaging_list = continuum_imaging_list_mpi_workflow(predicted_vislist,
model_imagelist=model_list,
context='wstack', vis_slices=vis_slices,
scales=[0, 3, 10], algorithm='mmclean',
nmoment=3, niter=1000,
fractional_threshold=0.1,
threshold=0.1, nmajor=5, gain=0.25,
deconvolve_facets = 8, deconvolve_overlap=16,
deconvolve_taper='tukey', psf_support=64)
# In[ ]:
end=time.time()
log.info('%d: continuum imaging finished'%(rank))
print('%d: continuum imaging finished in %f sec.'%(rank,end-start),flush=True)
if rank==0:
deconvolved = continuum_imaging_list[0][0]
residual = continuum_imaging_list[1][0]
restored = continuum_imaging_list[2][0]
#f=show_image(deconvolved, title='Clean image - no selfcal', cm='Greys',
# vmax=0.1, vmin=-0.01)
print(qa_image(deconvolved, context='Clean image - no selfcal'))
#plt.show()
#f=show_image(restored, title='Restored clean image - no selfcal',
# cm='Greys', vmax=1.0, vmin=-0.1)
print(qa_image(restored, context='Restored clean image - no selfcal'))
#plt.show()
export_image_to_fits(restored, '%s/imaging-dask_continuum_imaging_restored.fits'
%(results_dir))
#f=show_image(residual[0], title='Residual clean image - no selfcal', cm='Greys',
# vmax=0.1, vmin=-0.01)
print(qa_image(residual[0], context='Residual clean image - no selfcal'))
#plt.show()
export_image_to_fits(residual[0], '%s/imaging-dask_continuum_imaging_residual.fits'
%(results_dir))
if rank==0:
for chan in range(nfreqwin):
residual = continuum_imaging_list[1][chan]
#show_image(residual[0], title='Channel %d' % chan, cm='Greys',
# vmax=0.1, vmin=-0.01)
#plt.show()
# In[ ]:
controls = create_calibration_controls()
controls['T']['first_selfcal'] = 1
controls['G']['first_selfcal'] = 3
controls['B']['first_selfcal'] = 4
controls['T']['timeslice'] = 'auto'
controls['G']['timeslice'] = 'auto'
controls['B']['timeslice'] = 1e5
pp.pprint(controls)
# In[ ]:
# TODO I change this to predicted_vislist to make it deterministic, I hope it makes
# sense :)
#ical_list = ical_list_serial_workflow(corrupted_vislist,
log.info('%d: About to run ical'%(rank))
print('%d: About to run ical'%(rank),flush=True)
start=time.time()
original_ical=False
if original_ical:
if rank==0:
ical_list = ical_list_serial_workflow(predicted_vislist,
model_imagelist=model_list,
context='wstack',
calibration_context = 'TG',
controls=controls,
scales=[0, 3, 10], algorithm='mmclean',
nmoment=3, niter=1000,
fractional_threshold=0.1,
threshold=0.1, nmajor=5, gain=0.25,
deconvolve_facets = 8,
deconvolve_overlap=16,
deconvolve_taper='tukey',
vis_slices=ntimes,
timeslice='auto',
global_solution=False,
psf_support=64,
do_selfcal=True)
else:
ical_list = ical_list_mpi_workflow(predicted_vislist,
model_imagelist=model_list,
context='wstack',
calibration_context = 'TG',
controls=controls,
scales=[0, 3, 10], algorithm='mmclean',
nmoment=3, niter=1000,
fractional_threshold=0.1,
threshold=0.1, nmajor=5, gain=0.25,
deconvolve_facets = 8,
deconvolve_overlap=16,
deconvolve_taper='tukey',
vis_slices=ntimes,
timeslice='auto',
global_solution=False,
psf_support=64,
do_selfcal=True)
# In[ ]:
end=time.time()
log.info('%d: ical finished '%(rank))
print('%d: ical finished in %f sec.'%(rank,end-start),flush=True)
if rank==0:
log.info('After ical')
deconvolved = ical_list[0][0]
residual = ical_list[1][0]
restored = ical_list[2][0]
#f=show_image(deconvolved, title='Clean image', cm='Greys', vmax=1.0, vmin=-0.1)
print(qa_image(deconvolved, context='Clean image'))
#plt.show()
#f=show_image(restored, title='Restored clean image', cm='Greys', vmax=1.0,
# vmin=-0.1)
print(qa_image(restored, context='Restored clean image'))
#plt.show()
export_image_to_fits(restored, '%s/imaging-dask_ical_restored.fits'
%(results_dir))
#f=show_image(residual[0], title='Residual clean image', cm='Greys',
# vmax=0.1, vmin=-0.01)
print(qa_image(residual[0], context='Residual clean image'))
#plt.show()
export_image_to_fits(residual[0], '%s/imaging-dask_ical_residual.fits'
%(results_dir))
| [
198,
2,
19617,
25,
3384,
69,
12,
23,
198,
198,
2,
1303,
37709,
7587,
1262,
11389,
670,
44041,
13,
198,
2,
220,
198,
2,
770,
318,
257,
11389,
555,
8375,
2196,
286,
262,
4331,
2239,
198,
198,
2,
554,
58,
16,
5974,
628,
198,
2,
1... | 1.926533 | 10,372 |
from __future__ import absolute_import
import logging
# to change log level globally, use eg logconfig.loglevel(logging.WARN)
# to change level for an individual module, eg logconfig.loglevel(logging.DEBUG, "framedata")
| [
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
198,
11748,
18931,
198,
198,
2,
284,
1487,
2604,
1241,
18309,
11,
779,
29206,
2604,
11250,
13,
75,
2467,
626,
7,
6404,
2667,
13,
37771,
8,
198,
2,
284,
1487,
1241,
329,
281,
1981,
8265... | 3.507937 | 63 |
expected_output = {
'policy_map': {
'policy-cbwfq-1': {'class': {
'class-gold': {'bandwidth_percent': '40',
'random_detect': ['dscp-based', 'ecn']},
'class-silver': {'bandwidth_percent': '20',
'random_detect': ['dscp-based', 'ecn']},
'class-bronze': {'bandwidth_percent': '10',
'random_detect': ['dscp-based', 'ecn']},
'management-traffic': {'bandwidth_percent': '1',
'random_detect': ['dscp-based', 'ecn'],
'qos_set': {'dscp': 'af21'}},
'class-default': {'bandwidth_percent': '29',
'random_detect': ['dscp-based', 'ecn'],
'qos_set': {'dscp': 'default'}}}}
}
} | [
40319,
62,
22915,
796,
1391,
198,
220,
220,
220,
705,
30586,
62,
8899,
10354,
1391,
198,
220,
220,
220,
220,
220,
220,
220,
705,
30586,
12,
21101,
86,
69,
80,
12,
16,
10354,
1391,
6,
4871,
10354,
1391,
198,
220,
220,
220,
220,
220... | 1.627151 | 523 |
# https://github.com/ArtemNikolaev/gb-hw/issues/26
from functools import reduce
print(multiply())
| [
2,
3740,
1378,
12567,
13,
785,
14,
8001,
368,
40979,
5708,
1990,
14,
22296,
12,
36599,
14,
37165,
14,
2075,
198,
6738,
1257,
310,
10141,
1330,
4646,
628,
628,
198,
4798,
7,
16680,
541,
306,
28955,
198
] | 2.756757 | 37 |
Link = "https://practice.geeksforgeeks.org/problems/merge-two-sorted-arrays-1587115620/1"
Description = "Given two sorted arrays arr1[] and arr2[] of sizes n and m in non-decreasing order." \
"Merge them in sorted order without using any extra space. Modify arr1 so that it" \
"contains the first N elements and modify arr2 so that it contains the last M elements."
Examples = "Input: " \
"n = 4, arr1[] = [1 3 5 7] " \
"m = 5, arr2[] = [0 2 6 8 9]" \
"Output: " \
"arr1[] = [0 1 2 3]" \
"arr2[] = [5 6 7 8 9]" \
"Explanation: After merging the two non-decreasing arrays, we get, 0 1 2 3 5 6 7 8 9."
arr1 = [1,36,39,105,146,154,168,170,204,206,217,219,225,227,272,282,293,300,312,323,328,328,334,335,359,370,383,392,395,396,403,413,422,437,443,448,462,463,465,479,492,496]
arr2 = [7,22,30,36,38,38,39,41,42,48,49,83,85,102,107,116,119,124,127,130,140,142,145,149,159,163,165,174,174,191,205,212,224,230,242,246,254,257,258,265,279,289,306,307,309,317,324,334,341,343,351,360,369,371,377,387,391,394,430,431,432,440,443,445,447,455,467,478]
n = 42
m = 68
# Approach 1
print(Solution1().merge(arr1, arr2, n, m))
| [
11280,
796,
366,
5450,
1378,
39541,
13,
469,
2573,
30293,
2573,
13,
2398,
14,
1676,
22143,
14,
647,
469,
12,
11545,
12,
82,
9741,
12,
3258,
592,
12,
1314,
5774,
1157,
3980,
1238,
14,
16,
1,
198,
11828,
796,
366,
15056,
734,
23243,
... | 2.132743 | 565 |
# Copyright 2019 Amazon.com, Inc. or its affiliates.
# Licensed under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| [
2,
15069,
13130,
6186,
13,
785,
11,
3457,
13,
393,
663,
29116,
13,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
198,
2,
366,
34156,
15341,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
198,
2,
351,
... | 3.864516 | 155 |
#!/usr/bin/env python
from setuptools import setup, find_packages
import io
setup(
name='tomago-sdk-py',
version='1.5.1',
description="Python SDKs for Blockchain.",
long_description=io.open('README.md', encoding='utf-8').read(),
url='https://github.com/arxanchain/tomago-sdk-py/',
download_url='https://github.com/arxanchain/tomago-sdk-py/',
packages=find_packages(),
platforms='any',
install_requires=[
"mock==2.0.0",
"requests==2.18.4",
"six==1.11.0",
"urllib3==1.22",
"py-common==v1.5.1"
],
dependency_links=[
"git+git://github.com/arxanchain/py-common.git@v1.5.1#egg=py-common-v1.5.1"
],
include_package_data=True,
zip_safe=False,
)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
6738,
900,
37623,
10141,
1330,
9058,
11,
1064,
62,
43789,
198,
11748,
33245,
198,
198,
40406,
7,
198,
220,
220,
220,
1438,
11639,
39532,
3839,
12,
21282,
74,
12,
9078,
3256,
198,
... | 2.098315 | 356 |
#!/usr/bin/env python
# Copyright (c) 2017, the R8 project authors. Please see the AUTHORS file
# for details. All rights reserved. Use of this source code is governed by a
# BSD-style license that can be found in the LICENSE file.
import create_maven_release
import gradle
import jdk
import optparse
import os
try:
import resource
except ImportError:
# Not a Unix system. Do what Gandalf tells you not to.
pass
import shutil
import subprocess
import sys
import toolhelper
import utils
import zipfile
from build_r8lib import build_r8lib
ARCHIVE_BUCKET = 'r8-releases'
if __name__ == '__main__':
sys.exit(Main())
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
15069,
357,
66,
8,
2177,
11,
262,
371,
23,
1628,
7035,
13,
4222,
766,
262,
37195,
20673,
2393,
198,
2,
329,
3307,
13,
1439,
2489,
10395,
13,
5765,
286,
428,
2723,
2438,
318,
218... | 3.227979 | 193 |
# Copyright 2020 BlueChasm LLC dba OsmosisAI.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from contextlib import contextmanager
from dataclasses import dataclass
from typing import List
@dataclass
@dataclass
@dataclass
| [
2,
220,
15069,
12131,
4518,
1925,
8597,
11419,
288,
7012,
440,
5796,
5958,
20185,
13,
198,
2,
198,
2,
220,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
220,
345,
743,
407,
779,
428,
... | 3.570048 | 207 |
import numpy as np
import math
def alpha_help(a,n):
"""function to compute some approximations
Parameters
----------
a : complex
number
n : int
number
Returns
ln : complex
approximation
"""
if a.real == 0 and a.imag == 0:
if n == 0:
ln = np.complex(0,0)
else:
ln = np.complex(-1e200,0)
elif n >= 300:
ln = n *np.log(a)- (n*np.log(n)-n + np.log(2*np.pi*n)/2)/2
else:
ln = n * np.log(a) - math.log(math.factorial(int(n)))/2
return ln
def find_norm(z):
"""find complex norm^2 of a vector of complex numbers"""
k = 0
for i in z:
k = k + (i * np.conj(i)).real
return k
def setup_scaled_H(q, c, n, m, nmaxfinal):
"""function to setup tridigonal Hamiltonian if first, return d,e
Parameters
----------
q : float
quadratic zeeman shift
c : float
c_2n, spinor interaction rate
n : int
number of particles
m : int
magnetization
nmaxfinal : int
deprecated
Returns
-------
e_min : float
minimum eigenvalue
e_max : float
maximum eigenvalue
d : np.array(complex)
diagonal elements of Hamiltonian
e : np.array(complex)
off diagonal elements of Hamiltonian
first_n0 : int
n-|m| % 2
"""
first_n0 = np.mod(n-abs(m), 2)
n0 = np.mod((n-abs(m)), 2)
nmax = int((n-abs(m)-n0)/2 + 1)
#create arrays
e = np.zeros(int(nmax)-1)
d = np.zeros(int(nmax))
c_local = c/n
#matrix elements of hamiltonian
nm = (n - n0 - m)/2
npp = (n - n0 + m)/2
for j in range(int(nmax)):
d[j] = (n-n0)*(q+0.5*c_local*(2*n0-1))
if j < (nmax-1):
e[j] = c_local*np.sqrt(nm*npp*(n0+2)*(n0+1))
nm = nm - 1
npp = npp - 1
n0 = n0 + 2
#estimate based on Gershgorin's circle theorem
radius = abs(e[0])
e_min = d[0] - radius
e_max = d[0] + radius
for j in range(2,int(nmax)-1):
radius = abs(e[j-2]) + abs(e[j-1])
e_min = min(e_min, d[j-1] - radius)
e_max = max(e_max, d[j-1] + radius)
radius = abs(e[nmax-2])
e_min = min(e_min, d[nmax-1] - radius)
e_max = max(e_max, d[nmax-1] + radius)
radius = (e_max + e_min)/2
for i in range(int(nmax)):
d[i] = d[i] - radius
radius = 2/(e_max-e_min)
d = np.multiply(radius,d)
e = np.multiply(radius,e)
return e_min, e_max ,d ,e, first_n0
def hamiltonian_c(n_max, in_w, e, d):
"""apply tridiagonal real Hamiltonian matrix to a complex vector
Parameters
----------
n_max : int
maximum n for cutoff
in_w : np.array(complex)
state in
d : np.array(complex)
diagonal elements of Hamiltonian
e : np.array(complex)
off diagonal elements of Hamiltonian
Returns
-------
out_w : np.array(complex)
application of Hamiltonian to vector
"""
n_max = int(n_max)
out_w = in_w[:n_max]*d[:n_max]
out_w[:(n_max-1)] += e[:(n_max-1)]*in_w[1:n_max]
out_w[1:n_max] += e[:n_max-1] * in_w[:n_max-1]
return out_w
def moments(wave, n):
"""mean and variance of wavefunction
Parameters
----------
wave : np.array(complex)
wavefunction
n : int
number of atoms
Returns
-------
x : float
mean of wavefunction
x2 : float
variance of wavefunction
"""
nn = np.arange(n, n+2*len(wave), 2)
Y = (wave * np.conj(wave)).real
x = np.sum(Y * nn)
x2 = np.sum(Y * nn * nn)
return x, x2 | [
11748,
299,
32152,
355,
45941,
198,
11748,
10688,
198,
198,
4299,
17130,
62,
16794,
7,
64,
11,
77,
2599,
198,
220,
220,
220,
37227,
8818,
284,
24061,
617,
5561,
320,
602,
198,
220,
220,
220,
220,
198,
220,
220,
220,
40117,
198,
220,... | 1.92731 | 1,926 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import sys
from .common import unittest
from squint._compatibility.itertools import islice
from squint._utils import IterItems
from squint.result import Result
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
198,
11748,
25064,
198,
6738,
764,
11321,
1330,
555,
715,
395,
198,
6738,
2809,
600,
13557,
5589,
25901,
13,
270,
861,
10... | 3.338235 | 68 |
# multiAgents.py
# --------------
# Licensing Information: You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC Berkeley, including a link to
# http://inst.eecs.berkeley.edu/~cs188/pacman/pacman.html
#
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
# The core projects and autograders were primarily created by John DeNero
# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# Student side autograding was added by Brad Miller, Nick Hay, and
# Pieter Abbeel (pabbeel@cs.berkeley.edu).
from __future__ import division
from util import manhattanDistance
from game import Directions
import random, util
from game import Agent
class ReflexAgent(Agent):
"""
A reflex agent chooses an action at each choice point by examining
its alternatives via a state evaluation function.
The code below is provided as a guide. You are welcome to change
it in any way you see fit, so long as you don't touch our method
headers.
"""
def getAction(self, gameState):
"""
You do not need to change this method, but you're welcome to.
getAction chooses among the best options according to the evaluation function.
Just like in the previous project, getAction takes a GameState and returns
some Directions.X for some X in the set {North, South, West, East, Stop}
"""
# Collect legal moves and successor states
legalMoves = gameState.getLegalActions()
# Choose one of the best actions
scores = [self.evaluationFunction(gameState, action) for action in legalMoves]
bestScore = max(scores)
bestIndices = [index for index in range(len(scores)) if scores[index] == bestScore]
chosenIndex = random.choice(bestIndices) # Pick randomly among the best
"Add more of your code here if you want to"
return legalMoves[chosenIndex]
def evaluationFunction(self, currentGameState, action):
"""
Design a better evaluation function here.
The evaluation function takes in the current and proposed successor
GameStates (pacman.py) and returns a number, where higher numbers are better.
The code below extracts some useful information from the state, like the
remaining food (newFood) and Pacman position after moving (newPos).
newScaredTimes holds the number of moves that each ghost will remain
scared because of Pacman having eaten a power pellet.
Print out these variables to see what you're getting, then combine them
to create a masterful evaluation function.
"""
# Useful information you can extract from a GameState (pacman.py)
successorGameState = currentGameState.generatePacmanSuccessor(action)
newPos = successorGameState.getPacmanPosition()
newFood = successorGameState.getFood()
newGhostStates = successorGameState.getGhostStates()
newScaredTimes = [ghostState.scaredTimer for ghostState in newGhostStates]
food_left = sum(int(j) for i in newFood for j in i)
if food_left > 0:
food_distances = [manhattanDistance(newPos, (x, y))
for x, row in enumerate(newFood)
for y, food in enumerate(row)
if food]
shortest_food = min(food_distances)
else:
shortest_food = 0
if newGhostStates:
ghost_distances = [manhattanDistance(ghost.getPosition(), newPos)
for ghost in newGhostStates]
shortest_ghost = min(ghost_distances)
if shortest_ghost == 0:
shortest_ghost = -2000
else:
shortest_ghost = -5 / shortest_ghost
else:
shortest_ghost = 0
return -2 * shortest_food + shortest_ghost - 40 * food_left
def scoreEvaluationFunction(currentGameState):
"""
This default evaluation function just returns the score of the state.
The score is the same one displayed in the Pacman GUI.
This evaluation function is meant for use with adversarial search agents
(not reflex agents).
"""
return currentGameState.getScore()
class MultiAgentSearchAgent(Agent):
"""
This class provides some common elements to all of your
multi-agent searchers. Any methods defined here will be available
to the MinimaxPacmanAgent, AlphaBetaPacmanAgent & ExpectimaxPacmanAgent.
You *do not* need to make any changes here, but you can if you want to
add functionality to all your adversarial search agents. Please do not
remove anything, however.
Note: this is an abstract class: one that should not be instantiated. It's
only partially specified, and designed to be extended. Agent (game.py)
is another abstract class.
"""
class MinimaxAgent(MultiAgentSearchAgent):
"""
Your minimax agent (question 2)
"""
def getAction(self, gameState):
"""
Returns the minimax action from the current gameState using self.depth
and self.evaluationFunction.
Here are some method calls that might be useful when implementing minimax.
gameState.getLegalActions(agentIndex):
Returns a list of legal actions for an agent
agentIndex=0 means Pacman, ghosts are >= 1
gameState.generateSuccessor(agentIndex, action):
Returns the successor game state after an agent takes an action
gameState.getNumAgents():
Returns the total number of agents in the game
"""
return max(
gameState.getLegalActions(0),
key = lambda x: search_depth(gameState.generateSuccessor(0, x), 1, 1)
)
class AlphaBetaAgent(MultiAgentSearchAgent):
"""
Your minimax agent with alpha-beta pruning (question 3)
"""
def getAction(self, gameState):
"""
Returns the minimax action using self.depth and self.evaluationFunction
"""
val, alpha, beta, best = None, None, None, None
for action in gameState.getLegalActions(0):
val = max(val, min_val(gameState.generateSuccessor(0, action), 1, 1, alpha, beta))
# if val >= beta: return action
if alpha is None:
alpha, best = val, action
else:
alpha, best = max(val, alpha), action if val > alpha else best
return best
class ExpectimaxAgent(MultiAgentSearchAgent):
"""
Your expectimax agent (question 4)
"""
def getAction(self, gameState):
"""
Returns the expectimax action using self.depth and self.evaluationFunction
All ghosts should be modeled as choosing uniformly at random from their
legal moves.
"""
return max(
gameState.getLegalActions(0),
key = lambda x: search_depth(gameState.generateSuccessor(0, x), 1, 1)
)
def nullHeuristic(state, problem=None):
"""
A heuristic function estimates the cost from the current state to the nearest
goal in the provided SearchProblem. This heuristic is trivial.
"""
return 0
def aStarSearch(problem, heuristic=nullHeuristic):
"Search the node that has the lowest combined cost and heuristic first."
visited = set()
p_queue = util.PriorityQueue()
p_queue.push((problem.getStartState(), []), 0)
while not p_queue.isEmpty():
state, actions = p_queue.pop()
if state in visited:
continue
visited.add(state)
if problem.isGoalState(state):
return actions
for successor, action, stepCost in problem.getSuccessors(state):
if successor not in visited:
p_queue.push(
(successor, actions + [action]),
stepCost + problem.getCostOfActions(actions) +
heuristic(successor, problem = problem))
from game import Actions
class PositionSearchProblem:
"""
A search problem defines the state space, start state, goal test,
successor function and cost function. This search problem can be
used to find paths to a particular point on the pacman board.
The state space consists of (x,y) positions in a pacman game.
Note: this search problem is fully specified; you should NOT change it.
"""
def __init__(self, gameState, costFn = lambda x: 1, goal=(1,1), start=None, warn=True, visualize=True):
"""
Stores the start and goal.
gameState: A GameState object (pacman.py)
costFn: A function from a search state (tuple) to a non-negative number
goal: A position in the gameState
"""
self.walls = gameState.getWalls()
self.startState = gameState.getPacmanPosition()
if start != None: self.startState = start
self.goal = goal
self.costFn = costFn
self.visualize = visualize
if warn and (gameState.getNumFood() != 1 or not gameState.hasFood(*goal)):
print 'Warning: this does not look like a regular search maze'
# For display purposes
self._visited, self._visitedlist, self._expanded = {}, [], 0
def getSuccessors(self, state):
"""
Returns successor states, the actions they require, and a cost of 1.
As noted in search.py:
For a given state, this should return a list of triples,
(successor, action, stepCost), where 'successor' is a
successor to the current state, 'action' is the action
required to get there, and 'stepCost' is the incremental
cost of expanding to that successor
"""
successors = []
for action in [Directions.NORTH, Directions.SOUTH, Directions.EAST, Directions.WEST]:
x,y = state
dx, dy = Actions.directionToVector(action)
nextx, nexty = int(x + dx), int(y + dy)
if not self.walls[nextx][nexty]:
nextState = (nextx, nexty)
cost = self.costFn(nextState)
successors.append( ( nextState, action, cost) )
# Bookkeeping for display purposes
self._expanded += 1
if state not in self._visited:
self._visited[state] = True
self._visitedlist.append(state)
return successors
def getCostOfActions(self, actions):
"""
Returns the cost of a particular sequence of actions. If those actions
include an illegal move, return 999999
"""
if actions == None: return 999999
x,y= self.getStartState()
cost = 0
for action in actions:
# Check figure out the next state and see whether its' legal
dx, dy = Actions.directionToVector(action)
x, y = int(x + dx), int(y + dy)
if self.walls[x][y]: return 999999
cost += self.costFn((x,y))
return cost
class AnyFoodSearchProblem(PositionSearchProblem):
"""
A search problem for finding a path to any food.
This search problem is just like the PositionSearchProblem, but
has a different goal test, which you need to fill in below. The
state space and successor function do not need to be changed.
The class definition above, AnyFoodSearchProblem(PositionSearchProblem),
inherits the methods of the PositionSearchProblem.
You can use this search problem to help you fill in
the findPathToClosestDot method.
"""
def __init__(self, gameState):
"Stores information from the gameState. You don't need to change this."
# Store the food for later reference
self.food = gameState.getFood()
# Store info for the PositionSearchProblem (no need to change this)
self.walls = gameState.getWalls()
self.startState = gameState.getPacmanPosition()
self.costFn = lambda x: 1
self._visited, self._visitedlist, self._expanded = {}, [], 0
def isGoalState(self, state):
"""
The state is Pacman's position. Fill this in with a goal test
that will complete the problem definition.
"""
x,y = state
return self.food[x][y]
def manhattanHeuristic(position, problem, info={}):
"The Manhattan distance heuristic for a PositionSearchProblem"
xy1 = position
xy2 = problem.goal
return abs(xy1[0] - xy2[0]) + abs(xy1[1] - xy2[1])
def betterEvaluationFunction(currentGameState):
"""
Your extreme ghost-hunting, pellet-nabbing, food-gobbling, unstoppable
evaluation function (question 5).
DESCRIPTION: This function evaluates a state based on the sum of
six weighted variables:
- Distance of path to nearest food pellet
- Manhattan distance to closest offensive ghost
- Manhattan distance to closest power pellet
- Number of power pellets left
- Number of food pellets left
- Manhattan distance to closest scared ghost
For some of the variables, the reciprocal was taken based on the
following methodology:
- The reciprocal of the distance to closest food pellet
- A close food pellet is a good thing, but we want grabbing
one to have a limited value on the change in score
- The score drop due to the increased distance to the next
nearest pellet should be less than the score gain from
eating the pellet.
- The negative reciprocal of the distance to the closest ghost
- A close ghost makes the state less desirable, but variances
in ghosts far away should have little impact
- The reciprocal of the distance to the closest power pellet
- Same reasoning as food pellets
"""
pos = currentGameState.getPacmanPosition()
food = currentGameState.getFood()
ghosts = currentGameState.getGhostStates()
capsules = currentGameState.getCapsules()
food_left = sum(int(j) for i in food for j in i)
# Nom them foods
problem = AnyFoodSearchProblem(currentGameState)
shortest_food = aStarSearch(problem, heuristic = nearest_food_heuristic)
if shortest_food:
shortest_food = 1 / len(shortest_food)
else:
shortest_food = 1000
# if food_left > 0:
# food_distances = [
# manhattanDistance(pos, (x, y))
# for x, row in enumerate(food)
# for y, food_bool in enumerate(row)
# if food_bool
# ]
# shortest_food = 1 / min(food_distances)
# else:
# shortest_food = -200000
scared = [ghost for ghost in ghosts if ghost.scaredTimer > 0]
ghosts = [ghost for ghost in ghosts if ghost.scaredTimer == 0]
# Don't let the ghost nom you
if ghosts:
ghost_distances = [manhattanDistance(ghost.getPosition(), pos)
for ghost in ghosts]
shortest_ghost = min(ghost_distances)
if shortest_ghost == 0:
shortest_ghost = 200000
else:
shortest_ghost = 1 / shortest_ghost
else:
shortest_ghost = 0
# Nom them scared ones
shortest_scared = 0
if scared:
scared_distances = [manhattanDistance(ghost.getPosition(), pos)
for ghost in scared]
scared_distances = [distance
for ghost, distance in zip(scared, scared_distances)
if distance <= ghost.scaredTimer]
if scared_distances:
shortest_scared = min(scared_distances)
if shortest_scared == 0:
shortest_scared = 10
else:
shortest_scared = 1 / shortest_scared
# Nom them capsules
capsules_left = len(capsules)
if capsules:
capsule_distances = [manhattanDistance(capsule, pos)
for capsule in capsules]
shortest_capsule = 1 / min(capsule_distances)
else:
shortest_capsule = 0
weights = [5, 10, -5, -50, -100, 10]
scores = [shortest_food, shortest_capsule, shortest_ghost,
food_left, capsules_left, shortest_scared]
score = sum(i * j for i, j in zip(scores, weights))
# print "pos\t\t\t", pos
# print "shortest food\t\t", shortest_food
# print "food_left\t\t", food_left
# print "shortest_capsule\t", shortest_capsule
# print "score\t\t\t", score
# print
return score
# Abbreviation
better = betterEvaluationFunction
class ContestAgent(MultiAgentSearchAgent):
"""
Your agent for the mini-contest
"""
def getAction(self, gameState):
"""
Returns an action. You can use any method you want and search to any depth you want.
Just remember that the mini-contest is timed, so you have to trade off speed and computation.
Ghosts don't behave randomly anymore, but they aren't perfect either -- they'll usually
just make a beeline straight towards Pacman (or away from him if they're scared!)
"""
"*** YOUR CODE HERE ***"
util.raiseNotDefined()
| [
2,
5021,
10262,
658,
13,
9078,
198,
2,
220,
26171,
198,
2,
10483,
26426,
6188,
25,
220,
921,
389,
1479,
284,
779,
393,
9117,
777,
4493,
329,
198,
2,
9856,
4959,
2810,
326,
357,
16,
8,
345,
466,
407,
14983,
393,
7715,
198,
2,
813... | 2.616242 | 6,637 |
""" XVM (c) www.modxvm.com 2013-2017 """
#####################################################################
# imports
import simplejson
import traceback
import BigWorld
import game
from Avatar import PlayerAvatar
from BattleReplay import BattleReplay, g_replayCtrl
from PlayerEvents import g_playerEvents
from gui.shared import g_eventBus, events
from xfw import *
import xvm_main.python.config as config
from xvm_main.python.logger import *
import xvm_main.python.minimap_circles as minimap_circles
import xvm_main.python.utils as utils
from consts import *
#####################################################################
# handlers
_xvm_record_data = None
_xvm_play_data = None
@registerEvent(PlayerAvatar, 'onBecomePlayer')
# record
g_eventBus.addListener(XVM_BATTLE_EVENT.XMQP_MESSAGE, onXmqpMessage)
@registerEvent(game, 'fini')
@overrideMethod(BattleReplay, 'stop')
# play
| [
37811,
1395,
15996,
357,
66,
8,
7324,
13,
4666,
87,
14761,
13,
785,
2211,
12,
5539,
37227,
198,
198,
29113,
29113,
4242,
2,
198,
2,
17944,
198,
198,
11748,
2829,
17752,
198,
11748,
12854,
1891,
198,
198,
11748,
4403,
10603,
198,
11748... | 3.263538 | 277 |
import logging
from torch.optim import SGD, Adam
from torch.optim.lr_scheduler import LambdaLR, StepLR, MultiStepLR
from torch import nn
class PolyLR(LambdaLR):
"""DeepLab learning rate policy"""
| [
11748,
18931,
198,
198,
6738,
28034,
13,
40085,
1330,
26147,
35,
11,
7244,
198,
6738,
28034,
13,
40085,
13,
14050,
62,
1416,
704,
18173,
1330,
21114,
6814,
35972,
11,
5012,
35972,
11,
15237,
8600,
35972,
198,
6738,
28034,
1330,
299,
77,... | 3.1875 | 64 |
from collections import Counter
from itertools import product
import numpy as np
import advent
if __name__ == '__main__':
main()
| [
6738,
17268,
1330,
15034,
198,
6738,
340,
861,
10141,
1330,
1720,
198,
11748,
299,
32152,
355,
45941,
198,
198,
11748,
19980,
628,
628,
628,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
1388,
... | 3.27907 | 43 |
import pyro
import pyro.distributions as dist
import torch
from scvi import _CONSTANTS
from scvi.module.base import PyroBaseModuleClass, auto_move_data
from scvi.nn import DecoderSCVI, Encoder
class MyPyroModule(PyroBaseModuleClass):
"""
Skeleton Variational auto-encoder Pyro model.
Here we implement a basic version of scVI's underlying VAE [Lopez18]_.
This implementation is for instructional purposes only.
Parameters
----------
n_input
Number of input genes
n_latent
Dimensionality of the latent space
n_hidden
Number of nodes per hidden layer
n_layers
Number of hidden layers used for encoder and decoder NNs
"""
@staticmethod
@torch.no_grad()
@auto_move_data
| [
11748,
12972,
305,
198,
11748,
12972,
305,
13,
17080,
2455,
507,
355,
1233,
198,
11748,
28034,
198,
6738,
629,
8903,
1330,
4808,
10943,
2257,
1565,
4694,
198,
6738,
629,
8903,
13,
21412,
13,
8692,
1330,
44954,
14881,
26796,
9487,
11,
82... | 2.878788 | 264 |
#-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#--------------------------------------------------------------------------
import os
from azure.mgmt.compute import ComputeManagementClient
from azure.mgmt.resource import ResourceManagementClient
from azure.common.credentials import ServicePrincipalCredentials
#--------------------------------------------------------------------------
# credentials from environment
#--------------------------------------------------------------------------
SUBSCRIPTION_ID = os.environ['AZURE_SUBSCRIPTION_ID']
TENANT_ID = os.environ['AZURE_TENANT']
CLIENT_ID = os.environ['AZURE_CLIENT_ID']
CLIENT_SECRET = os.environ['AZURE_SECRET']
#--------------------------------------------------------------------------
# variables
#--------------------------------------------------------------------------
AZURE_LOCATION = 'eastus'
RESOURCE_GROUP = "myResourceGroup"
VM_NAME = "myVm"
NETWORK_INTERFACE_NAME = "myNetworkInterface"
VIRTUAL_NETWORK_NAME = "myVirtualNetwork"
SUBNET_NAME = "mySubnet"
#--------------------------------------------------------------------------
# management clients
#--------------------------------------------------------------------------
credentials = ServicePrincipalCredentials(
client_id=CLIENT_ID,
secret=CLIENT_SECRET,
tenant=TENANT_ID
)
mgmt_client = ComputeManagementClient(credentials, SUBSCRIPTION_ID)
resource_client = ResourceManagementClient(credentials, SUBSCRIPTION_ID)
from azure.mgmt.network import NetworkManagementClient
network_client = NetworkManagementClient(credentials, SUBSCRIPTION_ID)
#--------------------------------------------------------------------------
# resource group (prerequisite)
#--------------------------------------------------------------------------
print("Creating Resource Group")
resource_client.resource_groups.create_or_update(resource_group_name=RESOURCE_GROUP, parameters={ 'location': AZURE_LOCATION })
#--------------------------------------------------------------------------
# virtual network (prerequisite)
#--------------------------------------------------------------------------
print("Prerequisite - Creating Virtual Network")
azure_operation_poller = network_client.virtual_networks.create_or_update(
RESOURCE_GROUP,
VIRTUAL_NETWORK_NAME,
{
'location': AZURE_LOCATION,
'address_space': {
'address_prefixes': ['10.0.0.0/16']
}
},
)
result_create = azure_operation_poller.result()
async_subnet_creation = network_client.subnets.create_or_update(
RESOURCE_GROUP,
VIRTUAL_NETWORK_NAME,
SUBNET_NAME,
{'address_prefix': '10.0.0.0/24'}
)
subnet_info = async_subnet_creation.result()
#--------------------------------------------------------------------------
# network interface (prerequisite)
#--------------------------------------------------------------------------
print("Prerequisite - Creating Network Interface")
async_nic_creation = network_client.network_interfaces.create_or_update(
RESOURCE_GROUP,
NETWORK_INTERFACE_NAME,
{
'location': AZURE_LOCATION,
'ip_configurations': [{
'name': 'MyIpConfig',
'subnet': {
'id': subnet_info.id
}
}]
}
)
nic_info = async_nic_creation.result()
#--------------------------------------------------------------------------
# /VirtualMachines/put/Create a vm with password authentication.[put]
#--------------------------------------------------------------------------
print("Create a vm with password authentication.")
BODY = {
"location": AZURE_LOCATION,
"hardware_profile": {
"vm_size": "Standard_D1_v2"
},
"storage_profile": {
"image_reference": {
"sku": "2016-Datacenter",
"publisher": "MicrosoftWindowsServer",
"version": "latest",
"offer": "WindowsServer"
},
"os_disk": {
"caching": "ReadWrite",
"managed_disk": {
"storage_account_type": "Standard_LRS"
},
"name": "myVMosdisk",
"create_option": "FromImage"
}
},
"os_profile": {
"admin_username": "myuser",
"computer_name": "myVM",
"admin_password": "Password123!!!"
},
"network_profile": {
"network_interfaces": [
{
"id": "/subscriptions/" + SUBSCRIPTION_ID + "/resourceGroups/" + RESOURCE_GROUP + "/providers/Microsoft.Network/networkInterfaces/" + NETWORK_INTERFACE_NAME,
"properties": {
"primary": True
}
}
]
}
}
result = mgmt_client.virtual_machines.create_or_update(resource_group_name=RESOURCE_GROUP, vm_name=VM_NAME, parameters=BODY)
result = result.result()
#--------------------------------------------------------------------------
# /VirtualMachines/get/Get Virtual Machine Instance View.[get]
#--------------------------------------------------------------------------
print("Get Virtual Machine Instance View.")
result = mgmt_client.virtual_machines.instance_view(resource_group_name=RESOURCE_GROUP, vm_name=VM_NAME)
#--------------------------------------------------------------------------
# /VirtualMachines/get/Lists all available virtual machine sizes to which the specified virtual machine can be resized[get]
#--------------------------------------------------------------------------
print("Lists all available virtual machine sizes to which the specified virtual machine can be resized")
result = mgmt_client.virtual_machines.list_available_sizes(resource_group_name=RESOURCE_GROUP, vm_name=VM_NAME)
#--------------------------------------------------------------------------
# /VirtualMachines/get/Get a Virtual Machine.[get]
#--------------------------------------------------------------------------
print("Get a Virtual Machine.")
result = mgmt_client.virtual_machines.get(resource_group_name=RESOURCE_GROUP, vm_name=VM_NAME)
#--------------------------------------------------------------------------
# /VirtualMachines/get/Lists all the virtual machines under the specified subscription for the specified location.[get]
#--------------------------------------------------------------------------
print("Lists all the virtual machines under the specified subscription for the specified location.")
result = mgmt_client.virtual_machines.list_by_location(location=AZURE_LOCATION)
| [
2,
10097,
45537,
198,
2,
15069,
357,
66,
8,
5413,
10501,
13,
1439,
2489,
10395,
13,
198,
2,
49962,
739,
262,
17168,
13789,
13,
4091,
13789,
13,
14116,
287,
262,
1628,
6808,
329,
198,
2,
5964,
1321,
13,
198,
2,
10097,
35937,
198,
1... | 3.674617 | 1,761 |
# Generated by Django 2.0 on 2019-01-12 16:27
from django.db import migrations, models
import showcase.file_size_validator
| [
2,
2980,
515,
416,
37770,
362,
13,
15,
319,
13130,
12,
486,
12,
1065,
1467,
25,
1983,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
198,
11748,
21742,
13,
7753,
62,
7857,
62,
12102,
1352,
628
] | 3.125 | 40 |
# Generated by Django 2.2.6 on 2019-11-18 11:44
import datetime
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
362,
13,
17,
13,
21,
319,
13130,
12,
1157,
12,
1507,
1367,
25,
2598,
198,
198,
11748,
4818,
8079,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 2.972222 | 36 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import requests
import datetime
import time
import opasConfig
import models
import logging
import localsecrets
# import urllib.parse
# import json
import sys
# from opasAPISupportLib import save_opas_session_cookie
sys.path.append("..") # Adds higher directory to python modules path.
from config.opasConfig import OPASSESSIONID
logger = logging.getLogger(__name__)
# for this module
# logger.setLevel(logging.DEBUG)
if 0:
# create console handler and set level to debug
ch = logging.StreamHandler()
# create formatter
formatter = logging.Formatter(opasConfig.FORMAT)
# add formatter to ch
ch.setFormatter(formatter)
ch.setLevel(logging.DEBUG)
logger.addHandler(ch)
from starlette.responses import Response
from starlette.requests import Request
import starlette.status as httpCodes
# import localsecrets
from localsecrets import PADS_BASE_URL, PADS_TEST_ID, PADS_TEST_PW, PADS_BASED_CLIENT_IDS
base = PADS_BASE_URL
# base = "http://development.org:9300"
import opasCentralDBLib
ocd = opasCentralDBLib.opasCentralDB()
def find_client_session_id(request: Request,
response: Response,
client_session: str=None
):
"""
ALWAYS returns a session ID or None
Dependency for client_session id:
gets it from header;
if not there, gets it from query param;
if not there, gets it from a cookie
Otherwise, gets a new one from the auth server
"""
ret_val = None
if client_session is None or client_session == 'None':
client_session = request.headers.get(opasConfig.CLIENTSESSIONID, None)
if client_session is not None:
ret_val = client_session
#msg = f"client-session from header: {ret_val} "
#logger.debug(msg)
else:
#Won't work unless they expose cookie to client, so don't waste time
#pepweb_session_cookie = request.cookies.get("pepweb_session", None)
opas_session_cookie = request.cookies.get(opasConfig.OPASSESSIONID, None)
client_session_qparam = request.query_params.get(opasConfig.CLIENTSESSIONID, None)
client_session_cookie = request.cookies.get(opasConfig.CLIENTSESSIONID, None)
if client_session_qparam is not None:
ret_val = client_session_qparam
msg = f"client-session from param: {ret_val}. URL: {request.url}"
logger.info(msg)
elif client_session_cookie is not None:
ret_val = client_session_cookie
msg = f"client-session from client-session cookie: {ret_val}. URL: {request.url}"
logger.info(msg)
elif opas_session_cookie is not None and opas_session_cookie != 'None':
msg = f"client-session from stored OPASSESSION cookie {opas_session_cookie}. URL: {request.url} "
logger.info(msg)
ret_val = opas_session_cookie
else:
msg = f"No dependency client-session ID found. Returning None. URL: {request.url}"
logger.info(msg)
ret_val = None
if ret_val is not None and opas_session_cookie is not None and opas_session_cookie != ret_val:
# overwrite any saved cookie, if there is one
logger.debug("Saved OpasSessionID Cookie")
response.set_cookie(
OPASSESSIONID,
value=f"{client_session}",
domain=localsecrets.COOKIE_DOMAIN
)
return ret_val
def get_user_ip(request: Request):
"""
Returns a users IP if passed in the headers.
"""
ret_val = None
if request is not None:
ret_val = request.headers.get(opasConfig.X_FORWARDED_FOR, None)
if ret_val is not None:
try:
req_url = request.url
msg = f"X-Forwarded-For from header: {ret_val}. URL: {req_url}"
logger.debug(msg)
except Exception as e:
logger.error(f"Error: {e}")
return ret_val
def get_authserver_session_info(session_id,
client_id=opasConfig.NO_CLIENT_ID,
pads_session_info=None,
request=None):
"""
Return a filled-in SessionInfo object from several PaDS calls
Saves the session information to the SQL database (or updates it)
>>> session_info = get_authserver_session_info(None, "4")
>>> session_info.username == "NotLoggedIn"
True
>>> pads_session_info = pads_login()
>>> session_id = pads_session_info.SessionId
>>> session_info = get_authserver_session_info(session_id, "4", pads_session_info=pads_session_info)
>>> session_info.authorized_peparchive == True
True
>>> session_info = get_authserver_session_info("7F481226-9AF1-47BC-8E26-F07DB8C3E78D", "4")
>>> print (session_info)
session_id='7F481226-9AF1-47BC-8E26-F07DB8C3E78D' user_id=0 username='NotLoggedIn' ...
>>> session_info.username == "NotLoggedIn"
True
"""
ts = time.time()
caller_name = "get_authserver_session_info"
#make sure it's ok, this is causing problems on production
#see if it's an int?
client_id = validate_client_id(client_id, caller_name=caller_name)
if pads_session_info is None or session_id is None:
# not supplied, so fetch
try:
logger.debug(f"{caller_name}: calling PaDS")
pads_session_info = get_pads_session_info(session_id=session_id,
client_id=client_id,
retry=False,
request=request)
try:
session_info = models.SessionInfo(session_id=pads_session_info.SessionId, api_client_id=client_id)
except Exception as e:
msg = f"{caller_name}: Error {e}. SessID: {session_id} client_id: {client_id} req: {request}"
if opasConfig.LOCAL_TRACE:
print (msg)
logger.error(msg)
session_info = models.SessionInfo(session_id="unknown", api_client_id=client_id)
else:
session_id = session_info.session_id
except Exception as e:
logger.error(f"{caller_name}: Error getting pads_session_info {e}")
client_id_type = type(client_id)
if client_id_type == int:
session_info = models.SessionInfo(session_id="unknown", api_client_id=client_id)
else:
session_info = models.SessionInfo(session_id="unknown", api_client_id=opasConfig.NO_CLIENT_ID)
#else:
#session_info = models.SessionInfo(session_id=session_id, api_client_id=client_id)
# This section is causing errors--I believe it's because PaDS is calling the API without real user info
if pads_session_info is not None:
if pads_session_info.SessionId is not None:
session_info = models.SessionInfo(session_id=pads_session_info.SessionId, api_client_id=client_id)
else:
session_info = models.SessionInfo(session_id=session_id, api_client_id=client_id)
start_time = pads_session_info.session_start_time if pads_session_info.session_start_time is not None else datetime.datetime.now()
try:
session_info.has_subscription = pads_session_info.HasSubscription
except Exception as e:
logger.error(f"{caller_name}: HasSubscription not supplied by PaDS")
session_info.has_subscription = False
try:
session_info.is_valid_login = pads_session_info.IsValidLogon
session_info.authenticated = pads_session_info.IsValidLogon
except Exception as e:
logger.error(f"{caller_name}: IsValidLogon not supplied by PaDS")
session_info.is_valid_login = False
try:
session_info.is_valid_username = pads_session_info.IsValidUserName
except Exception as e:
logger.error(f"{caller_name}: IsValidUsername not supplied by PaDS")
session_info.is_valid_username = False
# session_info.confirmed_unauthenticated = False
session_info.session_start = start_time
session_info.session_expires_time = start_time + datetime.timedelta(seconds=pads_session_info.SessionExpires)
session_info.pads_session_info = pads_session_info
user_logged_in_bool = pads_session_info.IsValidLogon
# either continue an existing session, or start a new one
if request is not None:
if user_logged_in_bool or pads_session_info.IsValidLogon:
pads_user_info, status_code = get_authserver_session_userinfo(session_id, client_id, addl_log_info=" (complete session_record)")
session_info.pads_user_info = pads_user_info
if status_code == 401: # could be just no session_id, but also could have be returned by PaDS if it doesn't recognize it
if session_info.pads_session_info.pads_status_response > 500:
msg = f"{caller_name}: PaDS error or PaDS unavailable - user cannot be logged in and no session_id assigned"
logger.error(msg)
if session_id is not None:
logger.warning(f"{session_id} call to pads produces 401 error. Setting user_logged_in to False")
user_logged_in_bool = False
# session is not logged in
# session_info.confirmed_unauthenticated = True
# these are defaults so commented out
# session_info.authenticated = False
# session_info.user_id = 0
# session_info.username = opasConfig.USER_NOT_LOGGED_IN_NAME
# session_info.user_type = "Unknown"
# session_info.admin = False
# session_info.authorized_peparchive = False
# session_info.authorized_pepcurrent = False
else:
start_time = pads_session_info.session_start_time if pads_session_info.session_start_time is not None else datetime.datetime.now()
if pads_user_info is not None:
session_info.user_id = userID=pads_user_info.UserId
session_info.username = pads_user_info.UserName
session_info.user_type = pads_user_info.UserType
session_info.admin = pads_user_info.UserType=="Admin"
session_info.authorized_peparchive = pads_user_info.HasArchiveAccess
session_info.authorized_pepcurrent = pads_user_info.HasCurrentAccess
logger.debug("PaDS returned user info. Saving to DB")
unused_val = save_session_info_to_db(session_info)
if session_info.user_type is None:
session_info.user_type = "Unknown"
if session_info.username is None:
session_info.username = opasConfig.USER_NOT_LOGGED_IN_NAME
# print (f"SessInfo: {session_info}")
logger.debug(f"***authent: {session_info.authenticated} - get_full_session_info total time: {time.time() - ts}***")
return session_info
def get_authserver_session_userinfo(session_id, client_id, addl_log_info=""):
"""
Send PaDS the session ID and see if that's associated with a user yet.
"""
ret_val = None
caller_name = "get_authserver_session_userinfo"
status_code = 401
msg = f"for session {session_id} from client {client_id}"
#logger.debug(msg)
if session_id is not None:
full_URL = base + f"/v1/Users" + f"?SessionID={session_id}"
try:
response = requests.get(full_URL, headers={"Content-Type":"application/json"}) # Call PaDS
ocd.temp_pads_log_call(caller=caller_name, reason=caller_name + addl_log_info, session_id=session_id, pads_call=full_URL, return_status_code=response.status_code) # Log Call PaDS
except Exception as e:
logger.error(f"{caller_name}: Error from auth server user info call: {e}. Non-logged in user {msg}")
else:
status_code = response.status_code
padsinfo = response.json()
if response.ok:
padsinfo = fix_userinfo_invalid_nones(padsinfo)
ret_val = models.PadsUserInfo(**padsinfo)
else:
logger.debug(f"Non-logged in user {msg}. Info from PaDS: {padsinfo}") # 2021.08.08 back to debug...seems consistent.
return ret_val, status_code # padsinfo, status_code
def authserver_login(username=PADS_TEST_ID,
password=PADS_TEST_PW,
session_id=None,
client_id=opasConfig.NO_CLIENT_ID,
retry=True):
"""
Login directly via the auth server (e.g., in this case PaDS)
If session_id is included, the idea is that the logged in entity will keep that constant.
-- #TODO but that's not implemented in this server itself, if logged in through there, yet!
"""
msg = ""
caller_name = "authserver_login"
logger.info(f"Logging in user {username} with session_id {session_id}")
if session_id is not None:
full_URL = base + f"/v1/Authenticate/?SessionId={session_id}"
else:
full_URL = base + f"/v1/Authenticate/"
try:
pads_response = requests.post(full_URL, headers={"Content-Type":"application/json"}, json={"UserName":f"{username}", "Password":f"{password}"})
ocd.temp_pads_log_call(caller=caller_name, reason=caller_name, session_id=session_id, pads_call=full_URL, return_status_code=pads_response.status_code, params=username) # Log Call PaDS
except Exception as e:
msg = f"{caller_name}: Authorization server not available. {e}"
logger.error(msg)
if opasConfig.LOCAL_TRACE: print (f"****WATCH_THIS****: {msg}")
# set up response with default model
pads_session_info = models.PadsSessionInfo()
if session_id is not None:
pads_session_info.SessionId = session_id
#session_info = models.SessionInfo()
else:
status_code = pads_response.status_code # save it for a bit (we replace pads_session_info below)
if pads_response.ok:
pads_response = pads_response.json()
pads_response = fix_pydantic_invalid_nones(pads_response, caller_name="AuthserverLogin")
if isinstance(pads_response, str):
pads_session_info = models.PadsSessionInfo()
logger.error(f"{caller_name}: returned error string: {pads_response}")
else:
try:
pads_session_info = models.PadsSessionInfo(**pads_response)
except Exception as e:
logger.error(f"{caller_name}: return assignment error: {e}")
pads_session_info = models.PadsSessionInfo()
elif status_code > 403:
if retry == True:
# try once without the session ID
msg = f"{caller_name}: Login returned {status_code}. Trying without session id."
logger.error(msg)
pads_session_info = authserver_login(username=username, password=password, client_id=client_id, retry=False)
else:
msg = f"{caller_name}: Auth System Issue. Login returned {status_code}. Retry (failed), or Retry not selected."
logger.error(msg)
pads_session_info = models.PadsSessionInfo()
pads_session_info.pads_status_response = status_code
pads_session_info.pads_disposition = msg
else:
try:
pads_response = pads_response.json()
pads_response = fix_pydantic_invalid_nones(pads_response)
if isinstance(pads_response, str):
pads_session_info = models.PadsSessionInfo()
msg = f"{caller_name}: Returned error string: {pads_response}"
logger.error(msg)
else:
try:
pads_session_info = models.PadsSessionInfo(**pads_response)
except Exception as e:
msg = f"{caller_name}: Return assignment error: {e}"
logger.error(msg)
pads_session_info = models.PadsSessionInfo()
except Exception as e:
logger.error(f"{caller_name}: Response processing error {e}")
pads_session_info = models.PadsSessionInfo(**pads_session_info)
pads_session_info.pads_status_response = status_code
pads_session_info.pads_disposition = msg
return pads_session_info
def get_access_limitations(doc_id,
classification, # document classification, e.g., free, current, archive, undefined, offsite, toc
session_info, # updated in code below
year=None,
doi=None,
documentListItem: models.DocumentListItem=None, # deprecated, not used
fulltext_request:bool=None,
request=None):
"""
Based on the classification of the document (archive, current [embargoed],
free, offsite), and the users permissions in session_info, determine whether
this user has access to the full-text of the document, and fill out permissions
in accessLimitations (ret_val) structure for document doc_id
20210428 - removed documentListItem and update side effects, caller should copy access
There are still side effects on session_info
"""
caller_name = "get_access_limitations"
try:
open_access = False
ret_val = models.AccessLimitations()
ret_val.doi = doi
ret_val.accessLimitedPubLink = None
ret_val.accessLimitedCode = 200 # default (for now)
# USE THESE DEFAULTS, only set below if different
# default, turned on if classification below is opasConfig.DOCUMENT_ACCESS_EMBARGOED
ret_val.accessLimited = True # no access by default, may be changed below.
ret_val.accessChecked = False # Same as default, for better clarity here
ret_val.accessLimitedClassifiedAsCurrentContent = False
if session_info is None:
# logger.warning(f"Document permissions for {doc_id} -- no session info")
ret_val.accessLimitedCode = 401 # no session
session_id = "No Session Info"
# not logged in
# use all the defaults above, log error below.
else:
# for debugging display at return
try:
session_id = session_info.session_id
except:
session_id = "No Session ID"
if ret_val.doi is not None:
publisherAccess = opasConfig.ACCESS_SUMMARY_PUBLISHER_INFO + opasConfig.ACCESS_SUMMARY_PUBLISHER_INFO_DOI_LINK % ret_val.doi
# TODO: get the link we use to send users to publishers site when we don't have it, and no doi, and implement here.
# for now, just doi
ret_val.accessLimitedPubLink = opasConfig.ACCESS_SUMMARY_PUBLISHER_INFO_DOI_LINK % ret_val.doi
else:
publisherAccess = "."
if classification in (opasConfig.DOCUMENT_ACCESS_FREE):
# free can be for anyone!!!! Change accessLimited
open_access = True
ret_val.accessLimited = False
ret_val.accessChecked = True
ret_val.accessLimitedDescription = opasConfig.ACCESSLIMITED_DESCRIPTION_FREE
#"This content is currently free to all users."
ret_val.accessLimitedReason = opasConfig.ACCESSLIMITED_DESCRIPTION_FREE
elif classification in (opasConfig.DOCUMENT_ACCESS_OFFSITE):
# we only allow reading abstracts for offsite, accessLimited is True
ret_val.accessLimitedDescription = opasConfig.ACCESS_SUMMARY_DESCRIPTION
#"This content is currently completely limited to all users."
ret_val.accessLimitedReason = opasConfig.ACCESSLIMITED_DESCRIPTION_OFFSITE + publisherAccess # limited...get it elsewhere
elif classification in (opasConfig.DOCUMENT_ACCESS_EMBARGOED): # PEPCurrent
ret_val.accessLimitedDescription = opasConfig.ACCESS_SUMMARY_DESCRIPTION
ret_val.accessLimitedClassifiedAsCurrentContent = True
ret_val.accessLimitedReason = opasConfig.ACCESS_SUMMARY_DESCRIPTION + opasConfig.ACCESS_SUMMARY_EMBARGOED + publisherAccess # limited...get it elsewhere
if session_info is not None:
try:
# #########################################################################################
# optimization...if authorized for PEPCurrent, don't check again this query, unless it's a full-text request
# #########################################################################################
if session_info.authorized_pepcurrent:
ret_val.accessLimited = False # you can access it!!!
ret_val.accessChecked = True
# "This current content is available for you to access"
ret_val.accessLimitedReason = opasConfig.ACCESSLIMITED_DESCRIPTION_CURRENT_CONTENT_AVAILABLE
logger.debug("Optimization - session info used to authorize PEPCurrent document")
except Exception as e:
logger.error(f"{caller_name}: PEPCurrent document permission: {e}")
elif classification in (opasConfig.DOCUMENT_ACCESS_ARCHIVE):
ret_val.accessLimitedDescription = opasConfig.ACCESS_SUMMARY_DESCRIPTION
# ret_val.accessLimited = True # default is true
ret_val.accessLimitedReason = opasConfig.ACCESS_SUMMARY_FORSUBSCRIBERS
# #########################################################################################
# optimization...if authorized, don't check again, unless it's a full-text request
# #########################################################################################
if session_info is not None:
try:
if session_info.authorized_peparchive:
ret_val.accessLimited = False # you can access it!!!
ret_val.accessChecked = True
# "This content is available for you to access"
ret_val.accessLimitedReason = opasConfig.ACCESSLIMITED_DESCRIPTION_AVAILABLE
logger.debug("Optimization - session info used to authorize PEPArchive document")
except Exception as e:
logger.error(f"{caller_name}: PEPArchive document permission: {e}")
elif classification in (opasConfig.DOCUMENT_ACCESS_TOC):
open_access = True
ret_val.accessLimited = False # you can access it!!! (All TOCs are open)
ret_val.accessChecked = True
# just like free for now
ret_val.accessLimitedDescription = opasConfig.ACCESSLIMITED_DESCRIPTION_FREE
#"This content is currently free to all users."
ret_val.accessLimitedReason = opasConfig.ACCESSLIMITED_DESCRIPTION_FREE
else:
logger.error(f"{caller_name}: Unknown classification: {classification}")
# **************************************
# Now check for access, or cached access
# - always check for a full-text request so PaDS can track them.
# since we don't really always know about authentication, we need to check all requests that are otherwise rejected.
# **************************************
try:
if not open_access:
if (session_info.authenticated == True # Must be authenticated for this check
and (ret_val.accessLimited == True # if it's marked limited, then may need to check, it might be first one
or fulltext_request == True)): # or whenever full-text is requested.
# and session_info.api_client_session and session_info.api_client_id in PADS_BASED_CLIENT_IDS:
if fulltext_request:
reason_for_check = opasConfig.AUTH_DOCUMENT_VIEW_REQUEST
else:
reason_for_check = opasConfig.AUTH_ABSTRACT_VIEW_REQUEST
try:
pads_authorized, resp = authserver_permission_check(session_id=session_info.session_id,
doc_id=doc_id,
doc_year=year,
reason_for_check=reason_for_check,
request=request)
except Exception as e:
# PaDS could be down, local development
logger.error(f"{caller_name}: Access Exception: {e}")
if localsecrets.BASEURL == "development.org:9100":
resp = models.PadsPermitInfo(Permit=True, HasArchiveAccess=True, HasCurrentAccess=True)
# so it doesn't have to check this later
session_info.authorized_peparchive = True
session_info.authorized_pepcurrent = True
else:
session_info.authorized_peparchive = False
session_info.authorized_pepcurrent = False
resp = models.PadsPermitInfo(Permit=False, HasArchiveAccess=False, HasCurrentAccess=False)
finally:
# save PaDS code
ret_val.accessLimitedCode = resp.StatusCode
if resp.StatusCode == httpCodes.HTTP_401_UNAUTHORIZED: # or resp.ReasonStr == 'Session has not been authenticated':
# if this is True, then we can stop asking this time
# You would get the same return if
# the session was not recognised on pads,
# the session had been deleted from the database (should never happen…), or
# the session simply never existed.
ret_val.accessLimited = True
session_info.authenticated = False
msg = f"Full text of {doc_id} unavailable. " + opasConfig.ACCESSLIMITED_401_UNAUTHORIZED
ret_val.accessLimitedReason = msg
else:
# set default again based on update from PaDS query
ret_val.accessLimited = True
if ret_val.accessLimitedClassifiedAsCurrentContent == True:
if resp.HasCurrentAccess == True:
session_info.authorized_pepcurrent = True
ret_val.accessLimited = False
ret_val.accessChecked = True
else:
ret_val.accessLimited = True
else: # not current content
if resp.HasArchiveAccess == True:
session_info.authorized_peparchive = True
ret_val.accessLimited = False
ret_val.accessChecked = True
if fulltext_request and pads_authorized:
# let's make sure we know about this user.
if session_info.user_id == opasConfig.USER_NOT_LOGGED_IN_NAME:
# We got this far, We need to find out who this is
pads_user_info, status_code = get_authserver_session_userinfo(session_info.session_id, session_info.api_client_id, addl_log_info=" (user info not yet collected)")
if pads_user_info is not None:
session_info.user_id = pads_user_info.UserId
session_info.username = pads_user_info.UserName
session_info.user_type = pads_user_info.UserType # TODO - Add this to session table
# session_info.session_expires_time = ?
# ocd = opasCentralDBLib.opasCentralDB()
ocd.update_session(session_info.session_id,
userID=session_info.user_id,
username=session_info.username,
authenticated=1,
authorized_peparchive=1 if session_info.authorized_peparchive == True else 0,
authorized_pepcurrent=1 if session_info.authorized_pepcurrent == True else 0,
session_end=session_info.session_expires_time,
api_client_id=session_info.api_client_id
)
if pads_authorized:
# "This content is available for you to access"
ret_val.accessLimited = False
ret_val.accessChecked = True
ret_val.accessLimitedDescription = opasConfig.ACCESSLIMITED_DESCRIPTION_AVAILABLE
ret_val.accessLimitedReason = opasConfig.ACCESSLIMITED_DESCRIPTION_AVAILABLE
msg = f"Document {doc_id} available. Pads Reason: {resp.ReasonStr}. Opas Reason: {ret_val.accessLimitedDescription} - {ret_val.accessLimitedReason}"
logger.debug(msg)
ret_val.accessLimitedDebugMsg = msg
else:
# changed from warning to info 2021-06-02 to reduce normal logging
msg = f"Document {doc_id} unavailable. Pads Reason: {resp.ReasonStr} Opas: {ret_val.accessLimitedDescription} - {ret_val.accessLimitedReason}"
logger.info(msg) # limited...get it elsewhere
ret_val.accessLimitedDebugMsg = msg
ret_val.accessLimited = True
if ret_val.accessLimitedClassifiedAsCurrentContent:
# embargoed
ret_val.accessLimitedReason = opasConfig.ACCESS_SUMMARY_EMBARGOED
else:
# non embargoed, but no access.
ret_val.accessLimitedReason = f"{ret_val.accessLimitedDescription} {ret_val.accessLimitedReason}"
else:
# not full-text OR (not authenticated or accessLimited==False)
msg = f"No PaDS check needed: Document {doc_id} accessLimited: {ret_val.accessLimited}. Authent: {session_info.authenticated}"
logger.debug(msg)
ret_val.accessLimitedDebugMsg = msg
else: # It's open access!
msg = f"No PaDS check needed: Document {doc_id} is open access"
logger.debug(msg)
ret_val.accessLimitedDebugMsg = msg
except Exception as e:
msg = f"{caller_name}: Issue checking document permission. Possibly not logged in {e}"
logger.error(msg)
ret_val.accessLimitedDebugMsg = msg
pass # can't be checked, will be unauthorized.
except Exception as e:
msg = f"{caller_name}: General exception {e} trying ascertain access limitations."
logger.error(msg)
if ret_val is None:
ret_val = models.AccessLimitations() # make sure there's defaults!
ret_val.accessLimitedDebugMsg = msg
if fulltext_request and ret_val.accessLimited:
# happens anytime someone views an abstract in Document mode because they don't have an account. Perfectly legal. Changed to info (from error)
msg = f"Full-text access for {doc_id} denied ({ret_val.accessLimitedCode}). Sess:{session_id}: Access:{ret_val.accessLimitedReason}"
logger.info(msg)
ret_val.accessLimitedDebugMsg = msg
return ret_val
# ##################################################################################################################################################
#
# LOCAL ROUTUNES
#
# ##################################################################################################################################################
def get_pads_session_info(session_id=None,
client_id=opasConfig.NO_CLIENT_ID,
retry=True,
request=None):
"""
Get the PaDS session model, and get a new session ID from the auth server if needed
"""
msg = ""
caller_name = "get_pads_session_info"
if client_id == opasConfig.NO_CLIENT_ID:
logger.warning(f"{caller_name}: Session info call for Session ID: {session_id} Client ID was NO_CLIENT_ID ({opasConfig.NO_CLIENT_ID}).")
if session_id is not None:
full_URL = base + f"/v1/Authenticate/IP/" + f"?SessionID={session_id}"
else:
full_URL = base + f"/v1/Authenticate/IP/"
req_url = "No request info."
if request is not None:
try: # just in case this generates an error
req_url = request.url # to log caller url
except Exception as e:
pass
user_ip = get_user_ip(request) # returns an IP if X_FORWARDED_FOR address is in header
try:
logger.debug(f"{caller_name}: calling PaDS")
if user_ip is not None and user_ip is not '':
headers = { opasConfig.X_FORWARDED_FOR:user_ip }
pads_session_info = requests.get(full_URL, headers) # Call PaDS
logger.debug(f"{caller_name}: Session ID:{session_id}. X_FORWARDED_FOR from authenticateIP: {user_ip}. URL: {req_url} PaDS Session Info: {pads_session_info}")
else:
pads_session_info = requests.get(full_URL) # Call PaDS
except Exception as e:
logger.error(f"{caller_name}: Authorization server not available. {e}")
pads_session_info = models.PadsSessionInfo()
else:
status_code = pads_session_info.status_code # save it for a bit (we replace pads_session_info below)
ocd.temp_pads_log_call(caller=caller_name, reason=caller_name, session_id=session_id, pads_call=full_URL, ip_address=user_ip, return_status_code=status_code) # Log Call PaDS
if status_code > 403: # e.g., (httpCodes.HTTP_500_INTERNAL_SERVER_ERROR, httpCodes.HTTP_503_SERVICE_UNAVAILABLE):
error_text = f"{caller_name}: PaDS session_info status_code is {status_code}"
logger.error(error_text)
# try once without the session ID
if retry == True:
pads_session_info = get_pads_session_info(client_id=client_id, retry=False, request=request)
pads_session_info.pads_status_response = status_code
else:
logger.error(error_text)
pads_session_info = models.PadsSessionInfo()
pads_session_info.pads_status_response = status_code
pads_session_info.pads_disposition = error_text
else:
try:
pads_session_info = pads_session_info.json()
pads_session_info = fix_pydantic_invalid_nones(pads_session_info, caller_name=caller_name)
pads_session_info = models.PadsSessionInfo(**pads_session_info)
pads_session_info.pads_status_response = status_code
logger.debug(f"PaDS Status Ok, Final IP Session Info: {pads_session_info} URL: {req_url}.")
except Exception as e:
msg = f"{caller_name}: Response processing error {e}"
logger.error(msg)
pads_session_info = models.PadsSessionInfo(**pads_session_info)
pads_session_info.pads_status_response = status_code
pads_session_info.pads_disposition = msg
return pads_session_info
if __name__ == "__main__":
import doctest
import sys
print (40*"*", "opasDocPermissionsTests", 40*"*")
print (f"Running in Python {sys.version_info[0]}.{sys.version_info[1]}")
logger = logging.getLogger(__name__)
ch = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s %(name)s %(lineno)d - %(levelname)s %(message)s')
ch.setFormatter(formatter)
logger.addHandler(ch)
doctest.testmod(optionflags=doctest.ELLIPSIS|doctest.NORMALIZE_WHITESPACE)
print ("Fini. Tests complete.") | [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
11748,
7007,
198,
11748,
4818,
8079,
198,
11748,
640,
198,
11748,
1034,
292,
16934,
198,
11748,
4981,
198,
11748,
... | 2.070156 | 18,801 |
from os.path import dirname, basename, isfile
import glob
modules = glob.glob(dirname(__file__)+"/*.py")
__all__ = [ basename(f)[:-3] for f in modules if isfile(f) and not f.endswith('__init__.py')]
from .proximal_join import proximal_join, get_column_types
from .interpolate import interpolate
from .stitch import stitch
from .jump_correct import jump_correct
from .derivative import derivative
| [
6738,
28686,
13,
6978,
1330,
26672,
3672,
11,
1615,
12453,
11,
318,
7753,
220,
198,
11748,
15095,
198,
18170,
796,
15095,
13,
4743,
672,
7,
15908,
3672,
7,
834,
7753,
834,
47762,
1,
15211,
13,
9078,
4943,
198,
834,
439,
834,
796,
68... | 3.045802 | 131 |
#!/usr/bin/env python3
"""
Author : Derek Widmayer <dwidmaye@gmail.com>
Date : 2021-01-10
Purpose: Rock the Casbah
"""
import argparse
# --------------------------------------------------
def get_args():
"""Jump the five"""
parser = argparse.ArgumentParser(
description='Jump the five',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('str',
metavar='str',
help='Input text')
return parser.parse_args()
# --------------------------------------------------
def main():
"""Encode jump the five"""
text = get_args().str
encoding = {'1': '9', '2': '8', '3': '7', '4': '6', '5': '0', '6': '4', '7': '3', '8': '2', '9': '1', '0': '5'}
encoded_text = ""
for char in text:
encoded_text += encoding.get(char, char)
print(f'{encoded_text}')
# --------------------------------------------------
if __name__ == '__main__':
main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
37811,
198,
13838,
1058,
20893,
24801,
76,
2794,
1279,
67,
28029,
11261,
68,
31,
14816,
13,
785,
29,
198,
10430,
220,
220,
1058,
33448,
12,
486,
12,
940,
198,
30026,
3455,
25,
46... | 2.544271 | 384 |
import os
import os.path
import glob
import cv2
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
IMAGENET_MEAN_BGR = [103.939, 116.779, 123.68]
def load_images(data_path, image_height, image_width, plot=False):
"""
Read an image in BGR,
resize to image_height x image_width,
subtract mean of ImageNet dataset
"""
# Get a list of images in the folder
os.chdir(data_path)
list = glob.glob('*.jpg')
N_images = len(list)
# Create arrays to store data
images = np.zeros((N_images, image_height, image_width, 3), dtype = np.float32)
if plot:
fig = plt.figure(figsize=(15,6))
for i in range(0, N_images):
# Load image
image_name = list[i]
image = cv2.imread(image_name)
if plot:
# Plot an image
fig.add_subplot(1, N_images, i+1)
plt.imshow(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
plt.axis('off')
plt.show()
# Resize to image_height x image_width
images[i, :, :, :] = cv2.resize(image.astype(np.float32),(image_height, image_width))
# Subtract ImageNet mean
images[i, :, :, :] -= IMAGENET_MEAN_BGR
return images
def load_images_with_labels(data_path, labels_path, image_height, image_width):
"""
Read an image in BGR,
resize to image_height x image_width,
subtract mean of ImageNet dataset.
Assign a label to an image:
1 if there is a tumour, 0 otherwise
"""
# Get a list of images in the folder
os.chdir(data_path)
list = glob.glob('*.jpeg')
N_images = len(list)
return N_images
# Create arrays to store data and labels
images = np.zeros((N_images, image_height, image_width, 3), dtype = np.float32)
labels = -1 * np.ones((N_images, 1), dtype = np.float32)
for i in range(0, N_images):
# Load image in BGR
image_name = list[i]
image = cv2.imread(image_name)
# Load image in RGB
# image = plt.imread(image_name)
# Convert RGB to BGR
#image = image[:, :, [2, 1, 0]]
# Resize to image_height x image_width
images[i, :, :, :] = cv2.resize(image.astype(np.float32),(image_height, image_width))
# Subtract ImageNet mean
images[i, :, :, :] -= IMAGENET_MEAN_BGR
# Assign a label to an image:
# 1 if there is a tumour, 0 otherwise
file_path = labels_path + image_name[:-5] + ".txt"
if os.path.isfile(file_path):
labels[i] = 1
else:
labels[i] = 0
return images, labels
def load_images_with_masks(data_path, mask_path, image_height, image_width, binary=False, plot=False):
"""
Read an image in BGR,
resize to image_height x image_width,
subtract mean of ImageNet dataset.
Read the corresponding binary mask.
"""
# Get the list of images
os.chdir(data_path)
image_list = glob.glob('*.jpg')
N_images = len(image_list)
# Get the list of masks
os.chdir(mask_path)
mask_list = glob.glob('*.jpg')
# Create arrays to store data
images = np.zeros((N_images, image_height, image_width, 3), dtype = np.float32)
masks = np.zeros((N_images, image_height, image_width), dtype = np.float32)
if plot:
fig = plt.figure(figsize=(15,6))
for i in range(0, N_images):
# Load image
image_name = image_list[i]
os.chdir(data_path)
image = cv2.imread(image_name)
# Resize to image_height x image_width
images[i, :, :, :] = cv2.resize(image.astype(np.float32),(image_height, image_width))
# Subtract ImageNet mean
images[i, :, :, :] -= IMAGENET_MEAN_BGR
# Check if there is a mask
mask_name = image_name[:-4] + '_mask.jpg'
if mask_name in mask_list:
os.chdir(mask_path)
mask = cv2.resize(plt.imread(mask_name).astype(np.float32), (image_height, image_width))
if binary:
mask = 0 * (mask < 128.0) + 1 * (mask >= 128.0)
masks[i, :, :] = mask
if plot:
# Plot image
fig.add_subplot(N_images, 2, 2*i+1)
plt.imshow(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
plt.axis('off')
# Plot mask
fig.add_subplot(N_images, 2, 2*i+2)
plt.imshow(mask)
plt.axis('off')
plt.show()
return images, masks
def split_train_val(data, labels, train_ratio=0.8):
"""
Split data on training and validation sets
"""
# Shuffle indeces
n = len(data)
indeces = list(range(0, n))
np.random.shuffle(indeces)
# Create training set
train_indeces = indeces[:round(train_ratio * n)]
X_train = data[train_indeces, :, :, :]
y_train = labels[train_indeces]
# Create validation set
val_indeces = indeces[round(train_ratio * n):]
X_val = data[val_indeces, :, :, :]
y_val = labels[val_indeces]
print("Training set:", X_train.shape, y_train.shape)
print("Validation set:", X_val.shape, y_val.shape)
return X_train, y_train, X_val, y_val
def stratified_train_val(data, labels, train_ratio=0.8, balance_classes=False):
"""
Create stratified training and validation sets for binary data
"""
# numbers of positive and negative samples in the dataset
n_pos = int(sum(labels))
n_neg = data.shape[0] - n_pos
print('Number of negative samples: ', n_neg)
print('Number of positive samples: ', n_pos)
print('Fraction of positive samples: ', n_pos / data.shape[0] * 100, '%')
# to fix class imbalance equalize
# the numbers of negative and positive samples
if balance_classes:
if n_neg > n_pos:
n_neg = n_pos
else:
n_pos = n_neg
# print the numbers of negative/positive samples
# in training and validation sets
print('Positive samples:',
round(train_ratio * n_pos), "in y_train,",
round((1 - train_ratio) * n_pos), "in y_val")
print('Negative samples:',
round(train_ratio * n_neg), "in y_train,",
round((1 - train_ratio) * n_neg), "in y_val")
# extract, shuffle and split indeces of positive samples
pos_indeces = (np.where(labels == 1))[0]
np.random.shuffle(pos_indeces)
pos_indeces_train = pos_indeces[:round(train_ratio * n_pos)]
pos_indeces_val = pos_indeces[round(train_ratio * n_pos):]
# extract, shuffle and split indeces of negative samples
neg_indeces = (np.where(labels == 0))[0]
np.random.shuffle(neg_indeces)
neg_indeces_train = neg_indeces[:round(train_ratio * n_neg)]
neg_indeces_val = neg_indeces[round(train_ratio * n_neg):]
# create a training set
train_indeces = np.append(pos_indeces_train, neg_indeces_train, axis=0)
np.random.shuffle(train_indeces)
X_train = data[train_indeces, :, :, :]
y_train = labels[train_indeces]
# create a validation set
val_indeces = np.append(pos_indeces_val, neg_indeces_val, axis = 0)
np.random.shuffle(val_indeces)
X_val = data[val_indeces, :, :, :]
y_val = labels[val_indeces]
print("Training set:", X_train.shape, y_train.shape)
print("Validation set:", X_val.shape, y_val.shape)
return X_train, y_train, X_val, y_val
| [
11748,
28686,
198,
11748,
28686,
13,
6978,
198,
11748,
15095,
198,
11748,
269,
85,
17,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
198,
3... | 2.132612 | 3,514 |
# pylint: disable=wildcard-import, unused-import, unused-wildcard-import
"""Neural network related operators."""
# Re-export in a specific file name so that autodoc can pick it up
from .op.nn import *
| [
2,
279,
2645,
600,
25,
15560,
28,
21992,
9517,
12,
11748,
11,
21958,
12,
11748,
11,
21958,
12,
21992,
9517,
12,
11748,
198,
37811,
8199,
1523,
3127,
3519,
12879,
526,
15931,
198,
2,
797,
12,
39344,
287,
257,
2176,
2393,
1438,
523,
3... | 3.40678 | 59 |
from aiofile import AIOFile
from os import remove
from re import findall
from api import util
from os.path import getsize
from os import listdir
from random import randint
| [
6738,
257,
952,
7753,
1330,
317,
9399,
8979,
198,
6738,
28686,
1330,
4781,
198,
6738,
302,
1330,
1064,
439,
198,
6738,
40391,
1330,
7736,
198,
6738,
28686,
13,
6978,
1330,
3011,
1096,
198,
6738,
28686,
1330,
1351,
15908,
198,
6738,
4738... | 3.729167 | 48 |
import os
import secrets
import tarfile
import time
import zipfile
from collections import defaultdict
import httpx
import pytest
from hatch.config.constants import PublishEnvVars
from hatch.utils.ci import running_in_ci
PUBLISHER_TOKEN = os.environ.get('HATCH_CI_PUBLISHER_TOKEN')
pytestmark = [
pytest.mark.skipif(not PUBLISHER_TOKEN, reason='Publishing tests are only executed within CI environments'),
]
@pytest.fixture(autouse=True)
@pytest.fixture
| [
11748,
28686,
198,
11748,
13141,
198,
11748,
13422,
7753,
198,
11748,
640,
198,
11748,
19974,
7753,
198,
6738,
17268,
1330,
4277,
11600,
198,
198,
11748,
2638,
87,
198,
11748,
12972,
9288,
198,
198,
6738,
25834,
13,
11250,
13,
9979,
1187,... | 2.957055 | 163 |
from manim import *
import numpy as np
# creates lists of lists of squares, used for input, kernel, and output
# moves kernel around and displays output squares one at a time
# creates padding
| [
6738,
582,
320,
1330,
1635,
201,
198,
11748,
299,
32152,
355,
45941,
201,
198,
220,
220,
220,
220,
201,
198,
220,
220,
220,
1303,
8075,
8341,
286,
8341,
286,
24438,
11,
973,
329,
5128,
11,
9720,
11,
290,
5072,
201,
198,
201,
198,
... | 3.142857 | 70 |
from django.urls import path
from . import views
urlpatterns = [
path('', views.feed, name='feed'),
path('post/<int:pk>/', views.PostDetailView.as_view(), name='post-detail'),
path('post/<int:pk>/delete/', views.PostDeleteView.as_view(), name='post-delete'),
path('post/new/', views.PostCreateView.as_view(), name='post-create'),
]
| [
6738,
42625,
14208,
13,
6371,
82,
1330,
3108,
198,
6738,
764,
1330,
5009,
628,
198,
6371,
33279,
82,
796,
685,
198,
220,
220,
220,
3108,
10786,
3256,
5009,
13,
12363,
11,
1438,
11639,
12363,
33809,
198,
220,
220,
220,
3108,
10786,
735... | 2.651515 | 132 |
''' demo of reading a button
2017-0808 PePo - added OLED display to demo
Adafruit article:
https://learn.adafruit.com/micropython-hardware-digital-i-slash-o/digital-inputs
'''
import machine, time
import ssd1306
__LED_PIN = const(14) #GPIO14
__BUTTON_PIN = const(12) #GPIO12
#define led to be set on / off by button
led = machine.Pin(__LED_PIN, machine.Pin.OUT)
led.off()
# OPTIONAL: status of led: True=on, False=off
# led_status = False
# create i2c for OLED display
i2c = machine.I2C(scl=machine.Pin(5), sda=machine.Pin(4), freq=100000)
print('i2c.scan: ', i2c.scan()) #[60]
# OLED screen dimensions
__WIDTH = const(128)
__HEIGHT = const(32)
oled = ssd1306.SSD1306_I2C(__WIDTH, __HEIGHT, i2c)
# define button on Pin GPIO12
button = machine.Pin(__BUTTON_PIN, machine.Pin.IN, machine.Pin.PULL_UP)
# helper to refresh OLED display
# demo ...
# run demo
try:
print('Button demo, press button...')
refreshOLED('Press button!')
run()
except:
print('Done')
refreshOLED('Done!')
| [
7061,
6,
13605,
286,
3555,
257,
4936,
198,
2177,
12,
15,
28362,
2631,
18833,
532,
2087,
47463,
3359,
284,
13605,
198,
1215,
1878,
4872,
2708,
25,
198,
3740,
1378,
35720,
13,
324,
1878,
4872,
13,
785,
14,
9383,
1773,
7535,
12,
10424,
... | 2.559796 | 393 |
from copy import deepcopy
from dataclasses import dataclass
import itertools
import re
from typing import Dict
from typing import Optional
import numpy as np
import pandas as pd
from pandas.testing import assert_frame_equal
import pytest
from obp.ope import SlateIndependentIPS
from obp.ope import SlateOffPolicyEvaluation
from obp.ope import SlateRewardInteractionIPS
from obp.ope import SlateStandardIPS
from obp.types import BanditFeedback
from obp.utils import check_confidence_interval_arguments
mock_policy_value = 0.5
mock_confidence_interval = {
"mean": 0.5,
"95.0% CI (lower)": 0.3,
"95.0% CI (upper)": 0.7,
}
@dataclass
class SlateStandardIPSMock(SlateStandardIPS):
"""Slate Standard Inverse Propensity Scoring (SIPS) Mock."""
estimator_name: str = "sips"
eps: float = 0.1
def estimate_policy_value(
self,
slate_id: np.ndarray,
reward: np.ndarray,
position: np.ndarray,
pscore: np.ndarray,
evaluation_policy_pscore: np.ndarray,
**kwargs,
) -> float:
"""Estimate the policy value of evaluation policy.
Returns
----------
mock_policy_value: float
"""
return mock_policy_value + self.eps
def estimate_interval(
self,
slate_id: np.ndarray,
reward: np.ndarray,
position: np.ndarray,
pscore: np.ndarray,
evaluation_policy_pscore: np.ndarray,
alpha: float = 0.05,
n_bootstrap_samples: int = 10000,
random_state: Optional[int] = None,
**kwargs,
) -> Dict[str, float]:
"""Estimate confidence interval of policy value by nonparametric bootstrap procedure.
Returns
----------
mock_confidence_interval: Dict[str, float]
Dictionary storing the estimated mean and upper-lower confidence bounds.
"""
check_confidence_interval_arguments(
alpha=alpha,
n_bootstrap_samples=n_bootstrap_samples,
random_state=random_state,
)
return {k: v + self.eps for k, v in mock_confidence_interval.items()}
@dataclass
class SlateIndependentIPSMock(SlateIndependentIPS):
"""Slate Independent Inverse Propensity Scoring (IIPS) Mock."""
estimator_name: str = "iips"
def estimate_policy_value(
self,
slate_id: np.ndarray,
reward: np.ndarray,
position: np.ndarray,
pscore_item_position: np.ndarray,
evaluation_policy_pscore_item_position: np.ndarray,
**kwargs,
) -> float:
"""Estimate the policy value of evaluation policy.
Returns
----------
mock_policy_value: float
"""
return mock_policy_value
def estimate_interval(
self,
slate_id: np.ndarray,
reward: np.ndarray,
position: np.ndarray,
pscore_item_position: np.ndarray,
evaluation_policy_pscore_item_position: np.ndarray,
alpha: float = 0.05,
n_bootstrap_samples: int = 10000,
random_state: Optional[int] = None,
**kwargs,
) -> Dict[str, float]:
"""Estimate confidence interval of policy value by nonparametric bootstrap procedure.
Returns
----------
mock_confidence_interval: Dict[str, float]
Dictionary storing the estimated mean and upper-lower confidence bounds.
"""
check_confidence_interval_arguments(
alpha=alpha,
n_bootstrap_samples=n_bootstrap_samples,
random_state=random_state,
)
return {k: v for k, v in mock_confidence_interval.items()}
@dataclass
class SlateRewardInteractionIPSMock(SlateRewardInteractionIPS):
"""Slate Recursive Inverse Propensity Scoring (RIPS) Mock."""
estimator_name: str = "rips"
def estimate_policy_value(
self,
slate_id: np.ndarray,
reward: np.ndarray,
position: np.ndarray,
pscore_cascade: np.ndarray,
evaluation_policy_pscore_cascade: np.ndarray,
**kwargs,
) -> float:
"""Estimate the policy value of evaluation policy.
Returns
----------
mock_policy_value: float
"""
return mock_policy_value
def estimate_interval(
self,
slate_id: np.ndarray,
reward: np.ndarray,
position: np.ndarray,
pscore_cascade: np.ndarray,
evaluation_policy_pscore_cascade: np.ndarray,
alpha: float = 0.05,
n_bootstrap_samples: int = 10000,
random_state: Optional[int] = None,
**kwargs,
) -> Dict[str, float]:
"""Estimate confidence interval of policy value by nonparametric bootstrap procedure.
Returns
----------
mock_confidence_interval: Dict[str, float]
Dictionary storing the estimated mean and upper-lower confidence bounds.
"""
check_confidence_interval_arguments(
alpha=alpha,
n_bootstrap_samples=n_bootstrap_samples,
random_state=random_state,
)
return {k: v for k, v in mock_confidence_interval.items()}
# define Mock instances
sips = SlateStandardIPSMock(len_list=3)
sips2 = SlateStandardIPSMock(len_list=3, eps=0.02)
sips3 = SlateStandardIPSMock(len_list=3, estimator_name="sips3")
iips = SlateIndependentIPSMock(len_list=3)
rips = SlateRewardInteractionIPSMock(len_list=3)
def test_meta_post_init(synthetic_slate_bandit_feedback: BanditFeedback) -> None:
"""
Test the __post_init__ function
"""
# __post_init__ saves the latter estimator when the same estimator name is used
ope_ = SlateOffPolicyEvaluation(
bandit_feedback=synthetic_slate_bandit_feedback, ope_estimators=[sips, sips2]
)
assert ope_.ope_estimators_ == {
"sips": sips2
}, "__post_init__ returns a wrong value"
# __post_init__ can handle the same estimator if the estimator names are different
ope_ = SlateOffPolicyEvaluation(
bandit_feedback=synthetic_slate_bandit_feedback, ope_estimators=[sips, sips3]
)
assert ope_.ope_estimators_ == {
"sips": sips,
"sips3": sips3,
}, "__post_init__ returns a wrong value"
# __post__init__ raises RuntimeError when necessary_keys are not included in the bandit_feedback
necessary_keys = ["slate_id", "position", "reward"]
for i in range(len(necessary_keys)):
for deleted_keys in itertools.combinations(necessary_keys, i + 1):
invalid_bandit_feedback_dict = {key: "_" for key in necessary_keys}
# delete
for k in deleted_keys:
del invalid_bandit_feedback_dict[k]
with pytest.raises(RuntimeError, match=r"Missing key*"):
_ = SlateOffPolicyEvaluation(
bandit_feedback=invalid_bandit_feedback_dict, ope_estimators=[sips]
)
# evaluation_policy_pscore, description
invalid_input_of_create_estimator_inputs = [
(
None,
"one of evaluation_policy_pscore, evaluation_policy_pscore_item_position, or evaluation_policy_pscore_cascade must be given",
),
]
# evaluation_policy_pscore, evaluation_policy_pscore_item_position, evaluation_policy_pscore_cascade, description
valid_input_of_create_estimator_inputs = [
(
np.ones(300),
np.ones(300),
np.ones(300),
"deterministic evaluation policy",
),
]
@pytest.mark.parametrize(
"evaluation_policy_pscore, description",
invalid_input_of_create_estimator_inputs,
)
def test_meta_create_estimator_inputs_using_invalid_input_data(
evaluation_policy_pscore,
description: str,
synthetic_slate_bandit_feedback: BanditFeedback,
) -> None:
"""
Test the _create_estimator_inputs using valid data and a sips estimator
"""
ope_ = SlateOffPolicyEvaluation(
bandit_feedback=synthetic_slate_bandit_feedback, ope_estimators=[sips]
)
# raise ValueError when the shape of two arrays are different
with pytest.raises(ValueError, match=f"{description}*"):
_ = ope_._create_estimator_inputs(
evaluation_policy_pscore=evaluation_policy_pscore
)
# _create_estimator_inputs function is called in the following functions
with pytest.raises(ValueError, match=f"{description}*"):
_ = ope_.estimate_policy_values(
evaluation_policy_pscore=evaluation_policy_pscore
)
with pytest.raises(ValueError, match=f"{description}*"):
_ = ope_.estimate_intervals(evaluation_policy_pscore=evaluation_policy_pscore)
with pytest.raises(ValueError, match=f"{description}*"):
_ = ope_.summarize_off_policy_estimates(
evaluation_policy_pscore=evaluation_policy_pscore
)
with pytest.raises(ValueError, match=f"{description}*"):
_ = ope_.evaluate_performance_of_estimators(
ground_truth_policy_value=0.1,
evaluation_policy_pscore=evaluation_policy_pscore,
)
with pytest.raises(ValueError, match=f"{description}*"):
_ = ope_.summarize_estimators_comparison(
ground_truth_policy_value=0.1,
evaluation_policy_pscore=evaluation_policy_pscore,
)
@pytest.mark.parametrize(
"evaluation_policy_pscore, evaluation_policy_pscore_item_position, evaluation_policy_pscore_cascade, description",
valid_input_of_create_estimator_inputs,
)
def test_meta_create_estimator_inputs_using_valid_input_data(
evaluation_policy_pscore,
evaluation_policy_pscore_item_position,
evaluation_policy_pscore_cascade,
description: str,
synthetic_slate_bandit_feedback: BanditFeedback,
) -> None:
"""
Test the _create_estimator_inputs using invalid data
"""
ope_ = SlateOffPolicyEvaluation(
bandit_feedback=synthetic_slate_bandit_feedback, ope_estimators=[sips]
)
estimator_inputs = ope_._create_estimator_inputs(
evaluation_policy_pscore=evaluation_policy_pscore
)
assert set(estimator_inputs.keys()) == set(
[
"reward",
"pscore",
"pscore_item_position",
"pscore_cascade",
"position",
"evaluation_policy_pscore",
"evaluation_policy_pscore_item_position",
"evaluation_policy_pscore_cascade",
"slate_id",
]
), f"Invalid response of _create_estimator_inputs (test case: {description})"
# _create_estimator_inputs function is called in the following functions
_ = ope_.estimate_policy_values(evaluation_policy_pscore=evaluation_policy_pscore)
_ = ope_.estimate_intervals(evaluation_policy_pscore=evaluation_policy_pscore)
_ = ope_.summarize_off_policy_estimates(
evaluation_policy_pscore=evaluation_policy_pscore
)
_ = ope_.evaluate_performance_of_estimators(
ground_truth_policy_value=0.1, evaluation_policy_pscore=evaluation_policy_pscore
)
_ = ope_.summarize_estimators_comparison(
ground_truth_policy_value=0.1, evaluation_policy_pscore=evaluation_policy_pscore
)
@pytest.mark.parametrize(
"evaluation_policy_pscore, evaluation_policy_pscore_item_position, evaluation_policy_pscore_cascade, description",
valid_input_of_create_estimator_inputs,
)
def test_meta_estimate_policy_values_using_valid_input_data(
evaluation_policy_pscore,
evaluation_policy_pscore_item_position,
evaluation_policy_pscore_cascade,
description: str,
synthetic_slate_bandit_feedback: BanditFeedback,
) -> None:
"""
Test the response of estimate_policy_values using valid data
"""
# single ope estimator (iips)
ope_ = SlateOffPolicyEvaluation(
bandit_feedback=synthetic_slate_bandit_feedback, ope_estimators=[iips]
)
assert ope_.estimate_policy_values(
evaluation_policy_pscore_item_position=evaluation_policy_pscore_item_position
) == {
"iips": mock_policy_value
}, "SlateOffPolicyEvaluation.estimate_policy_values ([IIPS]) returns a wrong value"
# multiple ope estimators
ope_ = SlateOffPolicyEvaluation(
bandit_feedback=synthetic_slate_bandit_feedback,
ope_estimators=[iips, sips, rips],
)
assert ope_.estimate_policy_values(
evaluation_policy_pscore=evaluation_policy_pscore,
evaluation_policy_pscore_item_position=evaluation_policy_pscore_item_position,
evaluation_policy_pscore_cascade=evaluation_policy_pscore_cascade,
) == {
"iips": mock_policy_value,
"sips": mock_policy_value + sips.eps,
"rips": mock_policy_value,
}, "SlateOffPolicyEvaluation.estimate_policy_values ([IIPS, SIPS, RIPS]) returns a wrong value"
@pytest.mark.parametrize(
"evaluation_policy_pscore, evaluation_policy_pscore_item_position, evaluation_policy_pscore_cascade, description",
valid_input_of_create_estimator_inputs,
)
# alpha, n_bootstrap_samples, random_state, err, description
invalid_input_of_estimate_intervals = [
(
0.05,
100,
"s",
ValueError,
"'s' cannot be used to seed a numpy.random.RandomState instance",
),
(0.05, -1, 1, ValueError, "`n_bootstrap_samples`= -1, must be >= 1"),
(
0.05,
"s",
1,
TypeError,
"`n_bootstrap_samples` must be an instance of <class 'int'>, not <class 'str'>",
),
(-1.0, 1, 1, ValueError, "`alpha`= -1.0, must be >= 0.0"),
(2.0, 1, 1, ValueError, "`alpha`= 2.0, must be <= 1.0"),
(
"0",
1,
1,
TypeError,
"`alpha` must be an instance of <class 'float'>, not <class 'str'>",
),
]
valid_input_of_estimate_intervals = [
(0.05, 100, 1, "random_state is 1"),
(0.05, 1, 1, "n_bootstrap_samples is 1"),
]
@pytest.mark.parametrize(
"evaluation_policy_pscore, evaluation_policy_pscore_item_position, evaluation_policy_pscore_cascade, description_1",
valid_input_of_create_estimator_inputs,
)
@pytest.mark.parametrize(
"alpha, n_bootstrap_samples, random_state, err, description_2",
invalid_input_of_estimate_intervals,
)
def test_meta_estimate_intervals_using_invalid_input_data(
evaluation_policy_pscore,
evaluation_policy_pscore_item_position,
evaluation_policy_pscore_cascade,
description_1: str,
alpha,
n_bootstrap_samples,
random_state,
err,
description_2: str,
synthetic_slate_bandit_feedback: BanditFeedback,
) -> None:
"""
Test the response of estimate_intervals using invalid data
"""
ope_ = SlateOffPolicyEvaluation(
bandit_feedback=synthetic_slate_bandit_feedback, ope_estimators=[iips]
)
with pytest.raises(err, match=f"{description_2}*"):
_ = ope_.estimate_intervals(
evaluation_policy_pscore_item_position=evaluation_policy_pscore_item_position,
alpha=alpha,
n_bootstrap_samples=n_bootstrap_samples,
random_state=random_state,
)
# estimate_intervals function is called in summarize_off_policy_estimates
with pytest.raises(err, match=f"{description_2}*"):
_ = ope_.summarize_off_policy_estimates(
evaluation_policy_pscore_item_position=evaluation_policy_pscore_item_position,
alpha=alpha,
n_bootstrap_samples=n_bootstrap_samples,
random_state=random_state,
)
@pytest.mark.parametrize(
"evaluation_policy_pscore, evaluation_policy_pscore_item_position, evaluation_policy_pscore_cascade, description_1",
valid_input_of_create_estimator_inputs,
)
@pytest.mark.parametrize(
"alpha, n_bootstrap_samples, random_state, description_2",
valid_input_of_estimate_intervals,
)
def test_meta_estimate_intervals_using_valid_input_data(
evaluation_policy_pscore,
evaluation_policy_pscore_item_position,
evaluation_policy_pscore_cascade,
description_1: str,
alpha: float,
n_bootstrap_samples: int,
random_state: int,
description_2: str,
synthetic_slate_bandit_feedback: BanditFeedback,
) -> None:
"""
Test the response of estimate_intervals using valid data
"""
# single ope estimator
ope_ = SlateOffPolicyEvaluation(
bandit_feedback=synthetic_slate_bandit_feedback, ope_estimators=[iips]
)
assert ope_.estimate_intervals(
evaluation_policy_pscore_item_position=evaluation_policy_pscore_item_position,
alpha=alpha,
n_bootstrap_samples=n_bootstrap_samples,
random_state=random_state,
) == {
"iips": mock_confidence_interval
}, "SlateOffPolicyEvaluation.estimate_intervals ([IIPS]) returns a wrong value"
# multiple ope estimators
ope_ = SlateOffPolicyEvaluation(
bandit_feedback=synthetic_slate_bandit_feedback, ope_estimators=[iips, sips]
)
assert ope_.estimate_intervals(
evaluation_policy_pscore=evaluation_policy_pscore,
evaluation_policy_pscore_item_position=evaluation_policy_pscore_item_position,
alpha=alpha,
n_bootstrap_samples=n_bootstrap_samples,
random_state=random_state,
) == {
"iips": mock_confidence_interval,
"sips": {k: v + sips.eps for k, v in mock_confidence_interval.items()},
}, "SlateOffPolicyEvaluation.estimate_intervals ([IIPS, SIPS]) returns a wrong value"
@pytest.mark.parametrize(
"evaluation_policy_pscore, evaluation_policy_pscore_item_position, evaluation_policy_pscore_cascade, description_1",
valid_input_of_create_estimator_inputs,
)
@pytest.mark.parametrize(
"alpha, n_bootstrap_samples, random_state, description_2",
valid_input_of_estimate_intervals,
)
def test_meta_summarize_off_policy_estimates(
evaluation_policy_pscore,
evaluation_policy_pscore_item_position,
evaluation_policy_pscore_cascade,
description_1: str,
alpha: float,
n_bootstrap_samples: int,
random_state: int,
description_2: str,
synthetic_slate_bandit_feedback: BanditFeedback,
) -> None:
"""
Test the response of summarize_off_policy_estimates using valid data
"""
ope_ = SlateOffPolicyEvaluation(
bandit_feedback=synthetic_slate_bandit_feedback, ope_estimators=[sips, sips3]
)
value, interval = ope_.summarize_off_policy_estimates(
evaluation_policy_pscore=evaluation_policy_pscore,
alpha=alpha,
n_bootstrap_samples=n_bootstrap_samples,
random_state=random_state,
)
expected_value = pd.DataFrame(
{
"sips": mock_policy_value + sips.eps,
"sips3": mock_policy_value + sips3.eps,
},
index=["estimated_policy_value"],
).T
expected_value["relative_estimated_policy_value"] = expected_value[
"estimated_policy_value"
] / (
synthetic_slate_bandit_feedback["reward"].sum()
/ np.unique(synthetic_slate_bandit_feedback["slate_id"]).shape[0]
)
expected_interval = pd.DataFrame(
{
"sips": {k: v + sips.eps for k, v in mock_confidence_interval.items()},
"sips3": {k: v + sips3.eps for k, v in mock_confidence_interval.items()},
}
).T
assert_frame_equal(value, expected_value), "Invalid summarization (policy value)"
assert_frame_equal(interval, expected_interval), "Invalid summarization (interval)"
# check relative estimated policy value when the average of bandit_feedback["reward"] is zero
zero_reward_bandit_feedback = deepcopy(synthetic_slate_bandit_feedback)
zero_reward_bandit_feedback["reward"] = np.zeros(
zero_reward_bandit_feedback["reward"].shape[0]
)
ope_ = SlateOffPolicyEvaluation(
bandit_feedback=zero_reward_bandit_feedback, ope_estimators=[sips, sips3]
)
value, _ = ope_.summarize_off_policy_estimates(
evaluation_policy_pscore=evaluation_policy_pscore,
alpha=alpha,
n_bootstrap_samples=n_bootstrap_samples,
random_state=random_state,
)
expected_value = pd.DataFrame(
{
"sips": mock_policy_value + sips.eps,
"sips3": mock_policy_value + sips3.eps,
},
index=["estimated_policy_value"],
).T
expected_value["relative_estimated_policy_value"] = np.nan
assert_frame_equal(value, expected_value), "Invalid summarization (policy value)"
invalid_input_of_evaluation_performance_of_estimators = [
("foo", 0.3, ValueError, "metric must be either 'relative-ee' or 'se'"),
(
"se",
1,
TypeError,
"`ground_truth_policy_value` must be an instance of <class 'float'>, not <class 'int'>.",
),
(
"se",
"a",
TypeError,
"`ground_truth_policy_value` must be an instance of <class 'float'>, not <class 'str'>.",
),
(
"relative-ee",
0.0,
ValueError,
"ground_truth_policy_value must be non-zero when metric is relative-ee",
),
]
valid_input_of_evaluation_performance_of_estimators = [
("se", 0.0, "metric is se and ground_truth_policy_value is 0.0"),
("relative-ee", 1.0, "metric is relative-ee and ground_truth_policy_value is 1.0"),
]
@pytest.mark.parametrize(
"evaluation_policy_pscore, evaluation_policy_pscore_item_position, evaluation_policy_pscore_cascade, description_1",
valid_input_of_create_estimator_inputs,
)
@pytest.mark.parametrize(
"metric, ground_truth_policy_value, err, description_2",
invalid_input_of_evaluation_performance_of_estimators,
)
def test_meta_evaluate_performance_of_estimators_using_invalid_input_data(
evaluation_policy_pscore,
evaluation_policy_pscore_item_position,
evaluation_policy_pscore_cascade,
description_1: str,
metric,
ground_truth_policy_value,
err,
description_2: str,
synthetic_slate_bandit_feedback: BanditFeedback,
) -> None:
"""
Test the response of evaluate_performance_of_estimators using invalid data
"""
ope_ = SlateOffPolicyEvaluation(
bandit_feedback=synthetic_slate_bandit_feedback, ope_estimators=[iips]
)
with pytest.raises(err, match=f"{description_2}*"):
_ = ope_.evaluate_performance_of_estimators(
ground_truth_policy_value=ground_truth_policy_value,
evaluation_policy_pscore_item_position=evaluation_policy_pscore_item_position,
metric=metric,
)
# estimate_intervals function is called in summarize_off_policy_estimates
with pytest.raises(err, match=f"{description_2}*"):
_ = ope_.summarize_estimators_comparison(
ground_truth_policy_value=ground_truth_policy_value,
evaluation_policy_pscore_item_position=evaluation_policy_pscore_item_position,
metric=metric,
)
@pytest.mark.parametrize(
"evaluation_policy_pscore, evaluation_policy_pscore_item_position, evaluation_policy_pscore_cascade, description_1",
valid_input_of_create_estimator_inputs,
)
@pytest.mark.parametrize(
"metric, ground_truth_policy_value, description_2",
valid_input_of_evaluation_performance_of_estimators,
)
def test_meta_evaluate_performance_of_estimators_using_valid_input_data(
evaluation_policy_pscore,
evaluation_policy_pscore_item_position,
evaluation_policy_pscore_cascade,
description_1: str,
metric,
ground_truth_policy_value,
description_2: str,
synthetic_slate_bandit_feedback: BanditFeedback,
) -> None:
"""
Test the response of evaluate_performance_of_estimators using valid data
"""
if metric == "relative-ee":
# calculate relative-ee
eval_metric_ope_dict = {
"sips": np.abs(
(mock_policy_value + sips.eps - ground_truth_policy_value)
/ ground_truth_policy_value
),
"sips3": np.abs(
(mock_policy_value + sips3.eps - ground_truth_policy_value)
/ ground_truth_policy_value
),
}
else:
# calculate se
eval_metric_ope_dict = {
"sips": (mock_policy_value + sips.eps - ground_truth_policy_value) ** 2,
"sips3": (mock_policy_value + sips3.eps - ground_truth_policy_value) ** 2,
}
# check performance estimators
ope_ = SlateOffPolicyEvaluation(
bandit_feedback=synthetic_slate_bandit_feedback, ope_estimators=[sips, sips3]
)
performance = ope_.evaluate_performance_of_estimators(
ground_truth_policy_value=ground_truth_policy_value,
evaluation_policy_pscore=evaluation_policy_pscore,
metric=metric,
)
for k, v in performance.items():
assert k in eval_metric_ope_dict, "Invalid key of performance response"
assert v == eval_metric_ope_dict[k], "Invalid value of performance response"
performance_df = ope_.summarize_estimators_comparison(
ground_truth_policy_value=ground_truth_policy_value,
evaluation_policy_pscore=evaluation_policy_pscore,
metric=metric,
)
assert_frame_equal(
performance_df, pd.DataFrame(eval_metric_ope_dict, index=[metric]).T
), "Invalid summarization (performance)"
| [
6738,
4866,
1330,
2769,
30073,
198,
6738,
4818,
330,
28958,
1330,
4818,
330,
31172,
198,
11748,
340,
861,
10141,
198,
11748,
302,
198,
6738,
19720,
1330,
360,
713,
198,
6738,
19720,
1330,
32233,
198,
198,
11748,
299,
32152,
355,
45941,
... | 2.350366 | 10,666 |
import os, sys
__file__ = os.path.normpath(os.path.abspath(__file__))
__path__ = os.path.dirname(__file__)
__popup_path__ = os.path.join(__path__, 'popup')
# print(__path__)
if __path__ not in sys.path:
sys.path.insert(0, __path__)
if __popup_path__ not in sys.path:
sys.path.insert(0, __popup_path__)
from csharp_element import CSharpElement
from csharp_reference import CSharpReference
import popup.yaml_reference_popup
import popup.yaml_gameobject_popup
import popup.yaml_transform_popup
import popup.csharp_reference_popup
import popup.csharp_class_summary_popup
import popup.csharp_method_summary_popup
import popup.csharp_class_inherits_diagram_popup
import popup.git_whatchanged_commit_popup
import popup.git_summary_list_popup
## Popups ##
| [
11748,
28686,
11,
25064,
198,
198,
834,
7753,
834,
796,
28686,
13,
6978,
13,
27237,
6978,
7,
418,
13,
6978,
13,
397,
2777,
776,
7,
834,
7753,
834,
4008,
198,
834,
6978,
834,
796,
28686,
13,
6978,
13,
15908,
3672,
7,
834,
7753,
834... | 2.700704 | 284 |
# Mock apis needs to be commented before used within SAP Data Intelligence
#from diadmin.dimockapi.mock_api import mock_api
#api = mock_api(__file__)
import os
import json
import requests
import http.client
from base64 import b64encode
api.add_generator(gen) | [
2,
44123,
2471,
271,
2476,
284,
307,
16476,
878,
973,
1626,
48323,
6060,
9345,
198,
2,
6738,
2566,
28482,
13,
27740,
735,
15042,
13,
76,
735,
62,
15042,
1330,
15290,
62,
15042,
198,
2,
15042,
796,
15290,
62,
15042,
7,
834,
7753,
834... | 3.303797 | 79 |
from tkinter import filedialog
from bs4 import *
import re
from pprint import *
import pprint
import xlsxwriter
from tkinter import *
# from tkinter.filedialog import askopenfilename
Tk().withdraw() # we don't want a full GUI, so keep the root window from appearing
filename = filedialog.askopenfilename() # show an "Open" dialog box and return the path to the selected file
filename.replace("/", "\\\\")
rawhtml = open(filename,
encoding="utf-8").readlines()
hhosts =allhosts()
haha =foo(hhosts)
# print(type(haha))
# print(haha)
a, b =reformat(haha)
# print(a)
reformatforprint(a, b)
print("Done! Next!")
| [
6738,
256,
74,
3849,
1330,
5717,
498,
519,
201,
198,
6738,
275,
82,
19,
1330,
1635,
201,
198,
11748,
302,
201,
198,
6738,
279,
4798,
1330,
1635,
201,
198,
11748,
279,
4798,
201,
198,
11748,
2124,
7278,
87,
16002,
201,
198,
6738,
256... | 2.515038 | 266 |