content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
# Generated by Django 3.1.4 on 2020-12-08 11:56
from django.db import migrations, models
import django.db.models.deletion
| [
2,
2980,
515,
416,
37770,
513,
13,
16,
13,
19,
319,
12131,
12,
1065,
12,
2919,
1367,
25,
3980,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
198,
11748,
42625,
14208,
13,
9945,
13,
27530,
13,
2934,
1616,
295,
... | 2.818182 | 44 |
# Copyright (c) 2020 Jan Vrany <jan.vrany (a) fit.cvut.cz>
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import gi
gi.require_version("Gdk", "3.0")
gi.require_version("Gtk", "3.0")
from gi.repository import GObject, Gtk, Gdk
from math import pi
def sgn(value):
"""
Sign function
"""
if value < 0:
return -1
elif value == 0:
return 0
else:
return 1
def deg2rad(value):
"""
Convert value in degrees to radians (as required used cairo)
"""
return value * (pi / 180.0)
if __name__ == '__main__':
import sys
from gi.repository import Gio
app = WidgetApp(Joystick)
#app = WidgetApp(TiltIndicator)
app.run(sys.argv) | [
2,
15069,
357,
66,
8,
12131,
2365,
569,
81,
1092,
1279,
13881,
13,
37020,
1092,
357,
64,
8,
4197,
13,
33967,
315,
13,
26691,
29,
198,
2,
198,
2,
2448,
3411,
318,
29376,
7520,
11,
1479,
286,
3877,
11,
284,
597,
1048,
16727,
198,
... | 3.051786 | 560 |
"""
Created on June 21, 2018
@author: Moritz
"""
import numpy as np
| [
37811,
198,
41972,
319,
2795,
2310,
11,
2864,
198,
198,
31,
9800,
25,
3461,
4224,
198,
37811,
198,
198,
11748,
299,
32152,
355,
45941,
628
] | 2.84 | 25 |
# print table # | [
198,
2,
3601,
3084,
1303
] | 3.2 | 5 |
from methods import is_prime
from time import time
elapsed_time = time()
current_last = 1
side_len = 1
travel_by = 1
edges_num = 0
prime_edges_num = 0
ratio = 1
while ratio > 0.1:
side_len += 2
edges_num += 4
current_last += 1
current_last += travel_by
travel_by += 1
if is_prime(current_last):
prime_edges_num += 1
for i in range(0, 3):
current_last += travel_by
if is_prime(current_last):
prime_edges_num += 1
travel_by += 1
ratio = prime_edges_num / edges_num
print(side_len)
elapsed_time = time() - elapsed_time
print('Time: ' + str(elapsed_time))
| [
6738,
5050,
1330,
318,
62,
35505,
198,
6738,
640,
1330,
640,
198,
198,
417,
28361,
62,
2435,
796,
640,
3419,
198,
198,
14421,
62,
12957,
796,
352,
198,
198,
1589,
62,
11925,
796,
352,
198,
198,
35927,
62,
1525,
796,
352,
198,
198,
... | 2.326007 | 273 |
''' @package main driver for the fullerene energy computer.
'''
from sys import argv
from fullerene_curvature.curvature import compute_k_values, compute_g_values, \
compute_energy, compute_euler_characteristic
from fullerene_curvature.fullerene import Fullerene
if __name__ == "__main__":
file_name = argv[1]
try:
input_fullerene = Fullerene(file_name)
k_values = compute_k_values(input_fullerene)
g_values = compute_g_values(input_fullerene)
euler_characteristic = compute_euler_characteristic(g_values)
energy_value = compute_energy(k_values, g_values)
print("Energy:", energy_value)
print("Euler_Characteristic:", euler_characteristic)
except:
print("Failed:", file_name)
| [
7061,
6,
2488,
26495,
1388,
4639,
329,
262,
46246,
293,
25924,
2568,
3644,
13,
198,
7061,
6,
198,
6738,
25064,
1330,
1822,
85,
198,
6738,
46246,
293,
25924,
62,
22019,
85,
1300,
13,
22019,
85,
1300,
1330,
24061,
62,
74,
62,
27160,
1... | 2.557047 | 298 |
#!/usr/bin/python3
"""
Offers support for WS2812 LED LEDs via the hardware SPI MOSI
Uses py-spidev:
```
git clone https://github.com/doceme/py-spidev.git
cd py-spidev
make
make install
```
"""
import time
import datetime
import socket
import math
import re
import random
from izaber import initialize, config
from PIL import Image, ImageFilter, ImageFont, ImageDraw
while True:
try:
main()
except Exception as ex:
now = datetime.datetime.now()
print(f"Error at {now}! {ex}")
time.sleep(1)
| [
2,
48443,
14629,
14,
8800,
14,
29412,
18,
198,
198,
37811,
198,
9362,
364,
1104,
329,
25290,
2078,
1065,
12365,
33697,
2884,
262,
6890,
49091,
337,
2640,
40,
198,
198,
5842,
274,
12972,
12,
2777,
485,
85,
25,
198,
198,
15506,
63,
19... | 2.708333 | 192 |
#-*- coding:utf-8 -*-
# zhangwei99@baidu.com
import openpyxl as pyxl
import numpy as np
import math
import os
from collections import OrderedDict
#def parseTxt(TxtName, parseArray):
# fp = open(TxtName)
# lines = fp.readlines()
# for line in lines:
# sp = line.rstrip("\n").split(",")
# fId = int(sp[1])
# if fId >= 3000:
# continue
# mId = int(sp[2])
# type = int(sp[3])
# parseArray[fId, mId - 1, type] = 1
if __name__ == "__main__":
videoInfo = {"cam_1":{"frame_num":3000, "movement_num":4},
"cam_1_dawn":{"frame_num":3000, "movement_num":4},
"cam_1_rain":{"frame_num":2961, "movement_num":4},
"cam_2":{"frame_num":18000, "movement_num":4},
"cam_2_rain":{"frame_num":3000, "movement_num":4},
"cam_3":{"frame_num":18000, "movement_num":4},
"cam_3_rain":{"frame_num":3000, "movement_num":4},
"cam_4":{"frame_num":27000, "movement_num":12},
"cam_4_dawn":{"frame_num":4500, "movement_num":12},
"cam_4_rain":{"frame_num":3000, "movement_num":12},
"cam_5":{"frame_num":18000, "movement_num":12},
"cam_5_dawn":{"frame_num":3000, "movement_num":12},
"cam_5_rain":{"frame_num":3000, "movement_num":12},
"cam_6":{"frame_num":18000, "movement_num":12},
"cam_6_snow":{"frame_num":3000, "movement_num":12},
"cam_7":{"frame_num":14400, "movement_num":12},
"cam_7_dawn":{"frame_num":2400, "movement_num":12},
"cam_7_rain":{"frame_num":3000, "movement_num":12},
"cam_8":{"frame_num":3000, "movement_num":6},
"cam_9":{"frame_num":3000, "movement_num":12},
"cam_10":{"frame_num":2111, "movement_num":3},
"cam_11":{"frame_num":2111, "movement_num":3},
"cam_12":{"frame_num":1997, "movement_num":3},
"cam_13":{"frame_num":1966, "movement_num":3},
"cam_14":{"frame_num":3000, "movement_num":2},
"cam_15":{"frame_num":3000, "movement_num":2},
"cam_16":{"frame_num":3000, "movement_num":2},
"cam_17":{"frame_num":3000, "movement_num":2},
"cam_18":{"frame_num":3000, "movement_num":2},
"cam_19":{"frame_num":3000, "movement_num":2},
"cam_20":{"frame_num":3000, "movement_num":2}}
# segment number
n = 10
gtXlsxRoot = "./gt/"
pdXlsxRoot = "./vehicle_counting_results/"
vNum = len(videoInfo.keys())
nwRMSEVec = np.zeros(vNum)
vehicleNumVec = np.zeros(vNum)
vId = 0
for vName, info in videoInfo.items():
fNum = videoInfo[vName]["frame_num"]
if fNum > 3000:
fNum = 3000
mNum = videoInfo[vName]["movement_num"]
# parse gt
gtArray = np.zeros((fNum, mNum, 2))
gtXlsx = gtXlsxRoot + "/" + vName + ".xlsx"
if not os.path.exists(gtXlsx):
continue
parseXLSX(gtXlsx, gtArray)
# parse prediction
pdArray = np.zeros((fNum, mNum, 2))
pdXlsx = pdXlsxRoot + "/" + vName + ".txt"
if not os.path.exists(pdXlsx):
continue
parseTxt(pdXlsx, pdArray)
nwRMSE, vehicleNum = compute_nwRMSE(n, pdArray, gtArray)
nwRMSEVec[vId] = nwRMSE
vehicleNumVec[vId] = vehicleNum
vId += 1
print(" %s nwRMSE: %f"%(vName, nwRMSE/vehicleNum))
score2 = sum(nwRMSEVec) / sum(vehicleNumVec)
baseFactor = 0.464906
videoTotal = 300 + 296 + 300 + 300 + 30 * 60 + 300 + 30 * 60 + 300 + 300 + 30 * 60 + 300 + 300 + 30 * 60 + 300 + 30 * 60 + 300 + 300 + 30 * 60 + 300 + 300 + 211 + 211 + 200 + 197 + 300 + 300 + 300 + 300 + 300 + 300 + 300
#time = 6217
time = 9997 # res50
time = 11418 # res50 pipeline
#time = 43642 # res154
#time = 8487 # omni
score1 = 1 - (time * baseFactor) / (5 * float(videoTotal))
score = 0.3 * score1 + 0.7 * score2
print("\ns1: %f; effective: %f; efficient: %f"%(score, score2, score1))
| [
2,
12,
9,
12,
19617,
25,
40477,
12,
23,
532,
9,
12,
198,
2,
1976,
33255,
42990,
2079,
31,
65,
1698,
84,
13,
785,
198,
11748,
1280,
9078,
87,
75,
355,
12972,
87,
75,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
10688,
198,
117... | 1.896116 | 2,214 |
#!env/bin/python3
import itertools
import pycosat
from mongoengine import *
from .models import PEOPLE, WEAPONS, ROOMS
| [
2,
0,
24330,
14,
8800,
14,
29412,
18,
198,
198,
11748,
340,
861,
10141,
198,
11748,
12972,
6966,
265,
198,
6738,
285,
25162,
18392,
1330,
1635,
198,
198,
6738,
764,
27530,
1330,
36388,
11,
12887,
2969,
19213,
11,
15107,
2662,
50,
628
... | 2.904762 | 42 |
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from st2common import log as logging
from st2common.services.rules import get_rules_given_trigger
from st2common.services.triggers import get_trigger_db_by_ref
from st2reactor.rules.enforcer import RuleEnforcer
from st2reactor.rules.matcher import RulesMatcher
from st2common.metrics.base import get_driver
LOG = logging.getLogger('st2reactor.rules.RulesEngine')
__all__ = [
'RulesEngine'
]
| [
2,
49962,
284,
262,
23881,
32173,
11,
3457,
19203,
25896,
32173,
11537,
739,
530,
393,
517,
198,
2,
18920,
5964,
11704,
13,
220,
4091,
262,
28536,
2393,
9387,
351,
198,
2,
428,
670,
329,
3224,
1321,
5115,
6634,
9238,
13,
198,
2,
383... | 3.721713 | 327 |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2007-2012 Peter Kuma
import os
from datetime import date
from django.http import Http404
from django.core.exceptions import PermissionDenied
from django.shortcuts import render
from django.template import RequestContext
from django.core.exceptions import ObjectDoesNotExist
from django import forms
from django.http import HttpResponseRedirect
from django.utils.translation import ugettext as _
from django.core.paginator import Paginator, InvalidPage, EmptyPage
from django.conf import settings
from django.shortcuts import get_object_or_404
from django.template.response import TemplateResponse
from main.models import *
from news.models import *
from news.feed import NewsFeed
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
198,
2,
15069,
357,
66,
8,
4343,
12,
6999,
5613,
509,
7487,
198,
198,
11748,
28686,
198,
6738,
4818,
8079,
1330,
3128,
198,
198,
6738,
42625,
14208,
13,
4023,
1330,... | 3.558824 | 204 |
keyboard.send_keys('<ctrl>+/') | [
2539,
3526,
13,
21280,
62,
13083,
10786,
27,
44755,
29,
10,
14,
11537
] | 2.307692 | 13 |
import pandas as pd
| [
11748,
19798,
292,
355,
279,
67,
628
] | 3 | 7 |
# -*- coding: utf-8 -*-
# @Author: xiaodong
# @Date : 2021/5/27
__author__ = "xiaodong"
__version__ = "0.1.3"
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
2488,
13838,
25,
2124,
544,
375,
506,
198,
2,
2488,
10430,
220,
1058,
33448,
14,
20,
14,
1983,
198,
198,
834,
9800,
834,
796,
366,
36072,
375,
506,
1,
198,
834,
... | 1.964912 | 57 |
import random
player = input('Enter your name: ')
print(f'Hello, {player}')
score = 0
rating = open('rating.txt')
for rate in rating:
if player in rate:
score = int(rate.split()[1])
rating.close()
winning_cases = {
'water' : ['scissors', 'fire', 'rock', 'hun', 'lightning', 'devil', 'dragon'],
'dragon' : ['snake', 'scissors', 'fire', 'rock', 'gun', 'lightning', 'devil'],
'devil' : ['tree', 'human', 'snake', 'scissors', 'fire', 'rock', 'gun'],
'gun' : ['wolf', 'tree', 'human', 'snake', 'scissors', 'fire', 'rock'],
'rock' : ['sponge', 'wolf', 'tree', 'human', 'snake', 'scissors', 'fire'],
'fire' : ['paper', 'sponge', 'wolf', 'tree', 'human', 'snake', 'scissors'],
'scissors' : ['air', 'paper', 'sponge', 'wolf', 'tree', 'human', 'snake'],
'snake' : ['water', 'air', 'paper', 'sponge', 'wolf', 'tree', 'human'],
'human' : ['dragon', 'water', 'air', 'paper', 'sponge', 'wolf', 'tree'],
'tree' : ['devil', 'dragon', 'water', 'air', 'paper', 'sponge', 'wolf'],
'wolf' : ['lightning', 'devil', 'dragon', 'water', 'air', 'paper', 'sponge'],
'sponge' : ['gun', 'lightning', 'devil', 'dragon', 'water', 'air', 'paper'],
'paper' : ['rock', 'gun', 'lightning', 'devil', 'dragon', 'water', 'air'],
'air' : ['fire', 'rock', 'gun', 'lightning', 'devil', 'dragon', 'water'],
'lightning' : ['tree', 'human', 'snake', 'scissors', 'fire', 'rock', 'gun']
}
option = input().split(',')
if len(option) == 1:
option = ['scissors', 'paper', 'rock']
print("Okay, let's start")
while True:
user = input()
computer = random.choice(option)
if user == '!rating':
print(score)
continue
elif user == '!exit':
print('Bye!')
break
elif user not in option:
print(option)
print('Invalid input')
continue
if user == computer:
print('There is a draw ({})'.format(computer))
score += 50
elif computer in winning_cases[user]:
print('Well done. The computer chose {} and failed'.format(computer))
score += 100
elif computer not in winning_cases[user]:
print('Sorry, but the computer chose {}'.format(computer))
| [
11748,
4738,
198,
198,
7829,
796,
5128,
10786,
17469,
534,
1438,
25,
705,
8,
198,
4798,
7,
69,
6,
15496,
11,
1391,
7829,
92,
11537,
198,
198,
26675,
796,
657,
198,
8821,
796,
1280,
10786,
8821,
13,
14116,
11537,
198,
1640,
2494,
287... | 2.45262 | 897 |
# Copyright (c) 2015-2021 Patricio Cubillos and contributors.
# mc3 is open-source software under the MIT license (see LICENSE).
__all__ = [
'bin_array',
'residuals',
'chisq',
'dwt_chisq',
'log_prior',
'cred_region',
'ppf_uniform',
'ppf_gaussian',
'dwt_daub4',
'Loglike',
'Prior_transform',
]
import sys
import numpy as np
import scipy.stats as ss
import scipy.interpolate as si
from .. import utils as mu
sys.path.append(mu.ROOT + 'mc3/lib/')
import _binarray as ba
import _chisq as cs
import _dwt as dwt
def bin_array(data, binsize, uncert=None):
"""
Compute the binned weighted mean and standard deviation of an array
using 1/uncert**2 as weights.
Eq. (4.31) of Data Reduction and Error Analysis for the Physical
Sciences by Bevington & Robinson).
Parameters
----------
data: 1D ndarray
A time-series dataset.
binsize: Integer
Number of data points per bin.
uncert: 1D ndarray
Uncertainties of data (if None, assume that all data points have
same uncertainty).
Returns
-------
bindata: 1D ndarray
Mean-weighted binned data.
binunc: 1D ndarray
Standard deviation of the binned data points (returned only if
uncert is not None).
Notes
-----
If the last bin does not contain binsize elements, it will be
trnucated from the output.
Examples
--------
>>> import mc3.stats as ms
>>> ndata = 12
>>> data = np.array([0,1,2, 3,3,3, 3,3,4])
>>> uncert = np.array([3,1,1, 1,2,3, 2,2,4])
>>> binsize = 3
>>> # Binning, no weights:
>>> bindata = ms.bin_array(data, binsize)
>>> print(bindata)
[1. 3. 3.33333333]
>>> # Binning using uncertainties as weights:
>>> bindata, binstd = ms.bin_array(data, binsize, uncert)
>>> print(bindata)
[1.42105263 3. 3.11111111]
>>> print(binstd)
[0.6882472 0.85714286 1.33333333]
"""
if uncert is None:
return ba.binarray(np.array(data, dtype=np.double), int(binsize))
return ba.binarray(np.array(data, dtype=np.double), int(binsize),
np.array(uncert, dtype=np.double))
def residuals(model, data, uncert,
params=None, priors=None, priorlow=None, priorup=None):
"""
Calculate the residuals between a dataset and a model
Parameters
----------
model: 1D ndarray
Model fit of data.
data: 1D ndarray
Data set array fitted by model.
errors: 1D ndarray
Data uncertainties.
params: 1D float ndarray
Model parameters.
priors: 1D ndarray
Parameter prior values.
priorlow: 1D ndarray
Prior lower uncertainty.
priorup: 1D ndarray
Prior upper uncertainty.
Returns
-------
residuals: 1D ndarray
Residuals array.
Examples
--------
>>> import mc3.stats as ms
>>> # Compute chi-squared for a given model fitting a data set:
>>> data = np.array([1.1, 1.2, 0.9, 1.0])
>>> model = np.array([1.0, 1.0, 1.0, 1.0])
>>> uncert = np.array([0.1, 0.1, 0.1, 0.1])
>>> res = ms.residuals(model, data, uncert)
print(res)
[-1. -2. 1. 0.]
>>> # Now, say this is a two-parameter model, with a uniform and
>>> # a Gaussian prior, respectively:
>>> params = np.array([2.5, 5.5])
>>> priors = np.array([2.0, 5.0])
>>> plow = np.array([0.0, 1.0])
>>> pup = np.array([0.0, 1.0])
>>> res = ms.residuals(model, data, uncert, params, priors, plow, pup)
>>> print(res)
[-1. -2. 1. 0. 0.5]
"""
if params is None or priors is None or priorlow is None or priorup is None:
return cs.residuals(model, data, uncert)
iprior = (priorlow > 0) & (priorup > 0)
dprior = (params - priors)[iprior]
return cs.residuals(model, data, uncert, dprior,
priorlow[iprior], priorup[iprior])
def chisq(model, data, uncert,
params=None, priors=None, priorlow=None, priorup=None):
"""
Calculate chi-squared of a model fit to a data set:
chisq = sum{data points} ((data[i] -model[i])/error[i])**2.0
If params, priors, priorlow, and priorup are not None, calculate:
chisq = sum{data points} ((data[i] -model[i])/error[i])**2.0
+ sum{priors} ((params[j]-prior[j])/prioruncert[j])**2.0
Which is not chi-squared, but is the quantity to optimize when a
parameter has a Gaussian prior (equivalent to maximize the Bayesian
posterior probability).
Parameters
----------
model: 1D ndarray
Model fit of data.
data: 1D ndarray
Data set array fitted by model.
uncert: 1D ndarray
Data uncertainties.
params: 1D float ndarray
Model parameters.
priors: 1D ndarray
Parameter prior values.
priorlow: 1D ndarray
Left-sided prior standard deviation (param < prior).
A priorlow value of zero denotes a uniform prior.
priorup: 1D ndarray
Right-sided prior standard deviation (prior < param).
A priorup value of zero denotes a uniform prior.
Returns
-------
chisq: Float
The chi-squared value.
Examples
--------
>>> import mc3.stats as ms
>>> import numpy as np
>>> # Compute chi-squared for a given model fitting a data set:
>>> data = np.array([1.1, 1.2, 0.9, 1.0])
>>> model = np.array([1.0, 1.0, 1.0, 1.0])
>>> uncert = np.array([0.1, 0.1, 0.1, 0.1])
>>> chisq = ms.chisq(model, data, uncert)
print(chisq)
6.0
>>> # Now, say this is a two-parameter model, with a uniform and
>>> # a Gaussian prior, respectively:
>>> params = np.array([2.5, 5.5])
>>> priors = np.array([2.0, 5.0])
>>> plow = np.array([0.0, 1.0])
>>> pup = np.array([0.0, 1.0])
>>> chisq = ms.chisq(model, data, uncert, params, priors, plow, pup)
>>> print(chisq)
6.25
"""
if params is None or priors is None or priorlow is None or priorup is None:
return cs.chisq(model, data, uncert)
iprior = (priorlow > 0) & (priorup > 0)
dprior = (params - priors)[iprior]
return cs.chisq(model, data, uncert, dprior,
priorlow[iprior], priorup[iprior])
def dwt_chisq(model, data, params, priors=None, priorlow=None, priorup=None):
"""
Calculate -2*ln(likelihood) in a wavelet-base (a pseudo chi-squared)
based on Carter & Winn (2009), ApJ 704, 51.
Parameters
----------
model: 1D ndarray
Model fit of data.
data: 1D ndarray
Data set array fitted by model.
params: 1D float ndarray
Model parameters (including the tree noise parameters: gamma,
sigma_r, sigma_w; which must be the last three elements in params).
priors: 1D ndarray
Parameter prior values.
priorlow: 1D ndarray
Left-sided prior standard deviation (param < prior).
A priorlow value of zero denotes a uniform prior.
priorup: 1D ndarray
Right-sided prior standard deviation (prior < param).
A priorup value of zero denotes a uniform prior.
Returns
-------
chisq: Float
Wavelet-based (pseudo) chi-squared.
Notes
-----
- If the residuals array size is not of the form 2**N, the routine
zero-padds the array until this condition is satisfied.
- The current code only supports gamma=1.
Examples
--------
>>> import mc3.stats as ms
>>> import numpy as np
>>> # Compute chi-squared for a given model fitting a data set:
>>> data = np.array([2.0, 0.0, 3.0, -2.0, -1.0, 2.0, 2.0, 0.0])
>>> model = np.ones(8)
>>> params = np.array([1.0, 0.1, 0.1])
>>> chisq = ms.dwt_chisq(model, data, params)
>>> print(chisq)
1693.22308882
>>> # Now, say this is a three-parameter model, with a Gaussian prior
>>> # on the last parameter:
>>> priors = np.array([1.0, 0.2, 0.3])
>>> plow = np.array([0.0, 0.0, 0.1])
>>> pup = np.array([0.0, 0.0, 0.1])
>>> chisq = ms.dwt_chisq(model, data, params, priors, plow, pup)
>>> print(chisq)
1697.2230888243134
"""
if len(params) < 3:
with mu.Log() as log:
log.error('Wavelet chisq should have at least three parameters.')
if priors is None or priorlow is None or priorup is None:
return dwt.chisq(params, model, data)
iprior = (priorlow > 0) & (priorup > 0)
dprior = (params - priors)[iprior]
return dwt.chisq(params, model, data, dprior,
priorlow[iprior], priorup[iprior])
def log_prior(posterior, prior, priorlow, priorup, pstep):
"""
Compute the log(prior) for a given sample (neglecting constant terms).
This is meant to be the weight added by the prior to chi-square
when optimizing a Bayesian posterior. Therefore, there is a
constant offset with respect to the true -2*log(prior) that can
be neglected.
Parameters
----------
posterior: 1D/2D float ndarray
A parameter sample of shape [nsamples, nfree].
prior: 1D ndarray
Parameters priors. The type of prior is determined by priorlow
and priorup:
Gaussian: if both priorlow>0 and priorup>0
Uniform: else
The free parameters in prior must correspond to those
parameters contained in the posterior, i.e.:
len(prior[pstep>0]) = nfree.
priorlow: 1D ndarray
Lower prior uncertainties.
priorup: 1D ndarray
Upper prior uncertainties.
pstep: 1D ndarray
Parameter masking determining free (pstep>0), fixed (pstep==0),
and shared parameters.
Returns
-------
logp: 1D float ndarray
Sum of -2*log(prior):
A uniform prior returns logp = 0.0
A Gaussian prior returns logp = -0.5*(param-prior)**2/prior_uncert**2
A log-uniform prior returns logp = log(1/param)
Examples
--------
>>> import mc3.stats as ms
>>> import numpy as np
>>> # A posterior of three samples and two free parameters:
>>> post = np.array([[3.0, 2.0],
>>> [3.1, 1.0],
>>> [3.6, 1.5]])
>>> # Trivial case, uniform priors:
>>> prior = np.array([3.5, 0.0])
>>> priorlow = np.array([0.0, 0.0])
>>> priorup = np.array([0.0, 0.0])
>>> pstep = np.array([1.0, 1.0])
>>> log_prior = ms.log_prior(post, prior, priorlow, priorup, pstep)
>>> print(log_prior)
[0. 0. 0.]
>>> # Gaussian prior on first parameter:
>>> prior = np.array([3.5, 0.0])
>>> priorlow = np.array([0.1, 0.0])
>>> priorup = np.array([0.1, 0.0])
>>> pstep = np.array([1.0, 1.0])
>>> log_prior = ms.log_prior(post, prior, priorlow, priorup, pstep)
>>> print(log_prior)
[25. 16. 1.]
>>> # Posterior comes from a 3-parameter model, with second fixed:
>>> prior = np.array([3.5, 0.0, 0.0])
>>> priorlow = np.array([0.1, 0.0, 0.0])
>>> priorup = np.array([0.1, 0.0, 0.0])
>>> pstep = np.array([1.0, 0.0, 1.0])
>>> log_prior = ms.log_prior(post, prior, priorlow, priorup, pstep)
>>> print(log_prior)
[25. 16. 1.]
>>> # Also works for a single 1D params array:
>>> params = np.array([3.0, 2.0])
>>> prior = np.array([3.5, 0.0])
>>> priorlow = np.array([0.1, 0.0])
>>> priorup = np.array([0.1, 0.0])
>>> pstep = np.array([1.0, 1.0])
>>> log_prior = ms.log_prior(params, prior, priorlow, priorup, pstep)
>>> print(log_prior)
25.0
"""
posterior = np.atleast_2d(posterior)
ifree = np.where(pstep > 0)[0]
nfree = len(ifree)
dprior = posterior - prior[ifree]
ifreeprior = np.where((priorlow[ifree]>0) & (priorup[ifree]>0))[0]
ilogprior = np.where(priorlow[ifree]<0)[0]
for i in range(nfree):
if i in ifreeprior:
dprior[dprior[:,i]<0,i] /= priorlow[ifree][i]
dprior[dprior[:,i]>0,i] /= priorup [ifree][i]
elif i in ilogprior:
dprior[:,i] = 2.0*np.log(posterior[:,i])
else:
dprior[:,i] = 0.0
logp = -0.5*np.sum(dprior**2, axis=1)
if np.size(logp) == 1:
return logp[0]
return logp
def cred_region(posterior=None, quantile=0.6827, pdf=None, xpdf=None):
"""
Compute the highest-posterior-density credible region for a
posterior distribution.
Parameters
----------
posterior: 1D float ndarray
A posterior distribution.
quantile: Float
The HPD quantile considered for the credible region.
A value in the range: (0, 1).
pdf: 1D float ndarray
A smoothed-interpolated PDF of the posterior distribution.
xpdf: 1D float ndarray
The X location of the pdf values.
Returns
-------
pdf: 1D float ndarray
A smoothed-interpolated PDF of the posterior distribution.
xpdf: 1D float ndarray
The X location of the pdf values.
HPDmin: Float
The minimum density in the percentile-HPD region.
Example
-------
>>> import numpy as np
>>> import mc3.stats as ms
>>> # Test for a Normal distribution:
>>> npoints = 100000
>>> posterior = np.random.normal(0, 1.0, npoints)
>>> pdf, xpdf, HPDmin = ms.cred_region(posterior)
>>> # 68% HPD credible-region boundaries (somewhere close to +/-1.0):
>>> print(np.amin(xpdf[pdf>HPDmin]), np.amax(xpdf[pdf>HPDmin]))
>>> # Re-compute HPD for the 95% (withour recomputing the PDF):
>>> pdf, xpdf, HPDmin = ms.cred_region(pdf=pdf, xpdf=xpdf, quantile=0.9545)
>>> print(np.amin(xpdf[pdf>HPDmin]), np.amax(xpdf[pdf>HPDmin]))
"""
if pdf is None and xpdf is None:
# Thin if posterior has too many samples (> 120k):
thinning = np.amax([1, int(np.size(posterior)/120000)])
# Compute the posterior's PDF:
kernel = ss.gaussian_kde(posterior[::thinning])
# Remove outliers:
mean = np.mean(posterior)
std = np.std(posterior)
k = 6
lo = np.amax([mean-k*std, np.amin(posterior)])
hi = np.amin([mean+k*std, np.amax(posterior)])
# Use a Gaussian kernel density estimate to trace the PDF:
x = np.linspace(lo, hi, 100)
# Interpolate-resample over finer grid (because kernel.evaluate
# is expensive):
f = si.interp1d(x, kernel.evaluate(x))
xpdf = np.linspace(lo, hi, 3000)
pdf = f(xpdf)
# Sort the PDF in descending order:
ip = np.argsort(pdf)[::-1]
# Sorted CDF:
cdf = np.cumsum(pdf[ip])
# Indices of the highest posterior density:
iHPD = np.where(cdf >= quantile*cdf[-1])[0][0]
# Minimum density in the HPD region:
HPDmin = np.amin(pdf[ip][0:iHPD])
return pdf, xpdf, HPDmin
class ppf_uniform(object):
"""
Percent-point function (PPF) for a uniform function between
pmin and pmax. Also known as inverse CDF or quantile function.
Parameters
----------
pmin: Float
Lower boundary of the uniform function.
pmax: Float
Upper boundary of the uniform function.
Returns
-------
ppf: Callable
The uniform's PPF.
Examples
--------
>>> import mc3.stats as ms
>>> ppf_u = ms.ppf_uniform(-10.0, 10.0)
>>> # The domain of the output function is [0,1]:
>>> print(ppf_u(0.0), ppf_u(0.5), ppf_u(1.0))
-10.0 0.0 10.0
>>> # Also works for np.array inputs:
>>> print(ppf_u(np.array([0.0, 0.5, 1.0])))
array([-10., 0., 10.])
"""
class ppf_gaussian(object):
"""
Percent-point function (PPF) for a two-sided Gaussian function
Also known as inverse CDF or quantile function.
Parameters
----------
loc: Float
Center of the Gaussian function.
lo: Float
Left-sided standard deviation (for values x < loc).
up: Float
Right-sided standard deviation (for values x > loc).
Returns
-------
ppf: Callable
The Gaussian's PPF.
Examples
--------
>>> import mc3.stats as ms
>>> ppf_g = ms.ppf_gaussian(0.0, 1.0, 1.0)
>>> # The domain of the output function is (0,1):
>>> print(ppf_g(1e-10), ppf_g(0.5), ppf_g(1.0-1e-10))
(-6.361340902404056, 0.0, 6.361340889697422)
>>> # Also works for np.array inputs:
>>> print(ppf_g(np.array([1e-10, 0.5, 1-1e-10])))
[-6.3613409 0. 6.36134089]
"""
def dwt_daub4(array, inverse=False):
"""
1D discrete wavelet transform using the Daubechies 4-parameter wavelet
Parameters
----------
array: 1D ndarray
Data array to which to apply the DWT.
inverse: bool
If False, calculate the DWT,
If True, calculate the inverse DWT.
Notes
-----
The input vector must have length 2**M with M an integer, otherwise
the output will zero-padded to the next size of the form 2**M.
Examples
--------
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> import mc3.stats as ms
>>> # Calculate the inverse DWT for a unit vector:
>>> nx = 1024
>>> e4 = np.zeros(nx)
>>> e4[4] = 1.0
>>> ie4 = ms.dwt_daub4(e4, True)
>>> # Plot the inverse DWT:
>>> plt.figure(0)
>>> plt.clf()
>>> plt.plot(np.arange(nx), ie4)
"""
isign = -1 if inverse else 1
return dwt.daub4(np.array(array), isign)
class Loglike(object):
"""Wrapper to compute log(likelihood)"""
class Prior_transform(object):
"""Wrapper to compute the PPF of a set of parameters."""
| [
2,
15069,
357,
66,
8,
1853,
12,
1238,
2481,
3208,
1173,
952,
7070,
359,
418,
290,
20420,
13,
198,
2,
36650,
18,
318,
1280,
12,
10459,
3788,
739,
262,
17168,
5964,
357,
3826,
38559,
24290,
737,
198,
198,
834,
439,
834,
796,
685,
19... | 2.291819 | 7,652 |
"""
Dwie liczby naturalne są „przyjaciółkami jeżeli zbiory cyfr z których zbudowane są liczby
są identyczne. Na przykład: 123 i 321, 211 i 122, 35 3553. Dana jest tablica T[N][N] wypełniona
liczbami naturalnymi. Proszę napisać funkcję, która dla tablicy T zwraca ile elementów tablicy
sąsiaduje wyłącznie z przyjaciółkami.
"""
from random import randint
N = 5
array = [[randint(1, 100) for _ in range(N)] for _ in range(N)]
print(friends_numbers(array))
| [
37811,
198,
35,
86,
494,
3476,
89,
1525,
3288,
710,
264,
128,
227,
564,
252,
1050,
7357,
30482,
72,
10205,
41615,
74,
6277,
11223,
129,
120,
43733,
1976,
8482,
652,
3075,
8310,
1976,
479,
83,
10205,
563,
354,
1976,
65,
463,
322,
153... | 2.165877 | 211 |
# Copyright 2020 HPS/SAFARI Research Groups
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
Author: HPS Research Group
Date: 04/27/2020
Description: Globally tracks objects declared in Scarab Batch jobfiles.
The purpose of this file is to provide an interface for globally tracking all declared objects.
This is useful because objects are declared by users in jobfiles, and are usually not directly
operated on by the user. Typical use cases are the user directs Scarab Batch on how to operate
on the objects. Scarab Batch uses the globally tracked objects and the directives from the user
(e.g., run, progress, stat) to perform the appropriate task.
"""
import sys
import os
sys.path.append(os.path.dirname(__file__))
from scarab_batch_types import *
from batch_manager import *
from command import *
import scarab_stats
# Declare global objects:
scarab_run_manager = ScarabRunManager()
program_manager = ObjectManager()
checkpoint_manager = ObjectManager()
mix_manager = ObjectManager()
collection_manager = ObjectManager()
| [
2,
220,
15069,
12131,
367,
3705,
14,
4090,
37,
33604,
4992,
27441,
198,
2,
198,
2,
220,
2448,
3411,
318,
29376,
7520,
11,
1479,
286,
3877,
11,
284,
597,
1048,
16727,
257,
4866,
286,
198,
2,
220,
428,
3788,
290,
3917,
10314,
3696,
... | 3.800738 | 542 |
EVENTS_PAGE = 'events.html'
HED_TOOLS_HOME_PAGE = 'hed-tools-home.html'
SCHEMA_PAGE = 'schema.html'
SIDECAR_PAGE = 'sidecar.html'
SPREADSHEET_PAGE = 'spreadsheet.html'
STRING_PAGE = 'string.html'
SERVICES_PAGE = 'services.html'
| [
20114,
15365,
62,
4537,
8264,
796,
705,
31534,
13,
6494,
6,
201,
198,
39,
1961,
62,
10468,
3535,
50,
62,
39069,
62,
4537,
8264,
796,
705,
704,
12,
31391,
12,
11195,
13,
6494,
6,
201,
198,
50,
3398,
27630,
62,
4537,
8264,
796,
705,... | 2.117117 | 111 |
"""
Copyright (c) 2018-2021 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import copy
from collections import namedtuple
from ..representation import ContainerRepresentation
from ..utils import is_single_metric_source, get_supported_representations
from ..presenters import BasePresenter
from ..config import ConfigValidator, NumberField, StringField, ConfigError
from ..dependency import ClassProvider, UnregisteredProviderException
from ..utils import zipped_transform, get_parameter_value_from_config, contains_any
PerImageMetricResult = namedtuple('PerImageMetricResult', ['metric_name', 'metric_type', 'result', 'direction'])
class Metric(ClassProvider):
"""
Interface for evaluating metrics.
"""
__provider_type__ = 'metric'
annotation_types = ()
prediction_types = ()
description = ""
    @classmethod
    def configure(self):
        """
        Specifies configuration structure for metric entry.

        Hook for subclasses: concrete metrics override this to declare the
        layout of their configuration entry.  The base implementation
        intentionally does nothing.
        """
        # NOTE(review): the first argument of a classmethod is conventionally
        # named `cls`; it is named `self` here -- behavior is unaffected.
        pass
    @classmethod
    def validate_config(cls, config, fetch_only=False, uri_prefix=''):
        """
        Validate that metric entry meets all configuration structure requirements.

        When called on the Metric base class, resolves the concrete provider
        from config['type'] and delegates validation to it; when called on a
        concrete subclass, validates `config` against that subclass's declared
        parameters.

        Args:
            config: dict-like metric configuration entry.
            fetch_only: if True, collect ConfigError objects into the returned
                list instead of raising them.
            uri_prefix: location prefix used in error reporting.

        Returns:
            list of ConfigError objects (empty when the entry is valid).

        Raises:
            ConfigError / UnregisteredProviderException: when fetch_only is
                False and the entry is missing/unknown.
        """
        errors = []
        # Dispatch branch: only taken when invoked on the Metric base class
        # itself (subclasses fall through to direct field validation below).
        if cls.__name__ == Metric.__name__:
            metric_provider = config.get('type')
            if not metric_provider:
                # NOTE(review): message text ('type does not found') is
                # preserved as-is; it is runtime behavior, not a comment.
                error = ConfigError(
                    'type does not found', config, uri_prefix or 'metric', validation_scheme=cls.validation_scheme()
                )
                if not fetch_only:
                    raise error
                errors.append(error)
                return errors
            try:
                # Look up the concrete metric class registered under this name.
                metric_cls = cls.resolve(metric_provider)
            except UnregisteredProviderException as exception:
                if not fetch_only:
                    raise exception
                errors.append(
                    ConfigError("metric {} unregistered".format(metric_provider),
                                config, uri_prefix or 'metric', validation_scheme=cls.validation_scheme())
                )
                return errors
            # Recurse into the concrete subclass's validate_config.
            errors.extend(metric_cls.validate_config(config, fetch_only=fetch_only, uri_prefix=uri_prefix))
            return errors
        # Subclass branch: validate fields declared by cls.parameters(),
        # rejecting unknown keys.
        metric_uri = uri_prefix or 'metrics.{}'.format(cls.__provider__)
        return ConfigValidator(
            metric_uri, on_extra_argument=ConfigValidator.ERROR_ON_EXTRA_ARGUMENT, fields=cls.parameters()
        ).validate(config, fetch_only=fetch_only, validation_scheme=cls.validation_scheme())
@classmethod
| [
37811,
198,
15269,
357,
66,
8,
2864,
12,
1238,
2481,
8180,
10501,
198,
198,
26656,
15385,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
5832,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
... | 2.555556 | 1,197 |
from distutils.core import setup
from src import automagic_imaging
setup(
name='Automagic Imaging',
version=automagic_imaging.__version__,
url='https://github.com/univ-of-utah-marriott-library-apple/radmind_auto_image_creator',
author='Pierce Darragh, Marriott Library IT Services',
author_email='mlib-its-mac-github@lists.utah.edu',
description=('A group of scripts to set up automated OS X imaging with Radmind.'),
license='MIT',
packages=['automagic_imaging',
'automagic_imaging.scripts'],
package_dir={'automagic_imaging': 'src/automagic_imaging',
'automagic_imaging.scripts': 'src/automagic_imaging/scripts'},
scripts=['scripts/radmind_auto_image_creator.py'],
classifiers=[
'Development Status :: 5 - Stable',
'Environment :: Console',
'Environment :: MacOS X',
'Intended Audience :: Information Technology',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: MacOS :: MacOS X',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7'
],
)
| [
6738,
1233,
26791,
13,
7295,
1330,
9058,
198,
6738,
12351,
1330,
3557,
9083,
62,
320,
3039,
198,
198,
40406,
7,
198,
220,
220,
220,
1438,
11639,
38062,
9083,
48656,
3256,
198,
220,
220,
220,
2196,
28,
2306,
296,
9083,
62,
320,
3039,
... | 2.604494 | 445 |
# Copyright 2019 Red Hat
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
from oslo_log import log
from tobiko.openstack import topology
from tobiko.tripleo import overcloud
from tobiko.tripleo import undercloud
LOG = log.getLogger(__name__)
| [
2,
15069,
13130,
2297,
10983,
198,
2,
198,
2,
220,
220,
220,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
345,
743,
198,
2,
220,
220,
220,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13... | 3.355372 | 242 |
from peewee import *
from .base import OutputBase
| [
6738,
613,
413,
1453,
1330,
1635,
198,
6738,
764,
8692,
1330,
25235,
14881,
628,
628,
628,
628,
628,
628,
628,
628,
628
] | 3.045455 | 22 |
import torch
import torch.nn as nn
import torch.optim as optim
import os
import random
from CNN.resnet import BasicBlock
import torch.nn.functional as F
#g_filters = [384, 192, 96, 48, 3]
#g_strids = [1, 2, 2, 2]
# filters = [16, 32, 64, 128, 256, 512]
# strides = [2,1,2,1,2,1]
| [
11748,
28034,
198,
11748,
28034,
13,
20471,
355,
299,
77,
198,
11748,
28034,
13,
40085,
355,
6436,
198,
11748,
28686,
198,
11748,
4738,
198,
6738,
8100,
13,
411,
3262,
1330,
14392,
12235,
198,
11748,
28034,
13,
20471,
13,
45124,
355,
37... | 2.504274 | 117 |
# Copyright 2020, Kay Hayen, mailto:kay.hayen@gmail.com
#
# Part of "Nuitka", an optimizing Python compiler that is compatible and
# integrates with CPython, but also works on its own.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Functions to handle git staged content.
Inspired from https://raw.githubusercontent.com/hallettj/git-format-staged/master/git-format-staged
Original author: Jesse Hallett <jesse@sitr.us>
"""
import re
import subprocess
# Parse output from `git diff-index`
| [
2,
220,
220,
220,
220,
15069,
12131,
11,
17356,
9075,
268,
11,
6920,
1462,
25,
5568,
13,
71,
323,
268,
31,
14816,
13,
785,
198,
2,
198,
2,
220,
220,
220,
220,
2142,
286,
366,
45,
5013,
4914,
1600,
281,
45780,
11361,
17050,
326,
... | 3.168675 | 332 |
"""
execute a notebook file hierarchy
run_notebooks orig_notebook_dir file_re
run_notebooks autograded "lab_wk9*ipynb"
"""
from pathlib import Path
import click
from .utils import working_directory
import shutil
import nbformat
from nbconvert.preprocessors import ExecutePreprocessor
@click.command()
@click.argument('notebook_folder',type=str)
@click.argument('file_re',type=str)
| [
37811,
198,
41049,
257,
20922,
2393,
18911,
198,
198,
5143,
62,
11295,
12106,
1796,
62,
11295,
2070,
62,
15908,
2393,
62,
260,
198,
198,
5143,
62,
11295,
12106,
1960,
519,
81,
5286,
366,
23912,
62,
43021,
24,
9,
541,
2047,
65,
1,
19... | 3.129032 | 124 |
import requests
from urllib.parse import urljoin, urlparse
from bs4 import BeautifulSoup
import re
BASE_SITE = "https://www.jetpunk.com"
BASE_URL = "/tags/multiple-choice"
if __name__ == "__main__":
    # Scrape JetPunk's multiple-choice tag page: collect quiz links, then
    # for each quiz pull its questions and (from /stats) the answers, and
    # append the result to jetpunk.csv via into_csv_format().
    page = requests.get(BASE_SITE + BASE_URL)
    soup = BeautifulSoup(page.content, "html.parser")
    links = soup.find_all("a", href=True)
    quizzes = set()
    for link in links:
        # NOTE(review): the trailing `and link["href"]` is redundant --
        # startswith("/quizzes/") already guarantees a non-empty href.
        if link["href"].startswith("/quizzes/") and link["href"]:
            # Keep only the path component (drops query string / fragment).
            url = urljoin(link["href"], urlparse(link["href"]).path)
            if url != "/quizzes/random":
                quizzes.add(BASE_SITE + url)
    # NOTE(review): this empty list is dead -- `data` is rebound to a tuple
    # inside the loop; presumably a leftover from an earlier append() design.
    data = []
    for i, quiz in enumerate(quizzes, 1):
        # Quiz page supplies the name and question text...
        qhtml = requests.get(quiz).content
        qname = get_quiz_name(qhtml)
        questions = get_quiz_questions_and_possible_answers(qhtml)
        if not questions:
            continue
        # ...and the /stats page supplies the correct answers.
        ahtml = requests.get(quiz + "/stats").content
        answers = get_quiz_answers(ahtml)
        if not answers:
            continue
        data = (qname, questions, answers)
        formatted = into_csv_format(i, data)
        print(f"Finished loading quiz {i}")
        # Append this quiz's rows to the cumulative CSV file.
        with open('jetpunk.csv', 'a') as f:
            f.write(formatted)
| [
11748,
7007,
198,
6738,
2956,
297,
571,
13,
29572,
1330,
19016,
22179,
11,
19016,
29572,
198,
6738,
275,
82,
19,
1330,
23762,
50,
10486,
198,
11748,
302,
198,
198,
33,
11159,
62,
50,
12709,
796,
366,
5450,
1378,
2503,
13,
31173,
30354... | 2.215328 | 548 |
import pickle
import os
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
from tabulate import tabulate
# If modifying these scopes, delete the file token.pickle.
SCOPES = ['https://www.googleapis.com/auth/drive.metadata']
if __name__ == '__main__':
main() | [
11748,
2298,
293,
201,
198,
11748,
28686,
201,
198,
6738,
23645,
499,
291,
75,
1153,
13,
67,
40821,
1330,
1382,
201,
198,
6738,
23645,
62,
18439,
62,
12162,
1071,
8019,
13,
11125,
1330,
2262,
4262,
4677,
37535,
201,
198,
6738,
23645,
... | 2.873134 | 134 |
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None | [
2,
30396,
329,
257,
13934,
5509,
10139,
13,
198,
2,
1398,
12200,
19667,
25,
198,
2,
220,
220,
220,
220,
825,
11593,
15003,
834,
7,
944,
11,
2124,
2599,
198,
2,
220,
220,
220,
220,
220,
220,
220,
220,
2116,
13,
2100,
796,
2124,
1... | 2.175676 | 74 |
# Copyright 2019 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------------
import unittest
from utility.utility import (
create_error_response,
strip_begin_end_key,
list_difference,
encrypt_data,
decrypt_data,
decrypted_response,
verify_data_hash,
human_read_to_byte,
)
| [
2,
15069,
13130,
8180,
10501,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198,
2,
921,
743,
733... | 3.611336 | 247 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from difflib import Differ
from pathlib import Path
from typing import Container
from ..show_functions import show_via_cmd
from ..typedefs import GEN_PATH_FUNC, PATH_FUNC, SHOW_FUNC
from .path_builders import (build_via_suffix_change, unique_name_via_number,
unique_stem_via_suffix)
replace_threshold: int = 3
str_between_lines: str = "\n " + "#"*100 + "\n "*2
out_ext: str = ".txt"
diff_ext: str = ".diff"
extensions: Container[str] = (".py", ".pyx")
build_dir: Path = Path("build/")
py_dir: Path = Path("py/")
pyx_dir: Path = Path("pyx/")
diff_dir: Path = Path("diff/")
path_func: GEN_PATH_FUNC = build_via_suffix_change
show_func: SHOW_FUNC = show_via_cmd
unique_stem_func: PATH_FUNC = unique_stem_via_suffix
unique_name_func: GEN_PATH_FUNC = unique_name_via_number
save_as_diff: bool = True
create_dirs: bool = True
differ: Differ = Differ()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
6738,
814,
8019,
1330,
10631,
263,
198,
6738,
3108,
8019,
1330,
10644,
198,
6738,
19720,
1330,
43101,
198,
198... | 2.482667 | 375 |
import hashlib # noqa: F401
from hashlib import md5 # noqa: F401
from hashlib import sha256 # noqa: F401
from hashlib import sha512 # noqa: F401
| [
11748,
12234,
8019,
220,
1303,
645,
20402,
25,
376,
21844,
198,
6738,
12234,
8019,
1330,
45243,
20,
220,
1303,
645,
20402,
25,
376,
21844,
198,
6738,
12234,
8019,
1330,
427,
64,
11645,
220,
1303,
645,
20402,
25,
376,
21844,
198,
6738,
... | 2.709091 | 55 |
from fluent import sender
from rest.api.constants.env_constants import EnvConstants
from rest.api.constants.env_init import EnvInit
from rest.api.schedulers.base_scheduler import BaseScheduler
from rest.service.fluentd import Fluentd
from rest.utils.docker_utils import DockerUtils
| [
6738,
43472,
1330,
29788,
198,
198,
6738,
1334,
13,
15042,
13,
9979,
1187,
13,
24330,
62,
9979,
1187,
1330,
2039,
85,
34184,
1187,
198,
6738,
1334,
13,
15042,
13,
9979,
1187,
13,
24330,
62,
15003,
1330,
2039,
85,
31768,
198,
6738,
133... | 3.264368 | 87 |
# encoding: utf-8
#
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
from __future__ import absolute_import, division, unicode_literals
import secrets
| [
2,
21004,
25,
3384,
69,
12,
23,
198,
2,
198,
2,
198,
2,
770,
8090,
6127,
5178,
318,
2426,
284,
262,
2846,
286,
262,
29258,
5094,
198,
2,
13789,
11,
410,
13,
362,
13,
15,
13,
1002,
257,
4866,
286,
262,
4904,
43,
373,
407,
9387,... | 3.185567 | 97 |
#
# Autogenerated by Thrift Compiler (0.9.1)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
# options string: py
#
from thrift.Thrift import TType, TMessageType, TException, TApplicationException
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol, TProtocol
try:
from thrift.protocol import fastbinary
except:
fastbinary = None
class ErrCode:
  """
  Enum for the Error Codes returned by the Thrift Service APIs
  """
  # NOTE: Thrift-0.9.1-generated enum emulation (predates Python's Enum).
  # DO NOT EDIT by hand: the integer members and the two lookup tables
  # below are kept in sync by the generator.
  # --- generic status codes ---
  EOK = 0
  INVALID_ARGUMENTS = 1
  NO_RECORDS_FOUND = 2
  DAEMON_NOT_RESPONDING = 3
  # --- route-add errors (1001-1012) ---
  ROUTE_ADD_RETURN_FAILED = 1001
  ROUTE_ADD_RETURN_TABLEID_INVALID = 1002
  ROUTE_ADD_RETURN_IFLIDX_INVALID = 1003
  ROUTE_ADD_RETURN_LCLADDR_INVALID = 1004
  ROUTE_ADD_RETURN_PREFIX_INVALID = 1005
  ROUTE_ADD_RETURN_GWHANDLE_INVALID = 1006
  ROUTE_ADD_RETURN_DYNIFL_CREATE_FAILED = 1007
  ROUTE_ADD_RETURN_MASK2SHORT = 1008
  ROUTE_ADD_RETURN_BAD_NEXTHOP = 1009
  ROUTE_ADD_RETURN_NEXTHOP_ECMP_LIMIT = 1010
  ROUTE_ADD_RETURN_MASK2LONG = 1011
  ROUTE_ADD_RETURN_RTT_NOT_READY = 1012
  # --- route-delete errors (1013-1017) ---
  ROUTE_DELETE_RETURN_ROUTE_NOTFOUND = 1013
  ROUTE_DELETE_RETURN_TABLE_NOTFOUND = 1014
  ROUTE_DELETE_RETURN_MASK2SHORT = 1015
  ROUTE_DELETE_RETURN_MASK2LONG = 1016
  ROUTE_DELETE_RETURN_COOKIE_MISMATCH = 1017
  # --- firewall (FW_*) errors (1500-1540) ---
  FW_FILTER_NOT_FOUND = 1500
  FW_FILTER_IN_USE = 1501
  FW_FILTER_ALREADY_EXISTS = 1502
  FW_FILTER_CONFIG_ERR = 1503
  FW_TERM_NOT_FOUND = 1504
  FW_TERM_ALREADY_EXISTS = 1505
  FW_TERM_CONFIG_ERR = 1506
  FW_TERM_CONFLICT_ERR = 1507
  FW_POLICER_NOT_FOUND = 1508
  FW_POLICER_IN_USE = 1509
  FW_POLICER_ALREADY_EXISTS = 1510
  FW_POLICER_CONFIG_ERR = 1511
  FW_ATTACH_POINT_NOT_FOUND = 1512
  FW_ATTACH_POINT_IN_USE = 1513
  FW_DFW_INDEX_EXHAUSTED = 1514
  FW_OUT_OF_MEMORY_ERR = 1515
  FW_INTERNAL_ERR = 1516
  FW_TIMER_NOT_FOUND = 1517
  FW_TIMER_IN_USE = 1518
  FW_TIMER_ALREADY_EXISTS = 1519
  FW_TIMER_CONFIG_ERR = 1520
  FW_TNP_SESSION_ERR = 1521
  FW_PREFIX_LIST_NOT_FOUND = 1522
  FW_FCU_NOT_FOUND = 1523
  FW_INVALID_TERM = 1524
  FW_TERM_CONTAINS_NO_MATCH = 1525
  FW_TERM_MATCH_INVALID = 1526
  FW_TERM_ACTION_INVALID = 1527
  FW_TERM_END_FAILED = 1528
  FW_FILTER_TRANS_SEND = 1529
  FW_FILTER_TRANS_ALLOC = 1530
  FW_TERM_START_FAILED = 1531
  FW_FILTER_WRONG_DIRECTION = 1532
  FW_POLICER_INVALID_PARAMETER = 1533
  FW_POLICER_ACTION_DISCARD = 1534
  FW_FILTER_HANDLE_ALLOC = 1535
  FW_FILTER_COUNTER_ADD = 1536
  FW_FILTER_STATS_TRANS_ALLOC = 1537
  FW_FILTER_STATS_TRANS_SEND = 1538
  FW_POLICER_STATS_TRANS_ADD = 1539
  FW_STATS_NOT_AVAILABLE = 1540
  # --- catch-all ---
  GENERAL_ERROR = 2000

  # Forward lookup: numeric code -> symbolic name.
  _VALUES_TO_NAMES = {
    0: "EOK",
    1: "INVALID_ARGUMENTS",
    2: "NO_RECORDS_FOUND",
    3: "DAEMON_NOT_RESPONDING",
    1001: "ROUTE_ADD_RETURN_FAILED",
    1002: "ROUTE_ADD_RETURN_TABLEID_INVALID",
    1003: "ROUTE_ADD_RETURN_IFLIDX_INVALID",
    1004: "ROUTE_ADD_RETURN_LCLADDR_INVALID",
    1005: "ROUTE_ADD_RETURN_PREFIX_INVALID",
    1006: "ROUTE_ADD_RETURN_GWHANDLE_INVALID",
    1007: "ROUTE_ADD_RETURN_DYNIFL_CREATE_FAILED",
    1008: "ROUTE_ADD_RETURN_MASK2SHORT",
    1009: "ROUTE_ADD_RETURN_BAD_NEXTHOP",
    1010: "ROUTE_ADD_RETURN_NEXTHOP_ECMP_LIMIT",
    1011: "ROUTE_ADD_RETURN_MASK2LONG",
    1012: "ROUTE_ADD_RETURN_RTT_NOT_READY",
    1013: "ROUTE_DELETE_RETURN_ROUTE_NOTFOUND",
    1014: "ROUTE_DELETE_RETURN_TABLE_NOTFOUND",
    1015: "ROUTE_DELETE_RETURN_MASK2SHORT",
    1016: "ROUTE_DELETE_RETURN_MASK2LONG",
    1017: "ROUTE_DELETE_RETURN_COOKIE_MISMATCH",
    1500: "FW_FILTER_NOT_FOUND",
    1501: "FW_FILTER_IN_USE",
    1502: "FW_FILTER_ALREADY_EXISTS",
    1503: "FW_FILTER_CONFIG_ERR",
    1504: "FW_TERM_NOT_FOUND",
    1505: "FW_TERM_ALREADY_EXISTS",
    1506: "FW_TERM_CONFIG_ERR",
    1507: "FW_TERM_CONFLICT_ERR",
    1508: "FW_POLICER_NOT_FOUND",
    1509: "FW_POLICER_IN_USE",
    1510: "FW_POLICER_ALREADY_EXISTS",
    1511: "FW_POLICER_CONFIG_ERR",
    1512: "FW_ATTACH_POINT_NOT_FOUND",
    1513: "FW_ATTACH_POINT_IN_USE",
    1514: "FW_DFW_INDEX_EXHAUSTED",
    1515: "FW_OUT_OF_MEMORY_ERR",
    1516: "FW_INTERNAL_ERR",
    1517: "FW_TIMER_NOT_FOUND",
    1518: "FW_TIMER_IN_USE",
    1519: "FW_TIMER_ALREADY_EXISTS",
    1520: "FW_TIMER_CONFIG_ERR",
    1521: "FW_TNP_SESSION_ERR",
    1522: "FW_PREFIX_LIST_NOT_FOUND",
    1523: "FW_FCU_NOT_FOUND",
    1524: "FW_INVALID_TERM",
    1525: "FW_TERM_CONTAINS_NO_MATCH",
    1526: "FW_TERM_MATCH_INVALID",
    1527: "FW_TERM_ACTION_INVALID",
    1528: "FW_TERM_END_FAILED",
    1529: "FW_FILTER_TRANS_SEND",
    1530: "FW_FILTER_TRANS_ALLOC",
    1531: "FW_TERM_START_FAILED",
    1532: "FW_FILTER_WRONG_DIRECTION",
    1533: "FW_POLICER_INVALID_PARAMETER",
    1534: "FW_POLICER_ACTION_DISCARD",
    1535: "FW_FILTER_HANDLE_ALLOC",
    1536: "FW_FILTER_COUNTER_ADD",
    1537: "FW_FILTER_STATS_TRANS_ALLOC",
    1538: "FW_FILTER_STATS_TRANS_SEND",
    1539: "FW_POLICER_STATS_TRANS_ADD",
    1540: "FW_STATS_NOT_AVAILABLE",
    2000: "GENERAL_ERROR",
  }

  # Reverse lookup: symbolic name -> numeric code.
  _NAMES_TO_VALUES = {
    "EOK": 0,
    "INVALID_ARGUMENTS": 1,
    "NO_RECORDS_FOUND": 2,
    "DAEMON_NOT_RESPONDING": 3,
    "ROUTE_ADD_RETURN_FAILED": 1001,
    "ROUTE_ADD_RETURN_TABLEID_INVALID": 1002,
    "ROUTE_ADD_RETURN_IFLIDX_INVALID": 1003,
    "ROUTE_ADD_RETURN_LCLADDR_INVALID": 1004,
    "ROUTE_ADD_RETURN_PREFIX_INVALID": 1005,
    "ROUTE_ADD_RETURN_GWHANDLE_INVALID": 1006,
    "ROUTE_ADD_RETURN_DYNIFL_CREATE_FAILED": 1007,
    "ROUTE_ADD_RETURN_MASK2SHORT": 1008,
    "ROUTE_ADD_RETURN_BAD_NEXTHOP": 1009,
    "ROUTE_ADD_RETURN_NEXTHOP_ECMP_LIMIT": 1010,
    "ROUTE_ADD_RETURN_MASK2LONG": 1011,
    "ROUTE_ADD_RETURN_RTT_NOT_READY": 1012,
    "ROUTE_DELETE_RETURN_ROUTE_NOTFOUND": 1013,
    "ROUTE_DELETE_RETURN_TABLE_NOTFOUND": 1014,
    "ROUTE_DELETE_RETURN_MASK2SHORT": 1015,
    "ROUTE_DELETE_RETURN_MASK2LONG": 1016,
    "ROUTE_DELETE_RETURN_COOKIE_MISMATCH": 1017,
    "FW_FILTER_NOT_FOUND": 1500,
    "FW_FILTER_IN_USE": 1501,
    "FW_FILTER_ALREADY_EXISTS": 1502,
    "FW_FILTER_CONFIG_ERR": 1503,
    "FW_TERM_NOT_FOUND": 1504,
    "FW_TERM_ALREADY_EXISTS": 1505,
    "FW_TERM_CONFIG_ERR": 1506,
    "FW_TERM_CONFLICT_ERR": 1507,
    "FW_POLICER_NOT_FOUND": 1508,
    "FW_POLICER_IN_USE": 1509,
    "FW_POLICER_ALREADY_EXISTS": 1510,
    "FW_POLICER_CONFIG_ERR": 1511,
    "FW_ATTACH_POINT_NOT_FOUND": 1512,
    "FW_ATTACH_POINT_IN_USE": 1513,
    "FW_DFW_INDEX_EXHAUSTED": 1514,
    "FW_OUT_OF_MEMORY_ERR": 1515,
    "FW_INTERNAL_ERR": 1516,
    "FW_TIMER_NOT_FOUND": 1517,
    "FW_TIMER_IN_USE": 1518,
    "FW_TIMER_ALREADY_EXISTS": 1519,
    "FW_TIMER_CONFIG_ERR": 1520,
    "FW_TNP_SESSION_ERR": 1521,
    "FW_PREFIX_LIST_NOT_FOUND": 1522,
    "FW_FCU_NOT_FOUND": 1523,
    "FW_INVALID_TERM": 1524,
    "FW_TERM_CONTAINS_NO_MATCH": 1525,
    "FW_TERM_MATCH_INVALID": 1526,
    "FW_TERM_ACTION_INVALID": 1527,
    "FW_TERM_END_FAILED": 1528,
    "FW_FILTER_TRANS_SEND": 1529,
    "FW_FILTER_TRANS_ALLOC": 1530,
    "FW_TERM_START_FAILED": 1531,
    "FW_FILTER_WRONG_DIRECTION": 1532,
    "FW_POLICER_INVALID_PARAMETER": 1533,
    "FW_POLICER_ACTION_DISCARD": 1534,
    "FW_FILTER_HANDLE_ALLOC": 1535,
    "FW_FILTER_COUNTER_ADD": 1536,
    "FW_FILTER_STATS_TRANS_ALLOC": 1537,
    "FW_FILTER_STATS_TRANS_SEND": 1538,
    "FW_POLICER_STATS_TRANS_ADD": 1539,
    "FW_STATS_NOT_AVAILABLE": 1540,
    "GENERAL_ERROR": 2000,
  }
class RetStatus:
"""
Data type for Error Handling
Every API returns this under all circumstances.
When there are API-specific return values, they are nested with
this data type.
ErrCode is 0 (EOK) when API invocation is a SUCCESS.
Errcode is a negative integer when API invocation is a FAILURE.
ErrStr is valid only when ErrCode != 0.
Attributes:
- err_code: Error code
- err_str: Error string
"""
thrift_spec = (
None, # 0
(1, TType.I32, 'err_code', None, None, ), # 1
(2, TType.STRING, 'err_str', None, None, ), # 2
)
| [
2,
198,
2,
5231,
519,
877,
515,
416,
16283,
2135,
3082,
5329,
357,
15,
13,
24,
13,
16,
8,
198,
2,
198,
2,
8410,
5626,
48483,
4725,
48481,
7013,
15986,
311,
11335,
14603,
7013,
35876,
25003,
7013,
15986,
8410,
2751,
198,
2,
198,
2,... | 1.92815 | 4,064 |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# [START documentai_process_ocr_document]
# TODO(developer): Uncomment these variables before running the sample.
# project_id= 'YOUR_PROJECT_ID'
# location = 'YOUR_PROJECT_LOCATION' # Format is 'us' or 'eu'
# processor_id = 'YOUR_PROCESSOR_ID' # Create processor in Cloud Console
# file_path = '/path/to/local/pdf'
def layout_to_text(layout: dict, text: str) -> str:
    """Concatenate the document text referenced by a layout's text anchor.

    Document AI identifies text in different parts of the document by
    offsets into the entirety of the document's text; this converts those
    offsets back into a string.

    Args:
        layout: a Document.Page.Layout-like object exposing
            ``text_anchor.text_segments`` (annotated ``dict`` upstream, but
            accessed via attributes).
        text: the full text of the document that the offsets index into.

    Returns:
        The referenced text segments concatenated in segment order
        (empty string when there are no segments).
    """
    response = ""
    # If a text segment spans several lines, it is stored in multiple
    # segments; concatenate them all.
    for segment in layout.text_anchor.text_segments:
        # BUG FIX: the original guarded this with
        # `if segment in layout.text_anchor.text_segments else 0`, which is
        # always true for an element currently being iterated, so the
        # `else 0` branch was dead code.  Proto3 scalar fields already
        # default start_index to 0 when unset, so int() alone suffices.
        start_index = int(segment.start_index)
        end_index = int(segment.end_index)
        response += text[start_index:end_index]
    return response
# [END documentai_process_ocr_document]
| [
2,
15069,
12131,
3012,
11419,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198,
2,
921,
743,
733... | 3.02243 | 535 |
from django.db import models
from django.contrib.auth import get_user_model
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes.fields import GenericForeignKey
UserModel = get_user_model()
| [
6738,
42625,
14208,
13,
9945,
1330,
4981,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
1330,
651,
62,
7220,
62,
19849,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
11299,
19199,
13,
27530,
1330,
14041,
6030,
198,
6738,
42625,
14208... | 3.287671 | 73 |
# program to convert a distance in kilometers to miles
# (NOTE(review): the original header said "mph" -- this converts a
# distance, not a speed; the code and output message already say miles)
# Read the kilometer value from the user
kilometers = float(input("Enter value in kilometers: "))
# the conversion factor: 1 km == 0.621371 mi
conv_fac = 0.621371
# calculating miles
miles = kilometers * conv_fac
print('%0.2f kilometers is equal to %0.2f miles' %(kilometers,miles))
| [
2,
23065,
284,
10385,
10571,
284,
16462,
198,
2,
3497,
46646,
848,
315,
198,
34553,
40077,
796,
12178,
7,
15414,
7203,
17469,
1988,
287,
18212,
25,
366,
4008,
198,
198,
2,
262,
11315,
5766,
198,
42946,
62,
38942,
796,
657,
13,
5237,
... | 3.116279 | 86 |
# scmutil.py - Mercurial core utility functions
#
# Copyright Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from __future__ import absolute_import
import errno
import glob
import hashlib
import os
import re
import socket
import subprocess
import weakref
from .i18n import _
from .node import (
bin,
hex,
nullid,
nullrev,
short,
wdirid,
wdirrev,
)
from . import (
encoding,
error,
match as matchmod,
obsolete,
obsutil,
pathutil,
phases,
policy,
pycompat,
revsetlang,
similar,
smartset,
url,
util,
vfs,
)
from .utils import (
procutil,
stringutil,
)
if pycompat.iswindows:
from . import scmwindows as scmplatform
else:
from . import scmposix as scmplatform
parsers = policy.importmod(r'parsers')
termsize = scmplatform.termsize
class status(tuple):
    '''Named 7-tuple holding one list of files per dirstate status.

    Slots, in order: modified, added, removed, deleted, unknown, ignored,
    clean.  The 'deleted', 'unknown' and 'ignored' buckets are only
    meaningful for the working copy.
    '''

    __slots__ = ()

    # Each bucket is a fixed position in the underlying tuple; expose the
    # positions as read-only named properties.
    modified = property(lambda self: self[0],
                        doc='files that have been modified')
    added = property(lambda self: self[1],
                     doc='files that have been added')
    removed = property(lambda self: self[2],
                       doc='files that have been removed')
    deleted = property(lambda self: self[3],
                       doc='files in the dirstate but deleted from the '
                           'working copy (aka "missing")')
    unknown = property(lambda self: self[4],
                       doc='files not in the dirstate that are not ignored')
    ignored = property(lambda self: self[5],
                       doc='files not in the dirstate that are ignored '
                           '(by _dirignore())')
    clean = property(lambda self: self[6],
                     doc='files that have not been modified')
def itersubrepos(ctx1, ctx2):
    """Yield (subpath, subrepo) pairs for subrepos in ctx1 or ctx2.

    Subpaths present in ctx1 are preferred; subpaths only in ctx2 are
    yielded last as null subrepos based on ctx1.
    """
    # Create a (subpath, ctx) mapping where we prefer subpaths from
    # ctx1. The subpaths from ctx2 are important when the .hgsub file
    # has been modified (in ctx2) but not yet committed (in ctx1).
    subpaths = dict.fromkeys(ctx2.substate, ctx2)
    subpaths.update(dict.fromkeys(ctx1.substate, ctx1))
    # Track subpaths that exist only in ctx2 for the final loop below.
    missing = set()
    for subpath in ctx2.substate:
        if subpath not in ctx1.substate:
            del subpaths[subpath]
            missing.add(subpath)
    # Deterministic, path-sorted iteration order (py2 dict API: iteritems).
    for subpath, ctx in sorted(subpaths.iteritems()):
        yield subpath, ctx.sub(subpath)
    # Yield an empty subrepo based on ctx1 for anything only in ctx2. That way,
    # status and diff will have an accurate result when it does
    # 'sub.{status|diff}(rev2)'. Otherwise, the ctx2 subrepo is compared
    # against itself.
    for subpath in missing:
        yield subpath, ctx2.nullsub(subpath, ctx1)
def nochangesfound(ui, repo, excluded=None):
    '''Tell the user that push/pull found no changes.

    excluded is None or a list of nodes deliberately left out of the
    push/pull; secret, non-extinct ones are counted and reported.
    '''
    secretlist = []
    for node in excluded or ():
        ctx = repo[node]
        if ctx.phase() >= phases.secret and not ctx.extinct():
            secretlist.append(node)
    if secretlist:
        msg = (_("no changes found (ignored %d secret changesets)\n")
               % len(secretlist))
    else:
        msg = _("no changes found\n")
    ui.status(msg)
def callcatch(ui, func):
    """call func() with global exception handling
    Returns func()'s result if no exception happens; otherwise reports the
    error on ui and returns an exit code. Does not handle all exceptions:
    unhandled ones are re-raised after ui.traceback() is invoked.
    """
    try:
        try:
            return func()
        except: # re-raises
            ui.traceback()
            raise
    # Global exception handling, alphabetically
    # Mercurial-specific first, followed by built-in and library exceptions
    except error.LockHeld as inst:
        if inst.errno == errno.ETIMEDOUT:
            reason = _('timed out waiting for lock held by %r') % (
                pycompat.bytestr(inst.locker))
        else:
            reason = _('lock held by %r') % inst.locker
        ui.error(_("abort: %s: %s\n") % (
            inst.desc or stringutil.forcebytestr(inst.filename), reason))
        if not inst.locker:
            ui.error(_("(lock might be very busy)\n"))
    except error.LockUnavailable as inst:
        ui.error(_("abort: could not lock %s: %s\n") %
                 (inst.desc or stringutil.forcebytestr(inst.filename),
                  encoding.strtolocal(inst.strerror)))
    except error.OutOfBandError as inst:
        if inst.args:
            msg = _("abort: remote error:\n")
        else:
            msg = _("abort: remote error\n")
        ui.error(msg)
        if inst.args:
            ui.error(''.join(inst.args))
        if inst.hint:
            ui.error('(%s)\n' % inst.hint)
    except error.RepoError as inst:
        ui.error(_("abort: %s!\n") % inst)
        if inst.hint:
            ui.error(_("(%s)\n") % inst.hint)
    except error.ResponseError as inst:
        ui.error(_("abort: %s") % inst.args[0])
        msg = inst.args[1]
        # The payload may be unicode, bytes, or anything else; render each
        # case differently.
        if isinstance(msg, type(u'')):
            msg = pycompat.sysbytes(msg)
        if not isinstance(msg, bytes):
            ui.error(" %r\n" % (msg,))
        elif not msg:
            ui.error(_(" empty string\n"))
        else:
            ui.error("\n%r\n" % pycompat.bytestr(stringutil.ellipsis(msg)))
    except error.CensoredNodeError as inst:
        ui.error(_("abort: file censored %s!\n") % inst)
    except error.StorageError as inst:
        ui.error(_("abort: %s!\n") % inst)
    except error.InterventionRequired as inst:
        ui.error("%s\n" % inst)
        if inst.hint:
            ui.error(_("(%s)\n") % inst.hint)
        # InterventionRequired deliberately exits 1 rather than -1.
        return 1
    except error.WdirUnsupported:
        ui.error(_("abort: working directory revision cannot be specified\n"))
    except error.Abort as inst:
        ui.error(_("abort: %s\n") % inst)
        if inst.hint:
            ui.error(_("(%s)\n") % inst.hint)
    except ImportError as inst:
        ui.error(_("abort: %s!\n") % stringutil.forcebytestr(inst))
        # Last word of the message is the module that failed to import.
        m = stringutil.forcebytestr(inst).split()[-1]
        if m in "mpatch bdiff".split():
            ui.error(_("(did you forget to compile extensions?)\n"))
        elif m in "zlib".split():
            ui.error(_("(is your Python install correct?)\n"))
    except IOError as inst:
        if util.safehasattr(inst, "code"):
            ui.error(_("abort: %s\n") % stringutil.forcebytestr(inst))
        elif util.safehasattr(inst, "reason"):
            try: # usually it is in the form (errno, strerror)
                reason = inst.reason.args[1]
            except (AttributeError, IndexError):
                # it might be anything, for example a string
                reason = inst.reason
            if isinstance(reason, pycompat.unicode):
                # SSLError of Python 2.7.9 contains a unicode
                reason = encoding.unitolocal(reason)
            ui.error(_("abort: error: %s\n") % reason)
        elif (util.safehasattr(inst, "args")
              and inst.args and inst.args[0] == errno.EPIPE):
            # Broken pipe: the reader went away; nothing useful to report.
            pass
        elif getattr(inst, "strerror", None):
            if getattr(inst, "filename", None):
                ui.error(_("abort: %s: %s\n") % (
                    encoding.strtolocal(inst.strerror),
                    stringutil.forcebytestr(inst.filename)))
            else:
                ui.error(_("abort: %s\n") % encoding.strtolocal(inst.strerror))
        else:
            raise
    except OSError as inst:
        if getattr(inst, "filename", None) is not None:
            ui.error(_("abort: %s: '%s'\n") % (
                encoding.strtolocal(inst.strerror),
                stringutil.forcebytestr(inst.filename)))
        else:
            ui.error(_("abort: %s\n") % encoding.strtolocal(inst.strerror))
    except MemoryError:
        ui.error(_("abort: out of memory\n"))
    except SystemExit as inst:
        # Commands shouldn't sys.exit directly, but give a return code.
        # Just in case, catch this and pass the exit code to the caller.
        return inst.code
    except socket.error as inst:
        ui.error(_("abort: %s\n") % stringutil.forcebytestr(inst.args[-1]))
    return -1
def checkfilename(f):
    '''Reject filename f if Mercurial cannot track it.

    Raises error.Abort when f contains a newline or carriage return.
    '''
    if '\n' in f or '\r' in f:
        message = _("'\\n' and '\\r' disallowed in filenames: %r")
        raise error.Abort(message % pycompat.bytestr(f))
def checkportable(ui, f):
    '''Check if filename f is portable and warn or abort depending on config'''
    checkfilename(f)
    abort, warn = checkportabilityalert(ui)
    if not (abort or warn):
        return
    problem = util.checkwinfilename(f)
    if not problem:
        return
    msg = "%s: %s" % (problem, procutil.shellquote(f))
    if abort:
        raise error.Abort(msg)
    ui.warn(_("warning: %s\n") % msg)
def checkportabilityalert(ui):
    '''check if the user's config requests nothing, a warning, or abort for
    non-portable filenames'''
    setting = ui.config('ui', 'portablefilenames')
    lowered = setting.lower()
    asbool = stringutil.parsebool(setting)
    # On Windows non-portable names always abort; elsewhere honour config.
    abort = pycompat.iswindows or lowered == 'abort'
    warn = asbool or lowered == 'warn'
    recognized = warn or abort or lowered == 'ignore'
    if asbool is None and not recognized:
        raise error.ConfigError(
            _("ui.portablefilenames value is invalid ('%s')") % setting)
    return abort, warn
def filteredhash(repo, maxrev):
    """Build a hash of the revisions filtered from the current repoview.

    Caches validate themselves by comparing the stored tiprev/tipnode with
    the repository, but that is not enough for repoviews: the set of
    filtered revisions can change while tiprev/tipnode stay the same.  To
    detect that, hash every filtered revision <= maxrev and return the
    SHA-1 digest (or None when nothing is filtered).
    """
    cl = repo.changelog
    if not cl.filteredrevs:
        return None
    hidden = sorted(r for r in cl.filteredrevs if r <= maxrev)
    if not hidden:
        return None
    digest = hashlib.sha1()
    for rev in hidden:
        digest.update('%d;' % rev)
    return digest.digest()
def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
    '''yield every hg repository under path, always recursively.
    The recurse flag will only control recursion into repo working dirs'''
    samestat = getattr(os.path, 'samestat', None)
    if followsym and samestat is not None:
    # NOTE(review): the body of the 'if' above is missing -- upstream
    # defines the nested 'adddir' and 'errhandler' helpers here (both are
    # referenced below but undefined in this copy).  As written this is a
    # syntax error; restore the helpers rather than guessing at them.
    else:
        followsym = False
    if (seen_dirs is None) and followsym:
        seen_dirs = []
        adddir(seen_dirs, path)
    for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
        # sort for deterministic traversal order
        dirs.sort()
        if '.hg' in dirs:
            yield root # found a repository
            qroot = os.path.join(root, '.hg', 'patches')
            if os.path.isdir(os.path.join(qroot, '.hg')):
                yield qroot # we have a patch queue repo here
            if recurse:
                # avoid recursing inside the .hg directory
                dirs.remove('.hg')
            else:
                dirs[:] = [] # don't descend further
        elif followsym:
            newdirs = []
            for d in dirs:
                fname = os.path.join(root, d)
                # only descend into directories not seen before (symlink
                # cycles would otherwise loop forever)
                if adddir(seen_dirs, fname):
                    if os.path.islink(fname):
                        for hgname in walkrepos(fname, True, seen_dirs):
                            yield hgname
                    else:
                        newdirs.append(d)
            dirs[:] = newdirs
def binnode(ctx):
    """Return the binary node id of *ctx*, mapping the working directory
    context (whose node is None) to the ``wdirid`` sentinel."""
    nodeid = ctx.node()
    return wdirid if nodeid is None else nodeid
def intrev(ctx):
    """Return an integer revision for *ctx* that is safe to compare or do
    arithmetic on, mapping the working directory (rev None) to ``wdirrev``."""
    current = ctx.rev()
    return wdirrev if current is None else current
def formatchangeid(ctx):
    """Render a changectx as '{rev}:{node|formatnode}', the default template
    used by logcmdutil.changesettemplater."""
    ui = ctx.repo().ui
    return formatrevnode(ui, intrev(ctx), binnode(ctx))
def formatrevnode(ui, rev, node):
    """Format the given revision and node according to current verbosity."""
    # Full hashes in debug mode, abbreviated ones otherwise.
    hexfunc = hex if ui.debugflag else short
    return '%d:%s' % (rev, hexfunc(node))
def mayberevnum(repo, prefix):
    """Checks if the given prefix may be mistaken for a revision number"""
    try:
        value = int(prefix)
    except ValueError:
        return False
    # A printed revnum never has a leading zero (except for revision '0'
    # itself), and can never reach len(repo); anything else is ambiguous.
    if prefix != b'0' and prefix[0:1] == b'0':
        return False
    return value < len(repo)
def shortesthexnodeidprefix(repo, node, minlength=1, cache=None):
    """Find the shortest unambiguous prefix that matches hexnode.
    If "cache" is not None, it must be a dictionary that can be used for
    caching between calls to this method.
    """
    # _partialmatch() of filtered changelog could take O(len(repo)) time,
    # which would be unacceptably slow. so we look for hash collision in
    # unfiltered space, which means some hashes may be slightly longer.
    minlength=max(minlength, 1)
    def disambiguate(prefix):
        """Disambiguate against revnums."""
        if repo.ui.configbool('experimental', 'revisions.prefixhexnode'):
            # 'x'-prefix scheme: mark number-like prefixes explicitly
            if mayberevnum(repo, prefix):
                return 'x' + prefix
            else:
                return prefix
        # otherwise lengthen the prefix until it cannot be read as a revnum
        hexnode = hex(node)
        for length in range(len(prefix), len(hexnode) + 1):
            prefix = hexnode[:length]
            if not mayberevnum(repo, prefix):
                return prefix
    cl = repo.unfiltered().changelog
    revset = repo.ui.config('experimental', 'revisions.disambiguatewithin')
    if revset:
        # only disambiguate against the configured subset of revisions
        revs = None
        if cache is not None:
            revs = cache.get('disambiguationrevset')
        if revs is None:
            revs = repo.anyrevs([revset], user=True)
            if cache is not None:
                cache['disambiguationrevset'] = revs
        if cl.rev(node) in revs:
            hexnode = hex(node)
            nodetree = None
            if cache is not None:
                nodetree = cache.get('disambiguationnodetree')
            if not nodetree:
                try:
                    nodetree = parsers.nodetree(cl.index, len(revs))
                except AttributeError:
                    # no native nodetree
                    pass
                else:
                    for r in revs:
                        nodetree.insert(r)
                    if cache is not None:
                        cache['disambiguationnodetree'] = nodetree
            if nodetree is not None:
                # fast path: the C nodetree computes the shortest prefix
                length = max(nodetree.shortest(node), minlength)
                prefix = hexnode[:length]
                return disambiguate(prefix)
            # slow path: compare hex prefixes against every rev in the set
            for length in range(minlength, len(hexnode) + 1):
                matches = []
                prefix = hexnode[:length]
                for rev in revs:
                    otherhexnode = repo[rev].hex()
                    if prefix == otherhexnode[:length]:
                        matches.append(otherhexnode)
                if len(matches) == 1:
                    return disambiguate(prefix)
    try:
        return disambiguate(cl.shortest(node, minlength))
    except error.LookupError:
        raise error.RepoLookupError()
def isrevsymbol(repo, symbol):
    """Tell whether *symbol* resolves in the repo.

    See revsymbol() for details.  Raises error.AmbiguousPrefixLookupError
    if the symbol is an ambiguous nodeid prefix.
    """
    try:
        revsymbol(repo, symbol)
    except error.RepoLookupError:
        return False
    return True
def revsymbol(repo, symbol):
    """Returns a context given a single revision symbol (as string).
    This is similar to revsingle(), but accepts only a single revision symbol,
    i.e. things like ".", "tip", "1234", "deadbeef", "my-bookmark" work, but
    not "max(public())".
    """
    if not isinstance(symbol, bytes):
        msg = ("symbol (%s of type %s) was not a string, did you mean "
               "repo[symbol]?" % (symbol, type(symbol)))
        raise error.ProgrammingError(msg)
    try:
        # well-known symbolic names first
        if symbol in ('.', 'tip', 'null'):
            return repo[symbol]
        # then try to interpret the symbol as a revision number
        try:
            r = int(symbol)
            # reject forms like '010' that don't round-trip through int()
            if '%d' % r != symbol:
                raise ValueError
            l = len(repo.changelog)
            if r < 0:
                r += l
            if r < 0 or r >= l and r != wdirrev:
                raise ValueError
            return repo[r]
        except error.FilteredIndexError:
            # propagate so callers get the "filtered" diagnostics below
            raise
        except (ValueError, OverflowError, IndexError):
            pass
        # full 40-digit hex nodeid
        if len(symbol) == 40:
            try:
                node = bin(symbol)
                rev = repo.changelog.rev(node)
                return repo[rev]
            except error.FilteredLookupError:
                raise
            except (TypeError, LookupError):
                pass
        # look up bookmarks through the name interface
        try:
            node = repo.names.singlenode(repo, symbol)
            rev = repo.changelog.rev(node)
            return repo[rev]
        except KeyError:
            pass
        # finally, a unique hex nodeid prefix
        node = resolvehexnodeidprefix(repo, symbol)
        if node is not None:
            rev = repo.changelog.rev(node)
            return repo[rev]
        raise error.RepoLookupError(_("unknown revision '%s'") % symbol)
    except error.WdirUnsupported:
        return repo[None]
    except (error.FilteredIndexError, error.FilteredLookupError,
            error.FilteredRepoLookupError):
        # rewrite the error with a friendlier "hidden/filtered" message
        raise _filterederror(repo, symbol)
def _filterederror(repo, changeid):
    """Build the exception to raise for a filtered changeid.

    Kept as a separate function so extensions (e.g. evolve) can experiment
    with alternative message variants."""
    if not repo.filtername.startswith('visible'):
        msg = _("filtered revision '%s' (not in '%s' subset)")
        msg %= (changeid, repo.filtername)
        return error.FilteredRepoLookupError(msg)
    # 'visible' filter: explain *why* the changeset is hidden when we can
    ctx = revsymbol(repo.unfiltered(), changeid)
    if ctx.obsolete():
        # enrich the message with the reason the changeset was obsoleted
        msg = obsutil._getfilteredreason(repo, changeid, ctx)
    else:
        msg = _("hidden revision '%s'") % changeid
    return error.FilteredRepoLookupError(
        msg, hint=_('use --hidden to access hidden revisions'))
def revrange(repo, specs, localalias=None):
    """Execute 1 to many revsets and return the union.

    This is the preferred mechanism for executing revsets using
    user-specified config options, such as revset aliases.  The revsets in
    ``specs`` are executed via a chained ``OR`` expression; an empty
    ``specs`` yields an empty result.  Integers in ``specs`` are taken as
    revision numbers.  The revsets are assumed to be formatted already --
    use ``revsetlang.formatspec()`` for argument expansion.  Returns a
    ``revset.abstractsmartset``, a list-like interface over integer
    revisions.
    """
    def asspec(item):
        # promote bare revision numbers to a revset expression
        if isinstance(item, int):
            return revsetlang.formatspec('rev(%d)', item)
        return item
    allspecs = [asspec(spec) for spec in specs]
    return repo.anyrevs(allspecs, user=True, localalias=localalias)
def meaningfulparents(repo, ctx):
    """Return the list of meaningful (or all, if debugging) parents of *ctx*.

    Both parents of a merge are always meaningful.  Otherwise the single
    parent is meaningful only when it is not the immediately preceding
    revision.
    """
    ps = ctx.parents()
    if len(ps) > 1:
        return ps
    if repo.ui.debugflag:
        # debug output always shows both slots, padding with nullrev
        return [ps[0], repo[nullrev]]
    if ps[0].rev() >= intrev(ctx) - 1:
        # parent is simply the previous revision: omit it
        return []
    return ps
def expandpats(pats):
    '''Expand bare globs when running on windows.
    On posix the shell is assumed to have expanded them already.'''
    if not util.expandglobs:
        return list(pats)
    expanded = []
    for kindpat in pats:
        kind, pat = matchmod._patsplit(kindpat, None)
        matches = None
        if kind is None:
            # bare pattern (no 'glob:'/'re:' prefix): let glob expand it
            try:
                matches = glob.glob(pat)
            except re.error:
                # not a valid glob -- keep the literal pattern
                matches = [pat]
        if matches:
            expanded.extend(matches)
        else:
            expanded.append(kindpat)
    return expanded
def matchandpats(ctx, pats=(), opts=None, globbed=False, default='relpath',
                 badfn=None):
    '''Return a matcher and the patterns that were used.
    The matcher will warn about bad matches, unless an alternate badfn callback
    is provided.'''
    if pats == ("",):
        pats = []
    if opts is None:
        opts = {}
    if not globbed and default == 'relpath':
        pats = expandpats(pats or [])
    if badfn is None:
        # NOTE(review): 'bad' is undefined here.  Upstream defines a nested
        # "def bad(f, msg)" warning helper just above this assignment; it
        # appears to have been lost in this copy -- restore it.
        badfn = bad
    m = ctx.match(pats, opts.get('include'), opts.get('exclude'),
                  default, listsubrepos=opts.get('subrepos'), badfn=badfn)
    if m.always():
        # a match-everything matcher makes the pattern list irrelevant
        pats = []
    return m, pats
def match(ctx, pats=(), opts=None, globbed=False, default='relpath',
          badfn=None):
    '''Return a matcher that will warn about bad matches.'''
    m, _pats = matchandpats(ctx, pats, opts, globbed, default, badfn=badfn)
    return m
def matchall(repo):
    '''Return a matcher that efficiently matches every file in the repo.'''
    cwd = repo.getcwd()
    return matchmod.always(repo.root, cwd)
def matchfiles(repo, files, badfn=None):
    '''Return a matcher that efficiently matches exactly the given files.'''
    cwd = repo.getcwd()
    return matchmod.exact(repo.root, cwd, files, badfn=badfn)
def parsefollowlinespattern(repo, rev, pat, msg):
    """Return the single file name designated by `pat`, suitable for use in
    followlines logic.  Raises error.ParseError(msg) when a pattern matches
    anything but exactly one file.
    """
    if not matchmod.patkind(pat):
        # plain path: just normalize it against the repo root
        return pathutil.canonpath(repo.root, repo.getcwd(), pat)
    ctx = repo[rev]
    m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=ctx)
    matching = [f for f in ctx if m(f)]
    if len(matching) != 1:
        # followlines requires exactly one file
        raise error.ParseError(msg)
    return matching[0]
def origpath(ui, repo, filepath):
    '''customize where .orig files are created
    Fetch user defined path from config file: [ui] origbackuppath = <path>
    Fall back to default (filepath with .orig suffix) if not specified
    '''
    origbackuppath = ui.config('ui', 'origbackuppath')
    if not origbackuppath:
        return filepath + ".orig"
    # Convert filepath from an absolute path into a path inside the repo.
    filepathfromroot = util.normpath(os.path.relpath(filepath,
                                                     start=repo.root))
    origvfs = vfs.vfs(repo.wjoin(origbackuppath))
    origbackupdir = origvfs.dirname(filepathfromroot)
    if not origvfs.isdir(origbackupdir) or origvfs.islink(origbackupdir):
        ui.note(_('creating directory: %s\n') % origvfs.join(origbackupdir))
        # Remove any files that conflict with the backup file's path
        # (walk ancestors deepest-first; only the first conflicting entry
        # needs removing, makedirs recreates the rest)
        for f in reversed(list(util.finddirs(filepathfromroot))):
            if origvfs.isfileorlink(f):
                ui.note(_('removing conflicting file: %s\n')
                        % origvfs.join(f))
                origvfs.unlink(f)
                break
        origvfs.makedirs(origbackupdir)
    # a directory (but not a symlink) at the backup path would shadow the
    # backup file -- clear it out
    if origvfs.isdir(filepathfromroot) and not origvfs.islink(filepathfromroot):
        ui.note(_('removing conflicting directory: %s\n')
                % origvfs.join(filepathfromroot))
        origvfs.rmtree(filepathfromroot, forcibly=True)
    return origvfs.join(filepathfromroot)
class _containsnode(object):
    """proxy __contains__(node) to container.__contains__ which accepts revs"""
    # NOTE(review): upstream defines __init__(self, repo, revcontainer) and
    # __contains__(self, node) here; they appear to be missing from this
    # copy (the class is instantiated as _containsnode(repo, deleterevs)
    # further down) -- restore them.
def cleanupnodes(repo, replacements, operation, moves=None, metadata=None,
                 fixphase=False, targetphase=None, backup=True):
    """do common cleanups when old nodes are replaced by new nodes
    That includes writing obsmarkers or stripping nodes, and moving bookmarks.
    (we might also want to move working directory parent in the future)
    By default, bookmark moves are calculated automatically from 'replacements',
    but 'moves' can be used to override that. Also, 'moves' may include
    additional bookmark moves that should not have associated obsmarkers.
    replacements is {oldnode: [newnode]} or a iterable of nodes if they do not
    have replacements. operation is a string, like "rebase".
    metadata is dictionary containing metadata to be stored in obsmarker if
    obsolescence is enabled.
    """
    assert fixphase or targetphase is None
    if not replacements and not moves:
        return
    # translate mapping's other forms
    if not util.safehasattr(replacements, 'items'):
        replacements = {(n,): () for n in replacements}
    else:
        # upgrading non tuple "source" to tuple ones for BC
        repls = {}
        for key, value in replacements.items():
            if not isinstance(key, tuple):
                key = (key,)
            repls[key] = value
        replacements = repls
    # Calculate bookmark movements
    if moves is None:
        moves = {}
    # Unfiltered repo is needed since nodes in replacements might be hidden.
    unfi = repo.unfiltered()
    for oldnodes, newnodes in replacements.items():
        for oldnode in oldnodes:
            if oldnode in moves:
                continue
            if len(newnodes) > 1:
                # usually a split, take the one with biggest rev number
                newnode = next(unfi.set('max(%ln)', newnodes)).node()
            elif len(newnodes) == 0:
                # move bookmark backwards
                allreplaced = []
                for rep in replacements:
                    allreplaced.extend(rep)
                roots = list(unfi.set('max((::%n) - %ln)', oldnode,
                                      allreplaced))
                if roots:
                    newnode = roots[0].node()
                else:
                    newnode = nullid
            else:
                newnode = newnodes[0]
            moves[oldnode] = newnode
    allnewnodes = [n for ns in replacements.values() for n in ns]
    toretract = {}
    toadvance = {}
    if fixphase:
        precursors = {}
        for oldnodes, newnodes in replacements.items():
            for oldnode in oldnodes:
                for newnode in newnodes:
                    precursors.setdefault(newnode, []).append(oldnode)
        # process new nodes parent-first so a parent's pending phase is
        # already recorded when its children are handled
        allnewnodes.sort(key=lambda n: unfi[n].rev())
        newphases = {}
        # NOTE(review): upstream defines the nested helper
        #   def phase(ctx): return newphases.get(ctx.node(), ctx.phase())
        # right here; it is referenced below but missing from this copy.
        for newnode in allnewnodes:
            ctx = unfi[newnode]
            parentphase = max(phase(p) for p in ctx.parents())
            if targetphase is None:
                oldphase = max(unfi[oldnode].phase()
                               for oldnode in precursors[newnode])
                newphase = max(oldphase, parentphase)
            else:
                newphase = max(targetphase, parentphase)
            newphases[newnode] = newphase
            if newphase > ctx.phase():
                toretract.setdefault(newphase, []).append(newnode)
            elif newphase < ctx.phase():
                toadvance.setdefault(newphase, []).append(newnode)
    with repo.transaction('cleanup') as tr:
        # Move bookmarks
        bmarks = repo._bookmarks
        bmarkchanges = []
        for oldnode, newnode in moves.items():
            oldbmarks = repo.nodebookmarks(oldnode)
            if not oldbmarks:
                continue
            from . import bookmarks # avoid import cycle
            repo.ui.debug('moving bookmarks %r from %s to %s\n' %
                          (pycompat.rapply(pycompat.maybebytestr, oldbmarks),
                           hex(oldnode), hex(newnode)))
            # Delete divergent bookmarks being parents of related newnodes
            deleterevs = repo.revs('parents(roots(%ln & (::%n))) - parents(%n)',
                                   allnewnodes, newnode, oldnode)
            deletenodes = _containsnode(repo, deleterevs)
            for name in oldbmarks:
                bmarkchanges.append((name, newnode))
                for b in bookmarks.divergent2delete(repo, deletenodes, name):
                    bmarkchanges.append((b, None))
        if bmarkchanges:
            bmarks.applychanges(repo, tr, bmarkchanges)
        for phase, nodes in toretract.items():
            phases.retractboundary(repo, tr, phase, nodes)
        for phase, nodes in toadvance.items():
            phases.advanceboundary(repo, tr, phase, nodes)
        # Obsolete or strip nodes
        if obsolete.isenabled(repo, obsolete.createmarkersopt):
            # If a node is already obsoleted, and we want to obsolete it
            # without a successor, skip that obssolete request since it's
            # unnecessary. That's the "if s or not isobs(n)" check below.
            # Also sort the node in topology order, that might be useful for
            # some obsstore logic.
            # NOTE: the sorting might belong to createmarkers.
            torev = unfi.changelog.rev
            sortfunc = lambda ns: torev(ns[0][0])
            rels = []
            for ns, s in sorted(replacements.items(), key=sortfunc):
                rel = (tuple(unfi[n] for n in ns), tuple(unfi[m] for m in s))
                rels.append(rel)
            if rels:
                obsolete.createmarkers(repo, rels, operation=operation,
                                       metadata=metadata)
        else:
            from . import repair # avoid import cycle
            tostrip = list(n for ns in replacements for n in ns)
            if tostrip:
                repair.delayedstrip(repo.ui, repo, tostrip, operation,
                                    backup=backup)
def marktouched(repo, files, similarity=0.0):
    '''Assert that files have somehow been operated upon. files are relative to
    the repo root.

    Returns 1 if any touched file was rejected by the matcher, 0 otherwise.'''
    # Create the rejection list *before* the lambda that appends to it: the
    # original ordering defined it afterwards and only worked through late
    # binding of the closure variable, which is fragile under refactoring.
    rejected = []
    m = matchfiles(repo, files, badfn=lambda x, y: rejected.append(x))
    added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)
    if repo.ui.verbose:
        # report every file that will change tracking state
        unknownset = set(unknown + forgotten)
        toprint = unknownset.copy()
        toprint.update(deleted)
        for fname in sorted(toprint):
            if fname in unknownset:
                status = _('adding %s\n') % fname
            else:
                status = _('removing %s\n') % fname
            repo.ui.status(status)
    renames = _findrenames(repo, m, added + unknown, removed + deleted,
                           similarity)
    _markchanges(repo, unknown + forgotten, deleted, renames)
    # signal failure if an explicitly requested file was rejected
    for f in rejected:
        if f in m.files():
            return 1
    return 0
def _interestingfiles(repo, matcher):
    '''Walk dirstate with matcher, looking for files that addremove would care
    about.
    Unlike dirstate.status this does not care whether files are modified or
    clean.'''
    added, unknown, deleted, removed, forgotten = [], [], [], [], []
    auditor = pathutil.pathauditor(repo.root, cached=True)
    ctx = repo[None]
    dirstate = repo.dirstate
    matcher = repo.narrowmatch(matcher, includeexact=True)
    results = dirstate.walk(matcher, subrepos=sorted(ctx.substate),
                            unknown=True, ignored=False, full=False)
    for abs, st in results.iteritems():
        dstate = dirstate[abs]
        if dstate == '?' and auditor.check(abs):
            unknown.append(abs)
        elif dstate != 'r' and not st:
            # tracked (or unauditable-unknown) but gone from disk
            deleted.append(abs)
        elif dstate == 'r':
            # marked removed: present on disk => forgotten, absent => removed
            (forgotten if st else removed).append(abs)
        elif dstate == 'a':
            added.append(abs)
    return added, unknown, deleted, removed, forgotten
def _findrenames(repo, matcher, added, removed, similarity):
    '''Find renames from removed files to added ones.'''
    renames = {}
    if similarity <= 0:
        # similarity detection disabled
        return renames
    for old, new, score in similar.findrenames(repo, added, removed,
                                               similarity):
        if (repo.ui.verbose or not matcher.exact(old)
            or not matcher.exact(new)):
            repo.ui.status(_('recording removal of %s as rename to %s '
                             '(%d%% similar)\n') %
                           (matcher.rel(old), matcher.rel(new),
                            score * 100))
        renames[new] = old
    return renames
def _markchanges(repo, unknown, deleted, renames):
    '''Record unknown files as added, deleted files as removed, and entries
    of renames as copies, all under the working-copy lock.'''
    workingctx = repo[None]
    with repo.wlock():
        workingctx.forget(deleted)
        workingctx.add(unknown)
        for dst, src in renames.iteritems():
            workingctx.copy(src, dst)
def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
    """Update the dirstate to reflect the intent of copying src to dst. For
    different reasons it might not end with dst being marked as copied from src.
    """
    # follow an existing copy record so chains collapse to the true origin
    origsrc = repo.dirstate.copied(src) or src
    if dst == origsrc: # copying back a copy?
        if repo.dirstate[dst] not in 'mn' and not dryrun:
            repo.dirstate.normallookup(dst)
    else:
        if repo.dirstate[origsrc] == 'a' and origsrc == src:
            # source was only added, never committed: no copy data possible
            if not ui.quiet:
                ui.warn(_("%s has not been committed yet, so no copy "
                          "data will be stored for %s.\n")
                        % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd)))
            if repo.dirstate[dst] in '?r' and not dryrun:
                wctx.add([dst])
        elif not dryrun:
            wctx.copy(origsrc, dst)
class filecache(object):
    """A property like decorator that tracks files under .hg/ for updates.
    On first access, the files defined as arguments are stat()ed and the
    results cached. The decorated function is called. The results are stashed
    away in a ``_filecache`` dict on the object whose method is decorated.
    On subsequent access, the cached result is returned.
    On external property set operations, stat() calls are performed and the new
    value is cached.
    On property delete operations, cached data is removed.
    When using the property API, cached data is always returned, if available:
    no stat() is performed to check if the file has changed and if the function
    needs to be called to reflect file changes.
    Others can muck about with the state of the ``_filecache`` dict. e.g. they
    can populate an entry before the property's getter is called. In this case,
    entries in ``_filecache`` will be used during property operations,
    if available. If the underlying file changes, it is up to external callers
    to reflect this by e.g. calling ``delattr(obj, attr)`` to remove the cached
    method result as well as possibly calling ``del obj._filecache[attr]`` to
    remove the ``filecacheentry``.
    """
    # NOTE(review): only join() is present in this copy; the descriptor
    # machinery the docstring describes (__init__, __call__, __get__,
    # __set__, __delete__) appears to be missing -- confirm against
    # upstream before relying on this class.
    def join(self, obj, fname):
        """Used to compute the runtime path of a cached file.
        Users should subclass filecache and provide their own version of this
        function to call the appropriate join function on 'obj' (an instance
        of the class that its member function was decorated).
        """
        raise NotImplementedError
def extdatasource(repo, source):
    """Gather a map of rev -> value dict from the specified source
    A source spec is treated as a URL, with a special case shell: type
    for parsing the output from a shell command.
    The data is parsed as a series of newline-separated records where
    each record is a revision specifier optionally followed by a space
    and a freeform string value. If the revision is known locally, it
    is converted to a rev, otherwise the record is skipped.
    Note that both key and value are treated as UTF-8 and converted to
    the local encoding. This allows uniformity between local and
    remote data sources.

    Raises error.Abort for an unknown source or a failing shell command.
    """
    spec = repo.ui.config("extdata", source)
    if not spec:
        raise error.Abort(_("unknown extdata source '%s'") % source)
    data = {}
    src = proc = None
    try:
        if spec.startswith("shell:"):
            # external commands should be run relative to the repo root
            cmd = spec[6:]
            proc = subprocess.Popen(procutil.tonativestr(cmd),
                                    shell=True, bufsize=-1,
                                    close_fds=procutil.closefds,
                                    stdout=subprocess.PIPE,
                                    cwd=procutil.tonativestr(repo.root))
            src = proc.stdout
        else:
            # treat as a URL or file
            src = url.open(repo.ui, spec)
        for l in src:
            # "<revspec>[ <value>]" per line; missing value means ""
            if " " in l:
                k, v = l.strip().split(" ", 1)
            else:
                k, v = l.strip(), ""
            k = encoding.tolocal(k)
            try:
                data[revsingle(repo, k).rev()] = encoding.tolocal(v)
            except (error.LookupError, error.RepoLookupError):
                pass # we ignore data for nodes that don't exist locally
    finally:
        # always drain and close the pipe so the child can exit
        if proc:
            proc.communicate()
        if src:
            src.close()
    if proc and proc.returncode != 0:
        raise error.Abort(_("extdata command '%s' failed: %s")
                          % (cmd, procutil.explainexit(proc.returncode)))
    return data
def wlocksub(repo, cmd, *args, **kwargs):
    """Run cmd as a subprocess that is allowed to inherit the repo's wlock.

    May only be called while the wlock is held.  Accepts the same arguments
    as ui.system and returns the subprocess exit code."""
    lock = repo.currentwlock()
    return _locksub(repo, lock, 'HG_WLOCK_LOCKER', cmd, *args, **kwargs)
def gdinitconfig(ui):
    """Tell whether a new repo should be created with general delta."""
    # experimental config: format.generaldelta (plus the related knobs that
    # imply it)
    return any(ui.configbool('format', knob)
               for knob in ('generaldelta', 'usegeneraldelta',
                            'sparse-revlog'))
def gddeltaconfig(ui):
    """Tell whether incoming deltas should be optimised for general delta."""
    # experimental config: format.generaldelta
    return ui.configbool('format', 'generaldelta')
class simplekeyvaluefile(object):
    """A simple file with key=value lines
    Keys must be alphanumerics and start with a letter, values must not
    contain '\n' characters"""
    # NOTE(review): upstream defines __init__(self, vfs, path, keys=None)
    # setting self.vfs and self.path; it appears to be missing from this
    # copy -- both methods below rely on those attributes.
    # key under which a non-key-value first line is returned by read()
    firstlinekey = '__firstline'
    def read(self, firstlinenonkeyval=False):
        """Read the contents of a simple key-value file
        'firstlinenonkeyval' indicates whether the first line of file should
        be treated as a key-value pair or returned fully under the
        __firstline key."""
        lines = self.vfs.readlines(self.path)
        d = {}
        if firstlinenonkeyval:
            if not lines:
                e = _("empty simplekeyvalue file")
                raise error.CorruptedState(e)
            # we don't want to include '\n' in the __firstline
            d[self.firstlinekey] = lines[0][:-1]
            del lines[0]
        try:
            # the 'if line.strip()' part prevents us from failing on empty
            # lines which only contain '\n' therefore are not skipped
            # by 'if line'
            updatedict = dict(line[:-1].split('=', 1) for line in lines
                              if line.strip())
            if self.firstlinekey in updatedict:
                e = _("%r can't be used as a key")
                raise error.CorruptedState(e % self.firstlinekey)
            d.update(updatedict)
        except ValueError as e:
            raise error.CorruptedState(str(e))
        return d
    def write(self, data, firstline=None):
        """Write key=>value mapping to a file
        data is a dict. Keys must be alphanumerical and start with a letter.
        Values must not contain newline characters.
        If 'firstline' is not None, it is written to file before
        everything else, as it is, not in a key=value form"""
        lines = []
        if firstline is not None:
            lines.append('%s\n' % firstline)
        for k, v in data.items():
            # validate each pair so a bad caller fails loudly, not on read
            if k == self.firstlinekey:
                e = "key name '%s' is reserved" % self.firstlinekey
                raise error.ProgrammingError(e)
            if not k[0:1].isalpha():
                e = "keys must start with a letter in a key-value file"
                raise error.ProgrammingError(e)
            if not k.isalnum():
                e = "invalid key name in a simple key-value file"
                raise error.ProgrammingError(e)
            if '\n' in v:
                e = "invalid value in a simple key-value file"
                raise error.ProgrammingError(e)
            lines.append("%s=%s\n" % (k, v))
        with self.vfs(self.path, mode='wb', atomictemp=True) as fp:
            fp.write(''.join(lines))
# transaction sources whose summaries should report obsoleted changesets
_reportobsoletedsource = [
    'debugobsolete',
    'pull',
    'push',
    'serve',
    'unbundle',
]
# transaction sources whose summaries should report new changesets
_reportnewcssource = [
    'pull',
    'unbundle',
]
def prefetchfiles(repo, revs, match):
    """Invoke the registered file prefetch functions so extensions can make
    the corresponding files available locally before the command uses
    them."""
    if not match:
        match = matchall(repo)
    else:
        # The command itself will complain about files that don't exist, so
        # silence this matcher's bad-file warnings to avoid duplicates.
        match = matchmod.badmatch(match, lambda fn, msg: None)
    fileprefetchhooks(repo, revs, match)
# a list of (repo, revs, match) prefetch functions invoked by prefetchfiles()
fileprefetchhooks = util.hooks()
# A marker that tells the evolve extension to suppress its own reporting
_reportstroubledchangesets = True
def registersummarycallback(repo, otr, txnname=''):
    """register a callback to issue a summary after the transaction is closed
    """
    # NOTE(review): this function is mangled in this copy.  Upstream defines
    # a nested "def txmatch(sources)" helper (used below but undefined
    # here), 'reportsummary' wraps 'func' in a nested "def wrapped(tr)"
    # (also undefined here), and the bare '@reportsummary' decorators below
    # precede 'if' statements instead of function definitions, which is a
    # syntax error.  Restore the missing nested functions from upstream.
    categories = []
    def reportsummary(func):
        """decorator for report callbacks."""
        # The repoview life cycle is shorter than the one of the actual
        # underlying repository. So the filtered object can die before the
        # weakref is used leading to troubles. We keep a reference to the
        # unfiltered object and restore the filtering when retrieving the
        # repository through the weakref.
        filtername = repo.filtername
        reporef = weakref.ref(repo.unfiltered())
        newcat = '%02i-txnreport' % len(categories)
        otr.addpostclose(newcat, wrapped)
        categories.append(newcat)
        return wrapped
    if txmatch(_reportobsoletedsource):
        @reportsummary
    # NOTE(review): the decorated 'reportobsoleted' callback is missing here.
    if (obsolete.isenabled(repo, obsolete.createmarkersopt) and
        repo.ui.configbool('experimental', 'evolution.report-instabilities')):
        instabilitytypes = [
            ('orphan', 'orphan'),
            ('phase-divergent', 'phasedivergent'),
            ('content-divergent', 'contentdivergent'),
        ]
        oldinstabilitycounts = getinstabilitycounts(repo)
        @reportsummary
    # NOTE(review): the decorated 'reportnewinstabilities' callback is
    # missing here (as is the 'getinstabilitycounts' helper it uses).
    if txmatch(_reportnewcssource):
        @reportsummary
        def reportnewcs(repo, tr):
            """Report the range of new revisions pulled/unbundled."""
            origrepolen = tr.changes.get('origrepolen', len(repo))
            unfi = repo.unfiltered()
            if origrepolen >= len(unfi):
                return
            # Compute the bounds of new visible revisions' range.
            revs = smartset.spanset(repo, start=origrepolen)
            if revs:
                minrev, maxrev = repo[revs.min()], repo[revs.max()]
                if minrev == maxrev:
                    revrange = minrev
                else:
                    revrange = '%s:%s' % (minrev, maxrev)
                draft = len(repo.revs('%ld and draft()', revs))
                secret = len(repo.revs('%ld and secret()', revs))
                if not (draft or secret):
                    msg = _('new changesets %s\n') % revrange
                elif draft and secret:
                    msg = _('new changesets %s (%d drafts, %d secrets)\n')
                    msg %= (revrange, draft, secret)
                elif draft:
                    msg = _('new changesets %s (%d drafts)\n')
                    msg %= (revrange, draft)
                elif secret:
                    msg = _('new changesets %s (%d secrets)\n')
                    msg %= (revrange, secret)
                else:
                    errormsg = 'entered unreachable condition'
                    raise error.ProgrammingError(errormsg)
                repo.ui.status(msg)
            # search new changesets directly pulled as obsolete
            duplicates = tr.changes.get('revduplicates', ())
            obsadded = unfi.revs('(%d: + %ld) and obsolete()',
                                 origrepolen, duplicates)
            cl = repo.changelog
            extinctadded = [r for r in obsadded if r not in cl]
            if extinctadded:
                # They are not just obsolete, but obsolete and invisible
                # we call them "extinct" internally but the terms have not been
                # exposed to users.
                msg = '(%d other changesets obsolete on arrival)\n'
                repo.ui.status(msg % len(extinctadded))
        @reportsummary
        def reportphasechanges(repo, tr):
            """Report statistics of phase changes for changesets pre-existing
            pull/unbundle.
            """
            origrepolen = tr.changes.get('origrepolen', len(repo))
            phasetracking = tr.changes.get('phases', {})
            if not phasetracking:
                return
            published = [
                rev for rev, (old, new) in phasetracking.iteritems()
                if new == phases.public and rev < origrepolen
            ]
            if not published:
                return
            repo.ui.status(_('%d local changesets published\n')
                           % len(published))
def getinstabilitymessage(delta, instability):
    """Return the warning message for *delta* changesets newly affected by
    the given *instability* kind, or None when delta is not positive.

    Kept as a separate function so extensions can wrap it to add more
    information, e.g. how to fix the instabilities."""
    if delta <= 0:
        return None
    return _('%i new %s changesets\n') % (delta, instability)
def enforcesinglehead(repo, tr, desc):
    """Abort the transaction when any named branch ends up with several
    heads."""
    if desc in ('strip', 'repair'):
        # strip/repair legitimately pass through multi-head states
        return
    visible = repo.filtered('visible')
    # possible improvement: restrict the check to the affected branches only
    for branch, heads in visible.branchmap().iteritems():
        if len(heads) <= 1:
            continue
        msg = _('rejecting multiple heads on branch "%s"') % branch
        hint = _('%d heads: %s') % (len(heads), nodesummaries(repo, heads))
        raise error.Abort(msg, hint=hint)
def wrapconvertsink(sink):
    """Hook point letting extensions wrap the sink returned by
    convcmd.convertsink() before it is used, whether or not the convert
    extension was formally loaded.  The default implementation is the
    identity."""
    return sink
def unhidehashlikerevs(repo, specs, hiddentype):
    """parse the user specs and unhide changesets whose hash or revision number
    is passed.
    hiddentype can be: 1) 'warn': warn while unhiding changesets
                       2) 'nowarn': don't warn while unhiding changesets
    returns a repo object with the required changesets unhidden
    """
    if not repo.filtername or not repo.ui.configbool('experimental',
                                                     'directaccess'):
        return repo
    if repo.filtername not in ('visible', 'visible-hidden'):
        return repo
    # collect every hash-like symbol mentioned in the specs
    symbols = set()
    for spec in specs:
        try:
            tree = revsetlang.parse(spec)
        except error.ParseError: # will be reported by scmutil.revrange()
            continue
        symbols.update(revsetlang.gethashlikesymbols(tree))
    if not symbols:
        return repo
    revs = _getrevsfromsymbols(repo, symbols)
    if not revs:
        return repo
    if hiddentype == 'warn':
        unfi = repo.unfiltered()
        revstr = ", ".join([pycompat.bytestr(unfi[l]) for l in revs])
        repo.ui.warn(_("warning: accessing hidden changesets for write "
                       "operation: %s\n") % revstr)
    # we have to use new filtername to separate branch/tags cache until we can
    # disable these cache when revisions are dynamically pinned.
    return repo.filtered('visible-hidden', revs)
def _getrevsfromsymbols(repo, symbols):
    """parse the list of symbols and returns a set of revision numbers of hidden
    changesets present in symbols"""
    revs = set()
    unfi = repo.unfiltered()
    unficl = unfi.changelog
    cl = repo.changelog
    tiprev = len(unficl)
    # numeric-revision access is gated behind its own experimental knob
    allowrevnums = repo.ui.configbool('experimental', 'directaccess.revnums')
    for s in symbols:
        # first, try to interpret the symbol as a revision number
        try:
            n = int(s)
            if n <= tiprev:
                if not allowrevnums:
                    continue
                else:
                    # hidden == present in the unfiltered changelog but not in cl
                    if n not in cl:
                        revs.add(n)
                    continue
        except ValueError:
            pass
        # otherwise, try to interpret it as a (possibly partial) hex node id
        try:
            s = resolvehexnodeidprefix(unfi, s)
        except (error.LookupError, error.WdirUnsupported):
            s = None
        if s is not None:
            rev = unficl.rev(s)
            if rev not in cl:
                revs.add(rev)
    return revs
def bookmarkrevs(repo, mark):
    """
    Select revisions reachable by a given bookmark
    """
    # revset: everything reachable from the bookmark, minus what is reachable
    # from other heads/bookmarks that do not carry this bookmark
    expr = ("ancestors(bookmark(%s)) - "
            "ancestors(head() and not bookmark(%s)) - "
            "ancestors(bookmark() and not bookmark(%s))")
    return repo.revs(expr, mark, mark, mark)
| [
2,
629,
21973,
346,
13,
9078,
532,
12185,
333,
498,
4755,
10361,
5499,
198,
2,
198,
2,
220,
15069,
4705,
16640,
439,
1279,
3149,
76,
31,
741,
35866,
13,
785,
29,
198,
2,
198,
2,
770,
3788,
743,
307,
973,
290,
9387,
1864,
284,
26... | 2.245906 | 22,716 |
"""The tests for the backports."""
| [
37811,
464,
5254,
329,
262,
736,
3742,
526,
15931,
198
] | 3.5 | 10 |
"""
Utility functions
=================
Includes functions to extract data from a zipped csv without a local download
"""
import requests
import pandas as pd
import io
from zipfile import ZipFile
def read_zip(url: str, file_name: str) -> pd.DataFrame:
    """
    Reads a csv from the web contained in a zip folder

    Parameters
    ----------
    url : str
        Zip folder url
    file_name: str
        CSV file name written as 'file_name.csv'

    Returns
    -------
    pandas dataframe object

    Raises
    ------
    ConnectionError
        If the archive could not be downloaded.
    ValueError
        If ``file_name`` is not present inside the downloaded archive.
    """
    try:
        response = requests.get(url)
    except requests.exceptions.RequestException as exc:
        # Bug fix: the old code caught the *builtin* ConnectionError, which
        # requests' network failures do not subclass, so download errors were
        # never translated. Chain the original exception for debuggability.
        raise ConnectionError("Could not read file") from exc
    archive = ZipFile(io.BytesIO(response.content))
    if file_name not in archive.NameToInfo:
        raise ValueError(f"{file_name} is not found in the zipped folder")
    return pd.read_csv(archive.open(file_name), low_memory=False)
| [
37811,
198,
18274,
879,
5499,
198,
4770,
28,
198,
42986,
5499,
284,
7925,
1366,
422,
257,
1976,
3949,
269,
21370,
1231,
257,
1957,
4321,
198,
37811,
628,
198,
11748,
7007,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
33245,
198,
... | 2.6793 | 343 |
import json
from machine_settings import _MachineConfig
import os.path as os_path
# Default text encoding used by this module when reading/writing files.
ENCODING = 'utf8'
| [
11748,
33918,
198,
6738,
4572,
62,
33692,
1330,
4808,
37573,
16934,
198,
11748,
28686,
13,
6978,
355,
28686,
62,
6978,
628,
198,
24181,
3727,
2751,
796,
705,
40477,
23,
6,
628,
198,
220,
220,
220,
220,
220,
220,
220,
220,
628,
628,
... | 2.290323 | 62 |
from elasticsearch import Elasticsearch
import argparse
import os
import json
import re
# Run the script's entry point only when executed directly (not on import).
if __name__ == "__main__":
    main()
| [
6738,
27468,
12947,
1330,
48567,
12947,
198,
11748,
1822,
29572,
198,
11748,
28686,
198,
11748,
33918,
198,
11748,
302,
628,
628,
628,
198,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
1388,
3419,
19... | 3.243902 | 41 |
"""
Author: Yonglong Tian (yonglong@mit.edu)
Date: May 07, 2020
"""
from __future__ import print_function
import torch
import torch.nn as nn
import numpy as np
from itertools import combinations
class SupConLoss(nn.Module):
    """Supervised Contrastive Learning: https://arxiv.org/pdf/2004.11362.pdf.
    It also supports the unsupervised contrastive loss in SimCLR"""

    def __init__(self, temperature=0.07, contrast_mode='all',
                 base_temperature=0.07):
        """Args:
            temperature: softmax temperature applied to the similarity logits.
            contrast_mode: 'all' uses every view as an anchor, 'one' only the
                first view of each sample.
            base_temperature: reference temperature used to scale the loss.
        """
        # Bug fix: the class previously had no __init__, so forward() crashed
        # with AttributeError on self.temperature / self.contrast_mode /
        # self.base_temperature. Defaults follow the reference implementation.
        super(SupConLoss, self).__init__()
        self.temperature = temperature
        self.contrast_mode = contrast_mode
        self.base_temperature = base_temperature

    def forward(self, features, labels=None, mask=None):
        """Compute loss for model. If both `labels` and `mask` are None,
        it degenerates to SimCLR unsupervised loss:
        https://arxiv.org/pdf/2002.05709.pdf
        Args:
            features: hidden vector of shape [bsz, n_views, ...].
            labels: ground truth of shape [bsz].
            mask: contrastive mask of shape [bsz, bsz], mask_{i,j}=1 if sample j
                has the same class as sample i. Can be asymmetric.
        Returns:
            A loss scalar.
        """
        device = (torch.device('cuda')
                  if features.is_cuda
                  else torch.device('cpu'))
        if len(features.shape) < 3:
            raise ValueError('`features` needs to be [bsz, n_views, ...],'
                             'at least 3 dimensions are required')
        if len(features.shape) > 3:
            features = features.view(features.shape[0], features.shape[1], -1)
        batch_size = features.shape[0]
        if labels is not None and mask is not None:
            raise ValueError('Cannot define both `labels` and `mask`')
        elif labels is None and mask is None:
            # SimCLR case: the only positive is the other view of the sample
            mask = torch.eye(batch_size, dtype=torch.float32).to(device)
        elif labels is not None:
            labels = labels.contiguous().view(-1, 1)
            if labels.shape[0] != batch_size:
                raise ValueError('Num of labels does not match num of features')
            mask = torch.eq(labels, labels.T).float().to(device)
        else:
            mask = mask.float().to(device)
        contrast_count = features.shape[1]
        contrast_feature = torch.cat(torch.unbind(features, dim=1), dim=0)
        if self.contrast_mode == 'one':
            anchor_feature = features[:, 0]
            anchor_count = 1
        elif self.contrast_mode == 'all':
            anchor_feature = contrast_feature
            anchor_count = contrast_count
        else:
            raise ValueError('Unknown mode: {}'.format(self.contrast_mode))
        # compute logits
        anchor_dot_contrast = torch.div(
            torch.matmul(anchor_feature, contrast_feature.T),
            self.temperature)
        # for numerical stability
        logits_max, _ = torch.max(anchor_dot_contrast, dim=1, keepdim=True)
        logits = anchor_dot_contrast - logits_max.detach()
        # (removed dead debug code that copied the logits to NumPy on every
        # forward pass — it forced a GPU->CPU sync and was never used)
        # tile mask
        mask = mask.repeat(anchor_count, contrast_count)
        # mask-out self-contrast cases
        logits_mask = torch.scatter(
            torch.ones_like(mask),
            1,
            torch.arange(batch_size * anchor_count).view(-1, 1).to(device),
            0
        )
        mask = mask * logits_mask
        # compute log_prob
        exp_logits = torch.exp(logits) * logits_mask
        log_prob = logits - torch.log(exp_logits.sum(1, keepdim=True))
        # compute mean of log-likelihood over positive
        mean_log_prob_pos = (mask * log_prob).sum(1) / mask.sum(1)
        # loss
        loss = - (self.temperature / self.base_temperature) * mean_log_prob_pos
        loss = loss.view(anchor_count, batch_size).mean()
        return loss
# Loss used by CLOCS for contrastive learning
def obtain_contrastive_loss(latent_embeddings, pids, trial):
    """ Calculate NCE Loss For Latent Embeddings in Batch
    Args:
        latent_embeddings (torch.Tensor): embeddings from model for different perturbations of same instance (BxHxN)
        pids (list): patient ids of instances in batch
        trial (str): one of 'CMC', 'SimCLR', 'CMSC', 'CMLC', 'CMSMLC' selecting the loss variant
    Outputs:
        loss (torch.Tensor): scalar NCE loss
    """
    if trial in ['CMSC', 'CMLC', 'CMSMLC']:
        # Bug fix: np.object was deprecated and removed in NumPy 1.24; plain
        # `object` keeps the ids as Python strings so '+' concatenates them.
        pids = np.array(pids, dtype=object)
        pid1, pid2 = np.meshgrid(pids, pids)
        pid_matrix = pid1 + '-' + pid2
        pids_of_interest = np.unique(pids + '-' + pids)  # unique combinations of pids of interest i.e. matching
        bool_matrix_of_interest = np.zeros((len(pids), len(pids)))
        for pid in pids_of_interest:
            bool_matrix_of_interest += pid_matrix == pid
        rows1, cols1 = np.where(np.triu(bool_matrix_of_interest, 1))
        rows2, cols2 = np.where(np.tril(bool_matrix_of_interest, -1))
    nviews = set(range(latent_embeddings.shape[2]))
    view_combinations = combinations(nviews, 2)
    loss = 0
    ncombinations = 0
    # pre-bind loss_terms so the final division never hits an unbound name,
    # whatever branch runs below
    loss_terms = 2
    for combination in view_combinations:
        view1_array = latent_embeddings[:, :, combination[0]]  # (BxH)
        view2_array = latent_embeddings[:, :, combination[1]]  # (BxH)
        norm1_vector = view1_array.norm(dim=1).unsqueeze(0)
        norm2_vector = view2_array.norm(dim=1).unsqueeze(0)
        sim_matrix = torch.mm(view1_array, view2_array.transpose(0, 1))
        norm_matrix = torch.mm(norm1_vector.transpose(0, 1), norm2_vector)
        temperature = 0.1
        argument = sim_matrix / (norm_matrix * temperature)
        sim_matrix_exp = torch.exp(argument)
        if trial == 'CMC':
            """ Obtain Off Diagonal Entries """
            # upper_triangle = torch.triu(sim_matrix_exp,1)
            # lower_triangle = torch.tril(sim_matrix_exp,-1)
            # off_diagonals = upper_triangle + lower_triangle
            diagonals = torch.diag(sim_matrix_exp)
            """ Obtain Loss Terms(s) """
            loss_term1 = -torch.mean(torch.log(diagonals / torch.sum(sim_matrix_exp, 1)))
            loss_term2 = -torch.mean(torch.log(diagonals / torch.sum(sim_matrix_exp, 0)))
            loss += loss_term1 + loss_term2
            loss_terms = 2
        elif trial == 'SimCLR':
            self_sim_matrix1 = torch.mm(view1_array, view1_array.transpose(0, 1))
            self_norm_matrix1 = torch.mm(norm1_vector.transpose(0, 1), norm1_vector)
            temperature = 0.1
            argument = self_sim_matrix1 / (self_norm_matrix1 * temperature)
            self_sim_matrix_exp1 = torch.exp(argument)
            self_sim_matrix_off_diagonals1 = torch.triu(self_sim_matrix_exp1, 1) + torch.tril(self_sim_matrix_exp1, -1)
            self_sim_matrix2 = torch.mm(view2_array, view2_array.transpose(0, 1))
            self_norm_matrix2 = torch.mm(norm2_vector.transpose(0, 1), norm2_vector)
            temperature = 0.1
            argument = self_sim_matrix2 / (self_norm_matrix2 * temperature)
            self_sim_matrix_exp2 = torch.exp(argument)
            self_sim_matrix_off_diagonals2 = torch.triu(self_sim_matrix_exp2, 1) + torch.tril(self_sim_matrix_exp2, -1)
            denominator_loss1 = torch.sum(sim_matrix_exp, 1) + torch.sum(self_sim_matrix_off_diagonals1, 1)
            denominator_loss2 = torch.sum(sim_matrix_exp, 0) + torch.sum(self_sim_matrix_off_diagonals2, 0)
            diagonals = torch.diag(sim_matrix_exp)
            loss_term1 = -torch.mean(torch.log(diagonals / denominator_loss1))
            loss_term2 = -torch.mean(torch.log(diagonals / denominator_loss2))
            loss += loss_term1 + loss_term2
            loss_terms = 2
        elif trial in ['CMSC', 'CMLC', 'CMSMLC']:  # ours #CMSMLC = positive examples are same instance and same patient
            triu_elements = sim_matrix_exp[rows1, cols1]
            tril_elements = sim_matrix_exp[rows2, cols2]
            diag_elements = torch.diag(sim_matrix_exp)
            triu_sum = torch.sum(sim_matrix_exp, 1)
            tril_sum = torch.sum(sim_matrix_exp, 0)
            loss_diag1 = -torch.mean(torch.log(diag_elements / triu_sum))
            loss_diag2 = -torch.mean(torch.log(diag_elements / tril_sum))
            loss_triu = -torch.mean(torch.log(triu_elements / triu_sum[rows1]))
            loss_tril = -torch.mean(torch.log(tril_elements / tril_sum[cols2]))
            loss = loss_diag1 + loss_diag2
            loss_terms = 2
            if len(rows1) > 0:
                loss += loss_triu  # technically need to add 1 more term for symmetry
                loss_terms += 1
            if len(rows2) > 0:
                loss += loss_tril  # technically need to add 1 more term for symmetry
                loss_terms += 1
            # print(loss,loss_triu,loss_tril)
        ncombinations += 1
    loss = loss / (loss_terms * ncombinations)
    return loss
37811,
198,
13838,
25,
40633,
6511,
20834,
357,
88,
506,
6511,
31,
2781,
13,
15532,
8,
198,
10430,
25,
1737,
8753,
11,
12131,
198,
37811,
198,
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
198,
11748,
28034,
198,
11748,
28034,
1... | 2.113008 | 4,159 |
#!/usr/bin/env python3
#
# MIT License
#
# Copyright (c) 2020-2021 EntySec
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
from core.lib.module import HatSploitModule
from utils.payload.payload import payload | [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
2,
198,
2,
17168,
13789,
198,
2,
198,
2,
15069,
357,
66,
8,
12131,
12,
1238,
2481,
7232,
88,
6558,
198,
2,
198,
2,
2448,
3411,
318,
29376,
7520,
11,
1479,
286,
3877,
11,... | 3.691843 | 331 |
# generated from catkin/cmake/template/pkg.context.pc.in
# NOTE: values below are filled in by the catkin build from pkg.context.pc.in;
# do not edit this file by hand.
CATKIN_PACKAGE_PREFIX = ""
# Include directories exported by this package (';'-separated in the template).
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/nvidia/linorobot_ws/install/include".split(';') if "/home/nvidia/linorobot_ws/install/include" != "" else []
# Catkin packages this package depends on, converted to a space-separated list.
PROJECT_CATKIN_DEPENDS = "dynamic_reconfigure;message_runtime;roscpp;std_msgs;lino_msgs".replace(';', ' ')
# Extra libraries to link against (none for this package).
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "lino_pid"
PROJECT_SPACE_DIR = "/home/nvidia/linorobot_ws/install"
PROJECT_VERSION = "0.0.1"
| [
2,
7560,
422,
3797,
5116,
14,
11215,
539,
14,
28243,
14,
35339,
13,
22866,
13,
14751,
13,
259,
198,
34,
1404,
42,
1268,
62,
47,
8120,
11879,
62,
47,
31688,
10426,
796,
13538,
198,
31190,
23680,
62,
40492,
38,
62,
10943,
16254,
62,
... | 2.343891 | 221 |
"""[parser ini]
Raises:
MCError -- [description]
MCError -- [description]
MCError -- [description]
MCError -- [description]
MCError -- [description]
MCError -- [description]
MCError -- [description]
MCError -- [description]
MCError -- [description]
MCError -- [description]
MCError -- [description]
MCError -- [description]
MCError -- [description]
MCError -- [description]
MCError -- [description]
Returns:
[bool] -- [true or false]
"""
import os
import shutil
import configparser
import codecs
from pys.tool import utils
from pys import path
from pys.log import LOGGER, CONSOLER
from pys.error.exp import MCError
from pys.conf import mconf
def build_config_ini(_data_dir):
    """Generate one node package per configured p2p endpoint under _data_dir.

    Reads chain metadata (genesis file, certificates, template config.ini)
    from <install path>/meta and the parsed mchain configuration, then
    creates node_<ip>_<port> directories containing config.ini, certs and
    helper scripts.

    Arguments:
        _data_dir {[PATH]} -- [output dir; must not already exist]

    Raises:
        MCError -- [output dir exists, required files missing, or parse failure]
    """
    LOGGER.info("build_config_ini start ")
    p2p_listen_port = mconf.MchainConf.p2p_listen_port
    jsonrpc_listen_port = mconf.MchainConf.jsonrpc_listen_port
    channel_listen_port = mconf.MchainConf.channel_listen_port
    p2p_ip = mconf.MchainConf.p2p_ip
    rpc_ip = mconf.MchainConf.rpc_ip
    peers = mconf.MchainConf.peers
    meta_dir = '{}/meta'.format(path.get_path())
    conf_dir = meta_dir
    package_dir = _data_dir
    gm_opr = utils.Status.gm_option
    group_id = mconf.MchainConf.group_id
    utils.file_must_exists('{}/group.{}.genesis'.format(meta_dir, group_id))
    if os.path.exists(package_dir):
        LOGGER.error(' %s existed, maybe u had created it!', package_dir)
        raise MCError(' %s existed, maybe u had created it!' % package_dir)
    os.mkdir(package_dir)
    default_cfg = configparser.ConfigParser()
    # pick the guomi or standard config.ini template and stage it as a hidden
    # working copy in the meta dir
    if gm_opr:
        shutil.copy('{}/tpl/config.ini.gm'.format(path.get_path()),
                    '{}/.config.ini'.format(conf_dir))
    else:
        shutil.copy('{}/tpl/config.ini'.format(path.get_path()),
                    '{}/.config.ini'.format(conf_dir))
    try:
        with codecs.open('{}/.config.ini'.format(conf_dir),
                         'r', encoding='utf-8') as config_file:
            default_cfg.readfp(config_file)
    except Exception as build_exp:
        LOGGER.error(
            ' open config.ini file failed, exception is %s', build_exp)
        raise MCError(
            ' open config.ini file failed, exception is %s' % build_exp)
    if not peers:
        LOGGER.warning('section peers not existed!')
        CONSOLER.warn('section peers not existed!')
    else:
        # external peers are appended after the locally generated node.N
        # entries, hence the len(p2p_listen_port) offset
        for node_id, peer in enumerate(peers):
            default_cfg.set("p2p", "node.{}".format(node_id + len(p2p_listen_port)),
                            peer)
    with open('{}/.config.ini'.format(conf_dir), 'w') as config_file:
        default_cfg.write(config_file)
    # init config.ini & node package
    for my_node_index, node_ip in enumerate(p2p_ip):
        LOGGER.info("p2p_ip -> %s", node_ip)
        # the per-node certificate must have been generated beforehand
        try:
            if utils.Status.gm_option:
                utils.file_must_exists('{}/gmcert_{}_{}.crt'.format(conf_dir,
                                                                    node_ip,
                                                                    p2p_listen_port[my_node_index]))
            else:
                utils.file_must_exists('{}/cert_{}_{}.crt'.format(conf_dir,
                                                                  node_ip,
                                                                  p2p_listen_port[my_node_index]))
        except Exception as build_exp:
            LOGGER.error('%s', build_exp)
            raise MCError('%s' % build_exp)
        CONSOLER.info(' Generate %s/node_%s_%s ',
                      package_dir, node_ip, p2p_listen_port[my_node_index])
        node_dir = '{}/node_{}_{}'.format(package_dir,
                                          node_ip, p2p_listen_port[my_node_index])
        os.mkdir(node_dir)
        shutil.copy('{}/tpl/start.sh'.format(path.get_path()),
                    '{}/start.sh'.format(node_dir))
        shutil.copy('{}/tpl/stop.sh'.format(path.get_path()),
                    '{}/stop.sh'.format(node_dir))
        shutil.copy('{}/fisco-bcos'.format(meta_dir),
                    '{}/fisco-bcos'.format(node_dir))
        os.mkdir('{}/conf'.format(node_dir))
        # on any failure below the whole half-built package dir is removed
        try:
            # get node cert
            shutil.copy('{}/.config.ini'.format(conf_dir),
                        '{}/config.ini'.format(node_dir))
            shutil.copy('{}/group.{}.genesis'.format(conf_dir, group_id),
                        '{}/conf/group.{}.genesis'.format(node_dir, group_id))
            shutil.copy('{}/tpl/group.i.ini'.format(path.get_path()),
                        '{}/conf/group.{}.ini'.format(node_dir, group_id))
            if gm_opr:
                get_node_cert('{}/gmcert_{}_{}.crt'.format(meta_dir, node_ip,
                                                           p2p_listen_port[my_node_index]),
                              '{}/conf/gmnode.crt'.format(node_dir))
                # get_nodeid('{}/conf/gmnode.crt'.format(node_dir),
                #            '{}/conf/gmnode.nodeid'.format(node_dir))
                shutil.copyfile('{}/gmca.crt'.format(meta_dir),
                                '{}/conf/gmca.crt'.format(node_dir))
            else:
                get_node_cert('{}/cert_{}_{}.crt'.format(meta_dir, node_ip,
                                                         p2p_listen_port[my_node_index]),
                              '{}/conf/node.crt'.format(node_dir))
                # get_nodeid('{}/conf/node.crt'.format(node_dir),
                #            '{}/conf/node.nodeid'.format(node_dir))
                shutil.copyfile('{}/ca.crt'.format(meta_dir),
                                '{}/conf/ca.crt'.format(node_dir))
        except Exception as build_exp:
            LOGGER.error(' get node.crt failed ! exception is %s', build_exp)
            utils.delete_data(package_dir)
            raise MCError(' get node.crt failed! exception is %s' % build_exp)
        node_cfg = configparser.ConfigParser()
        try:
            with codecs.open('{}/config.ini'.format(node_dir),
                             'r', encoding='utf-8') as config_file:
                node_cfg.readfp(config_file)
        except Exception as build_exp:
            LOGGER.error(
                ' open config.ini file failed, exception is %s', build_exp)
            utils.delete_data(package_dir)
            raise MCError(
                ' open config.ini file failed, exception is %s' % build_exp)
        # patch the per-node listen addresses/ports into the copied template
        node_cfg.set("rpc", "listen_ip", rpc_ip[my_node_index])
        node_cfg.set("rpc", "channel_listen_port",
                     channel_listen_port[my_node_index])
        node_cfg.set("rpc", "jsonrpc_listen_port",
                     jsonrpc_listen_port[my_node_index])
        # node_cfg.set("p2p", "listen_ip", p2p_ip[my_node_index])
        node_cfg.set("p2p", "listen_port", p2p_listen_port[my_node_index])
        with open('{}/config.ini'.format(node_dir), 'w') as config_file:
            node_cfg.write(config_file)
            config_file.close()  # redundant: the with-statement already closes it
    # set p2p ip in config.ini
    for my_node_index, ip_item in enumerate(p2p_ip):
        node_cfg = configparser.ConfigParser()
        node_dir = '{}/node_{}_{}'.format(package_dir,
                                          ip_item, p2p_listen_port[my_node_index])
        try:
            with codecs.open('{}/config.ini'.format(node_dir),
                             'r', encoding='utf-8') as config_file:
                node_cfg.readfp(config_file)
        except Exception as build_exp:
            LOGGER.error(
                ' open config.ini file failed, exception is %s', build_exp)
            utils.delete_data(package_dir)
            raise MCError(
                ' open config.ini file failed, exception is %s' % build_exp)
        # every node gets the full node.N list of all local endpoints
        for ip_idx, set_item in enumerate(p2p_ip):
            node_cfg.set("p2p", "node.{}".format(ip_idx),
                         '{}:{}'.format(set_item, p2p_listen_port[ip_idx]))
        with open('{}/config.ini'.format(node_dir), 'w') as config_file:
            node_cfg.write(config_file)
    # shutil.copy('{}/node_{}_{}/config.ini'.format(package_dir,
    #                                               p2p_ip[0],
    #                                               p2p_listen_port[0]),
    #             '{}/config.ini'.format(package_dir))
    os.mkdir(package_dir + '/scripts/')
    shutil.copy('{}/scripts/install.sh'.format(path.get_path()), package_dir + '/scripts/')
    shutil.copy('{}/scripts/pack.sh'.format(path.get_path()), package_dir + '/scripts/')
    shutil.copy('{}/tpl/start_all.sh'.format(path.get_path()), package_dir)
    shutil.copy('{}/tpl/stop_all.sh'.format(path.get_path()), package_dir)
    shutil.copytree('{}/scripts/monitor'.format((path.get_path())),
                    '{}/monitor'.format(package_dir))
    LOGGER.info("build_config_ini end!")
def get_node_cert(get_path, send_path):
    """Validate a node certificate chain and copy it into the node package.

    The source file must contain exactly two PEM certificates.

    Arguments:
        get_path {[PATH]} -- [source certificate file]
        send_path {[PATH]} -- [destination path inside the node package]

    Raises:
        MCError -- [source missing, destination present, or bad cert format]
    """
    LOGGER.info("get node.crt in %s", get_path)
    LOGGER.info("send node.crt in %s", send_path)
    if not os.path.isfile(get_path):
        LOGGER.error(' node cert doesn\'t existed! Need %s', get_path)
        raise MCError(' node cert doesn\'t existed! Need %s' % get_path)
    if os.path.isfile(send_path):
        LOGGER.error(' node.crt existed! path is %s', send_path)
        raise MCError(' node.crt existed! path is %s' % send_path)
    with open(get_path) as cert_file:
        cert_text = cert_file.read()
    begin_count = cert_text.count('-----BEGIN CERTIFICATE-----')
    end_count = cert_text.count('-----END CERTIFICATE-----')
    if begin_count != 2 or end_count != 2:
        LOGGER.error(
            ' node cert format checked failed! path is %s', get_path)
        raise MCError(
            ' node cert format checked failed! path is %s' % get_path)
    shutil.copy(get_path, send_path)
    LOGGER.info("get_node_cert success! get path is %s", get_path)
    LOGGER.info("get_node_cert success! send path is %s", send_path)
def get_nodeid(get_path, send_path):
    """Extract the 128-hex-char node id from a certificate into send_path.

    Shells out to openssl (or tassl in guomi mode) and post-processes the
    public-key block of the certificate text.

    Arguments:
        get_path {[PATH]} -- [certificate file to read]
        send_path {[PATH]} -- [file that receives the node id]

    Raises:
        MCError -- [certificate missing or the shell pipeline failed]
    """
    LOGGER.info("get_nodeid start! get path is %s", get_path)
    LOGGER.info("get_nodeid start! send path is %s", send_path)
    if not os.path.isfile(get_path):
        LOGGER.error(' node cert doesn\'t existed! Need %s', get_path)
        raise MCError(' node cert doesn\'t existed! Need %s' % get_path)
    # Bug fix: pre-bind status/result so the except handler below can never
    # hit an unbound local if getstatusoutput itself raises.
    status, result = -1, ''
    try:
        if utils.Status.gm_option:
            # Bug fix: a '|' was missing between sed "s/ //g" and cut, which
            # made the gm pipeline a shell syntax error.
            (status, result) = utils.getstatusoutput('~/.tassl x509 -text -in {}'
                                                     ' | sed -n "15,20p" | sed '
                                                     '"s/://g" | sed "s/pub//g" |'
                                                     ' tr "\n" " " | sed "s/ //g" |'
                                                     ' cut -c 3-130| cat >{}'
                                                     .format(get_path, send_path))
        else:
            (status, result) = utils.getstatusoutput('openssl x509 -text -in {}'
                                                     ' | sed -n "15,20p" | sed "s/://g"'
                                                     ' | tr "\n" " " | sed "s/ //g" |'
                                                     ' cut -c 3-130| cat >{}'
                                                     .format(get_path, send_path))
        # Bug fix: success was previously logged unconditionally, even when
        # the pipeline reported a non-zero status.
        if status != 0:
            LOGGER.error(
                ' create nodeid failed! status is %d, output is %s, dir is %s.',
                status, result, get_path)
        else:
            LOGGER.info(
                ' create nodeid success! status is %d, output is %s, dir is %s.',
                status, result, get_path)
    except Exception as node_id_exp:
        LOGGER.error(
            ' create nodeid failed! status is %d, output is %s, dir is %s.',
            status, result, get_path)
        raise MCError(' create nodeid failed! excepion is %s.' % node_id_exp)
    LOGGER.info("get_nodeid success! get path is %s", get_path)
    LOGGER.info("get_nodeid success! send path is %s", send_path)
def get_nodeid_str(get_path):
    """Return the 128-hex-char node id extracted from a certificate file.

    Arguments:
        get_path {[file]} -- [certificate file to read]

    Raises:
        MCError -- [certificate missing or the shell pipeline failed]

    Returns:
        [string] -- [nodeid]
    """
    # openssl x509 -text -in ./node.crt | sed -n '15,20p' | sed 's/://g' |
    # tr "\n" " " | sed 's/ //g' | sed 's/pub//g' | cut -c 3-130
    LOGGER.info("get_nodeid start! get path is %s", get_path)
    if not os.path.isfile(get_path):
        LOGGER.error(' node cert doesn\'t existed! Need %s', get_path)
        raise MCError(' node cert doesn\'t existed! Need %s' % get_path)
    # Bug fix: pre-bind status/result so the except handler below can never
    # hit an unbound local if getstatusoutput itself raises.
    status, result = -1, ''
    try:
        if utils.Status.gm_option:
            (status, result) = utils.getstatusoutput('~/.tassl x509 -text -in {}'
                                                     ' | sed -n "15,20p" | sed '
                                                     '"s/://g" | sed "s/pub//g" |'
                                                     ' tr "\n" " " | sed "s/ //g"'
                                                     ' | cut -c 3-130'.format(get_path))
            result = result.split('\n')[0]
        else:
            (status, result) = utils.getstatusoutput('openssl x509 -text -in {}'
                                                     ' | sed -n "15,20p" | sed '
                                                     '"s/://g" | sed "s/pub//g" |'
                                                     ' tr "\n" " " | sed "s/ //g"'
                                                     ' | cut -c 3-130'.format(get_path))
        # Bug fix: success was previously logged unconditionally, even when
        # the pipeline reported a non-zero status.
        if status != 0:
            LOGGER.error(
                ' create nodeid failed! status is %d, output is %s, dir is %s.',
                status, result, get_path)
        else:
            LOGGER.info(
                ' create nodeid success! status is %d, output is %s, dir is %s.',
                status, result, get_path)
    except Exception as node_id_exp:
        LOGGER.error(
            ' create nodeid failed! status is %d, output is %s, dir is %s.',
            status, result, get_path)
        raise MCError(' create nodeid failed! excepion is %s.' % node_id_exp)
    LOGGER.info("get_nodeid success! get path is %s", get_path)
    return result
def concatenate_cfg(cfg_file, cfg_file_get):
    """[combine two config.ini]

    Merges the [p2p] node entries of cfg_file into cfg_file_get and rewrites
    cfg_file_get with the deduplicated union.

    Arguments:
        cfg_file {[type]} -- [source config.ini whose peers are merged in]
        cfg_file_get {[type]} -- [destination config.ini, rewritten in place]

    Raises:
        MCError -- [if either file cannot be parsed]
    """
    LOGGER.info("concatenate two config.ini now!")
    meta = cfg_file
    data = cfg_file_get
    utils.file_must_exists(meta)
    utils.file_must_exists(data)
    p2p_get = []
    p2p_get_ip = []
    p2p_send = []
    p2p_send_ip = []
    p2p_cfg = configparser.ConfigParser()
    try:
        with codecs.open(meta, 'r', encoding='utf-8') as config_file:
            p2p_cfg.readfp(config_file)
    except Exception as build_exp:
        LOGGER.error(
            ' open config.ini file failed, exception is %s', build_exp)
        raise MCError(
            ' open config.ini file failed, exception is %s' % build_exp)
    p2p_get = p2p_cfg.items('p2p')
    # drop the first two [p2p] options — presumably listen_ip/listen_port,
    # leaving only the node.N peer entries; TODO confirm against the template
    p2p_get.pop(0)
    p2p_get.pop(0)
    LOGGER.info("get node is %s!", p2p_get)
    for node_tuple in p2p_get:
        p2p_get_ip.append(node_tuple[1])
    LOGGER.info("get node ip is %s!", p2p_get_ip)
    # NOTE: the second readfp reuses the same parser, so [p2p] entries from
    # both files accumulate in p2p_cfg before being rewritten below
    try:
        with codecs.open(data, 'r', encoding='utf-8') as config_file:
            p2p_cfg.readfp(config_file)
    except Exception as build_exp:
        LOGGER.error(
            ' open config.ini file failed, exception is %s', build_exp)
        raise MCError(
            ' open config.ini file failed, exception is %s' % build_exp)
    p2p_send = p2p_cfg.items('p2p')
    p2p_send.pop(0)
    p2p_send.pop(0)
    LOGGER.info("send node is %s!", p2p_send)
    for node_tuple in p2p_send:
        p2p_send_ip.append(node_tuple[1])
    LOGGER.info("get node ip is %s!", p2p_send_ip)
    # deduplicated union of the two peer lists (order is not preserved)
    p2p_send_ip = list(set(p2p_get_ip + p2p_send_ip))
    LOGGER.info("final node ip is %s!", p2p_send_ip)
    for ip_idx, p2p_ip in enumerate(p2p_send_ip):
        p2p_cfg.set("p2p", "node.{}".format(ip_idx), p2p_ip)
    with open(data, 'w') as config_file:
        p2p_cfg.write(config_file)
    LOGGER.info(
        "concatenate two config.ini now! output => %s/conf/config.ini", data)
def merge_cfg(p2p_list, cfg_file):
    """Merge the given peer list into the [p2p] section of cfg_file.

    Existing node.N entries are kept and deduplicated against p2p_list, then
    the file is rewritten in place.

    Arguments:
        p2p_list {[type]} -- [list of peer endpoints ('ip:port' strings)]
        cfg_file {[type]} -- [path of the config.ini to update]

    Raises:
        MCError -- [file unparsable or missing a [p2p] section]

    Returns:
        [bool] -- [True on success]
    """
    LOGGER.info("merge peers to config.ini now!")
    data = cfg_file
    utils.file_must_exists(data)
    p2p_get = p2p_list
    p2p_send = []
    p2p_cfg = configparser.ConfigParser()
    try:
        with codecs.open(data, 'r', encoding='utf-8') as config_file:
            p2p_cfg.readfp(config_file)
    except Exception as build_exp:
        LOGGER.error(
            ' open config.ini file failed, exception is %s', build_exp)
        raise MCError(
            ' open config.ini file failed, exception is %s' % build_exp)
    if not p2p_cfg.has_section('p2p'):
        # Bug fix: the original logged/raised with `build_exp` here, a name
        # that is unbound on this path, so a missing [p2p] section crashed
        # with NameError instead of a clean MCError.
        LOGGER.error(' config.ini %s has no [p2p] section', data)
        raise MCError(' config.ini %s has no [p2p] section' % data)
    p2p_send_opt = p2p_cfg.options('p2p')
    for node in p2p_send_opt:
        p2p_send.append(p2p_cfg.get('p2p', node))
    # drop the first two [p2p] options — presumably listen_ip/listen_port,
    # keeping only the node.N peer entries; TODO confirm against the template
    p2p_send.pop(0)
    p2p_send.pop(0)
    LOGGER.info("send node is %s!", p2p_send)
    LOGGER.info("get node ip is %s!", p2p_get)
    # deduplicated union of existing and new peers (order is not preserved)
    p2p_send = list(set(p2p_send + p2p_get))
    LOGGER.info("final node ip is %s!", p2p_send)
    for ip_idx, p2p_ip in enumerate(p2p_send):
        p2p_cfg.set("p2p", "node.{}".format(ip_idx), p2p_ip)
    with open(data, 'w') as config_file:
        p2p_cfg.write(config_file)
    LOGGER.info(
        "concatenate config.ini now! output => %s/conf/config.ini", data)
    return True
def add_peers2cfg(_peers, _node):
    """Merge every peer listed in the _peers file into each node's config.ini.

    Arguments:
        _peers {[type]} -- [text file with one peer endpoint per line]
        _node {[type]} -- [directory containing the node packages]
    """
    data_path = _peers
    utils.file_must_exists(data_path)
    p2p_list = []
    try:
        for raw_line in open(data_path):
            endpoint = raw_line.strip('\n')
            utils.valid_peer(endpoint)
            p2p_list.append(endpoint)
    except Exception as ini_exp:
        LOGGER.error(
            ' add peers %s file failed, exception is %s', data_path, ini_exp)
        raise MCError(
            ' add peers %s file failed, exception is %s' % (data_path, ini_exp))
    LOGGER.info('merge peers is %s', p2p_list)
    p2p_list = list(set(p2p_list))
    for node_file in utils.get_all_nodes_dir(_node):
        config_path = '{}/config.ini'.format(node_file)
        utils.file_must_exists(config_path)
        merge_cfg(p2p_list, config_path)
def add_group(_group, _node):
    """Install a group genesis file into every node package under _node.

    Arguments:
        _group {[type]} -- [path to a group.<id>.genesis file]
        _node {[type]} -- [directory containing the node packages]
    """
    data_path = _group
    utils.file_must_exists(data_path)
    file_name = os.path.basename(data_path)
    group_id = utils.valid_genesis(file_name)
    if group_id == 0:
        raise MCError(' paser %s file failed' % (data_path))
    for node_file in utils.get_all_nodes_dir(_node):
        genesis_target = '{}/conf/{}'.format(node_file, file_name)
        utils.file_must_not_exists(genesis_target)
        shutil.copyfile(data_path, genesis_target)
        shutil.copyfile('{}/tpl/group.i.ini'.format(path.get_path()),
                        '{}/conf/group.{}.ini'.format(node_file, group_id))
def get_console_file(_file):
    """[get console file]

    Rewrites a console configuration file in place: substitutes the real
    group id and inserts one <value>ip:port</value> channel endpoint per
    configured node after the connectionsStr line.

    Arguments:
        _file {[type]} -- [console config file to patch in place]
    """
    data = _file
    utils.file_must_exists(data)
    p2p_ip = mconf.MchainConf.p2p_ip
    channel_listen_port = mconf.MchainConf.channel_listen_port
    channel_addr = []
    group_id = mconf.MchainConf.group_id
    # replace the template's hard-coded group "1" with the configured id
    utils.replace(data,
                  '"group1',
                  '"group{}'.format(group_id))
    utils.replace(data,
                  'name="groupId" value="1"',
                  'name="groupId" value="{}"'.format(group_id))
    for ip_idx, p2p_get in enumerate(p2p_ip):
        channel_addr.append('{}:{}'.format(
            p2p_get, channel_listen_port[ip_idx]))
    # locate the connectionsStr line number via grep -n (format "N:...")
    cmd = "cat {} | grep -n connectionsStr | awk '{{print $1}}'".format(data)
    (status, result) = utils.getstatusoutput(cmd)
    result = result.strip('\n').strip(':')
    if bool(status):
        LOGGER.error(
            ' append console channel_addr failed, result is %s.', result)
        raise MCError(
            ' append console channel_addr failed, result is %s.' % result)
    line_num = int(result) + 1
    # insert one endpoint per node, each after the previously inserted line
    for channel in channel_addr:
        (status, result) \
            = utils.getstatusoutput('sed -i "{} a'
                                    '<value>{}</value>" {}'
                                    .format(line_num, channel, data))
        line_num = line_num + 1
    CONSOLER.info('get console file end')
| [
15931,
17912,
79,
6005,
287,
72,
60,
198,
198,
21762,
2696,
25,
198,
220,
220,
220,
337,
5222,
81,
1472,
1377,
685,
11213,
60,
198,
220,
220,
220,
337,
5222,
81,
1472,
1377,
685,
11213,
60,
198,
220,
220,
220,
337,
5222,
81,
1472,... | 1.898938 | 11,577 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
from datetime import datetime
from typing import Iterable, NamedTuple, Optional
import kfp
from pytz import timezone
KNOWN_TIMEZONE_TABLE = {"JST": "Asia/Tokyo"}
class Pipeline(NamedTuple):
    """Describes a pipeline deployed on the KFP instance."""
    # Unique pipeline ID assigned by the KFP backend.
    id: str
    # Human-readable pipeline name.
    name: str
def main() -> None:
    """Entrypoint: parse CLI arguments and deploy the pipeline."""
    cli_args = _build_argparser().parse_args()
    deploy_pipeline(
        cli_args.deploy_target_host,
        cli_args.pipeline_name,
        cli_args.pipeline_file,
        cli_args.timezone,
    )
def get_pipeline_id(client: kfp.Client, pipeline_name: str) -> Optional[str]:
    """Get pipeline ID if that is already deployed.

    Args:
        client (kfp.Client): kfp client
        pipeline_name (str): name of pipeline

    Returns:
        Optional[str]: If found, return Pipeline ID. If not, return None.
    """
    # return the first deployed pipeline with a matching name, if any
    matches = (p.id for p in _iterate_pipelines(client) if p.name == pipeline_name)
    return next(matches, None)
def deploy_new_pipeline(
    client: kfp.Client, pipeline_name: str, pipeline_file_path: str
) -> str:
    """Deploy the new pipeline into kubeflow pipelines.

    Args:
        client (kfp.Client): kfp client
        pipeline_name (str): name of the pipeline
        pipeline_file_path (str): upload pipeline file

    Returns:
        str: generated pipeline ID
    """
    uploaded = client.pipeline_uploads.upload_pipeline(
        pipeline_file_path, name=pipeline_name
    )
    return uploaded.id
def deploy_new_version(
    client: kfp.Client, pipeline_id: str, pipeline_file_path: str, version_name: str
) -> str:
    """Deploy the new version of specified pipeline into kubeflow pipelines.

    Args:
        client (kfp.Client): kfp client
        pipeline_id (str): ID of pipeline that deploy into.
        pipeline_file_path (str): upload pipeline file
        version_name (str): version string of pipeline. must be unique in the pipeline.

    Returns:
        str: deployed version id
    """
    uploaded = client.pipeline_uploads.upload_pipeline_version(
        pipeline_file_path, pipelineid=pipeline_id, name=version_name
    )
    return uploaded.id
def create_version_str(
    pipeline_name: str,
    tz_name: str,
    timestamp: Optional[datetime] = None,
) -> str:
    """Create version string based on the local time.

    Args:
        pipeline_name (str): base version name.
        tz_name (str): name of timezone, like "UTC", "JST".
        timestamp (Optional[datetime]): moment to stamp the version with;
            defaults to the current time *at call time*.

    Returns:
        str: generated version name, e.g. "my-pipeline-v210915-123456".
    """
    # BUG FIX: the previous default `timestamp: datetime = datetime.now()`
    # was evaluated once at import time, so every call in a long-lived
    # process reused the same stale timestamp. Resolve the default lazily.
    if timestamp is None:
        timestamp = datetime.now()
    # Expand short aliases (e.g. "JST") to the full IANA name pytz expects.
    if tz_name in KNOWN_TIMEZONE_TABLE:
        tz_name = KNOWN_TIMEZONE_TABLE[tz_name]
    now = timestamp.astimezone(timezone(tz_name))
    return f"{pipeline_name}-v{now:%y%m%d}-{now:%H%M%S}"
# Allow the module to be executed directly as a deployment script.
if __name__ == "__main__":
    main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
11748,
1822,
29572,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
6738,
19720,
1330,
40806,
540,
11,
34441,
51,
29... | 2.538321 | 1,096 |
import unittest
import pytest
from pynYNAB.ClientFactory import nYnabClientFactory
from pynYNAB.schema import BudgetVersion
from pynYNAB.schema import Transaction
@pytest.fixture
| [
11748,
555,
715,
395,
198,
198,
11748,
12972,
9288,
198,
198,
6738,
279,
2047,
56,
4535,
33,
13,
11792,
22810,
1330,
299,
56,
77,
397,
11792,
22810,
198,
6738,
279,
2047,
56,
4535,
33,
13,
15952,
2611,
1330,
15401,
14815,
198,
6738,
... | 2.983871 | 62 |
# this script pulls oi,funding and mark price data from bitmex, bybit, Okex, Binance for BTC perpetual swap contracts
# and aggregates data from the whole market into files
#TODO: change data_file_path to your own in rows 42 and 174
# ---------------------------
# requirements:
# pip install bybit
# pip install bitmex-ws
# pip install APScheduler
# pip install bitmex
# if any other package is missing just pip install it
# ---------------------------
# imports --------------------------------------------------
import numpy as np
# bitmex imports
import bitmex
import datetime
# bybit imports
import bybit
# binance imports
from binance_f import RequestClient
# okex imports
from okex import swap_api as swap
from okex import futures_api as future
import json
from dhooks import Webhook
# -----------------------------------------------------------
hook = Webhook("YOUR DISCORD WEBHOOK URL")
def get_and_store_btc_data():
    """
    Pull mark price, open interest (OI) and funding rate for the BTC
    perpetual swap from BitMEX, Bybit, Binance and OKEx, aggregate them
    across the whole market and post the summary to a Discord webhook.
    """
    # get data from bitmex
    client = bitmex.bitmex(test=False)
    instrument_data = client.Instrument.Instrument_get(symbol='XBTUSD').result()
    mex_mark = round(instrument_data[0][0]["markPrice"], 1) # [USD]
    mex_oi = round(instrument_data[0][0]["openInterest"] / 10 ** 6, 3) # [mil USD]
    mex_funding = round(instrument_data[0][0]["fundingRate"] * 100, 3) # [%]
    # -----------------------------------------------------------
    # get data from bybit
    client = bybit.bybit(test=False, api_key="", api_secret="")
    info = client.Market.Market_symbolInfo(symbol="BTCUSD").result()
    info_dict = info[0]["result"][0]
    bybit_mark = round(float(info_dict["mark_price"]), 1) # [USD]
    bybit_oi = round(int(info_dict["open_interest"]) / 10 ** 6, 3) # [mil USD]
    bybit_funding = round(float(info_dict["funding_rate"]) * 100, 3) # [%]
    # -----------------------------------------------------------
    # get data from binance
    request_client = RequestClient(api_key="None", secret_key="None", url="https://fapi.binance.com")
    binance_oi_api = request_client.get_open_interest(symbol="BTCUSDT")
    binance_mark_api = request_client.get_mark_price(symbol="BTCUSDT")
    binance_mark = round(binance_mark_api.markPrice , 1) # [USD]
    binance_funding = round(binance_mark_api.lastFundingRate * 100, 3) # [%] (unit comment was mislabelled "mil USD")
    binance_oi = round(binance_oi_api.openInterest * binance_mark / 10 ** 6, 3) # [mil USD] (unit comment was mislabelled "%")
    # -----------------------------------------------------------
    # get data from okex
    api_key = ""
    secret_key = ""
    passphrase = ""
    swap_contract = "BTC-USD-SWAP"
    swapAPI = swap.SwapAPI(api_key, secret_key, passphrase)
    mark_price_api = swapAPI.get_mark_price(swap_contract)
    okex_mark = round(float(mark_price_api["mark_price"]), 1) # [USD]
    funding_api = swapAPI.get_funding_time(swap_contract)
    okex_funding = round(float(funding_api["funding_rate"]) * 100, 3) # [%]
    oi = swapAPI.get_holds(swap_contract)
    okex_oi = round(int(oi["amount"]) * 100 / 10 ** 6, 3) # [mil USD] -- contracts converted at 100 USD apiece (the `* 100`)
    # -----------------------------------------------------------
    # time
    # NOTE(review): `time` is computed but never used below -- presumably a
    # leftover from a file-logging version of this function; confirm before removing.
    time = datetime.datetime.now().strftime("%Y-%d-%m %H:%M") # year-day-month hours:minutes (no seconds in the format)
    # -----------------------------------------------------------
    # avg mark, cum OI, oi weighted funding
    avg_mark = round(np.average([mex_mark, bybit_mark, binance_mark, okex_mark]), 2) # [USD]
    cum_OI = round(np.sum([mex_oi, bybit_oi, binance_oi, okex_oi]), 3) # [mil USD 1000mil => 1bil]
    oi_w_funding = round((mex_oi*mex_funding + bybit_oi*bybit_funding + binance_oi*binance_funding + okex_oi*okex_funding)/(mex_oi + bybit_oi + binance_oi + okex_oi), 3) # [%] => (-) bears are paying, (+) bulls are paying
    dis_msg = f"```BTC: mark price: {avg_mark} $ || cum OI: {cum_OI} mil USD || OI w funding {oi_w_funding} %```"
    hook.send(dis_msg)
# -----------------------------------------------------------
| [
2,
428,
4226,
16194,
267,
72,
11,
25032,
290,
1317,
2756,
1366,
422,
1643,
76,
1069,
11,
416,
2545,
11,
440,
365,
87,
11,
347,
14149,
329,
14503,
29079,
16075,
8592,
198,
2,
290,
4194,
630,
274,
1366,
422,
2187,
1910,
656,
3696,
1... | 2.62069 | 1,595 |
#coding=utf-8
import mxnet as mx
from mxnet import gluon
from mxnet.gluon import nn
| [
2,
66,
7656,
28,
40477,
12,
23,
198,
11748,
285,
87,
3262,
355,
285,
87,
198,
6738,
285,
87,
3262,
1330,
1278,
84,
261,
198,
6738,
285,
87,
3262,
13,
70,
2290,
261,
1330,
299,
77,
628
] | 2.297297 | 37 |
import pandas as pd
import numpy as np
import sys
import warnings
# Silence all Python warnings unless the user explicitly passed -W options.
if not sys.warnoptions:
    warnings.simplefilter('ignore')
import tensorflow.compat.v1 as tf
from datetime import timedelta
from tqdm import tqdm | [
11748,
19798,
292,
355,
279,
67,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
25064,
198,
11748,
14601,
198,
361,
407,
25064,
13,
40539,
25811,
25,
198,
220,
220,
220,
14601,
13,
36439,
24455,
10786,
46430,
11537,
198,
11748,
11192,
... | 3.212121 | 66 |
from aerospike import predicates as as_predicates
import pytest
# All predicate-factory helpers exposed by aerospike.predicates, used to
# parametrize the tests below.
# NOTE(review): the constant name is misspelled ("METHDOS" for "METHODS");
# kept as-is because renaming would break every reference to it.
PREDICATE_METHDOS = [
    as_predicates.equals,
    as_predicates.contains,
    as_predicates.between,
    as_predicates.range,
    as_predicates.geo_contains_geojson_point,
    as_predicates.geo_contains_point,
    as_predicates.geo_within_geojson_region,
    as_predicates.geo_within_radius
]
@pytest.mark.parametrize('predicate', PREDICATE_METHDOS)
| [
6738,
9551,
2117,
522,
1330,
2747,
16856,
355,
355,
62,
28764,
16856,
198,
11748,
12972,
9288,
198,
198,
4805,
1961,
2149,
6158,
62,
47123,
10227,
2640,
796,
685,
198,
220,
220,
220,
355,
62,
28764,
16856,
13,
4853,
874,
11,
198,
220,... | 2.388571 | 175 |
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 28 11:23:26 2017
@author: rickdberg
Create maps
"""
import numpy as np
import rasterio
from rasterio import Affine
from rasterio.warp import reproject, Resampling
import matplotlib.pyplot as plt
from site_metadata_compiler_completed import comp
import cartopy.crs as ccrs
import cartopy
from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER
from user_parameters import (engine, metadata_table,
site_info, hole_info, std_grids_path, ml_inputs_path)
# Load site data
site_metadata = comp(engine, metadata_table, site_info, hole_info)
mask = np.loadtxt(std_grids_path + "continent_mask.txt"
, delimiter='\t')
mask = mask.astype('bool')
# Get template
f = rasterio.open(ml_inputs_path + "Martin - porosity productivity distances\grl53425-sup-0002-supinfo.grd"
)
newaff = f.transform
top_left = f.transform * (0,0)
bottom_right = f.transform * (f.width, f.height)
lat_interval = (bottom_right[1]-top_left[1])/f.height
lon_interval = (bottom_right[0] - top_left[0])/f.width
lat = f.xy(0,0)[1] + np.arange(f.height)*lat_interval
lon = f.xy(0,0)[0] + np.arange(f.width)*lon_interval
lon[lon > 180] -= 360
"""
# Load random forest grid into template
fluxes = np.loadtxt('fluxes_rf_noridge.txt', delimiter='\t')
rf = rasterio.open('rf.nc', 'w', driver='GMT',
height=f.shape[0], width=f.shape[1],
count=1, dtype=fluxes.dtype,
crs='+proj=latlong', transform=f.transform)
rf.write(fluxes, 1)
src = rf
rf.close()
"""
title = '$Sites\ with\ quantified\ fluxes$'
"""
# Plot random forest grid
# read image into ndarray
im = src.read()
# transpose the array from (band, row, col) to (row, col, band)
im = np.transpose(im, [1,2,0])
im = im[:,:,0]
xmin = src.transform[2]
xmax = src.transform[2] + src.transform[0]*src.width
ymin = src.transform[5] + src.transform[4]*src.height
ymax = src.transform[5]
"""
# define cartopy crs for the raster, based on rasterio metadata
crs = ccrs.PlateCarree()
# create figure
ax = plt.axes(projection=crs)
plt.title(title, fontsize=20)
ax.set_xmargin(0.05)
ax.set_ymargin(0.10)
ax.set_xlim(-180,180)
ax.set_ylim(-90,90)
# ax.stock_img()
# plot coastlines
#ax.add_feature(cartopy.feature.LAND)
#ax.add_feature(cartopy.feature.OCEAN)
ax.add_feature(cartopy.feature.COASTLINE, linewidth=0.3)
# ax.add_feature(cartopy.feature.BORDERS, linestyle=':')
#ax.add_feature(cartopy.feature.LAKES, alpha=0.5)
# ax.add_feature(cartopy.feature.RIVERS)
#ax.set_global()
ax.stock_img()
# To add points
fname = site_metadata[['lon','lat']].as_matrix()
# points = list(cartopy.io.shapereader.Reader(fname).geometries())
ax.scatter(fname[:,0], fname[:,1],
transform=ccrs.Geodetic(), c='y')
gl = ax.gridlines(crs=ccrs.PlateCarree(), draw_labels=True,
color='gray', alpha=0.2, linestyle='--', )
gl.xlabels_top = False
gl.ylabels_right = False
gl.xformatter = LONGITUDE_FORMATTER
gl.yformatter = LATITUDE_FORMATTER
plt.show()
# eof
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
319,
19480,
2758,
2579,
1367,
25,
1954,
25,
2075,
2177,
198,
198,
31,
9800,
25,
374,
624,
67,
3900,
198,
198,
16447,
8739,
628,
198,
198,
37811,
198... | 2.324981 | 1,317 |
"""
This script provides an example usage of the PyTurbSim API.
"""
# Begin by importing the PyTurbSim API:
import pyts.api as pyts
# Define some variables for the PyTurbSim run:
# Reference height used as the grid center (presumably metres -- see tsGrid).
refht = 10.
# u* passed to the tidal spectral/stress models (presumably friction
# velocity in m/s -- confirm against the PyTurbSim docs).
ustar = 0.03
# Mean velocity handed to the profile model.
Uref = 3.
# First we initialize a PyTurbSim 'run' object:
tsr = pyts.tsrun()
# Next we give this run object a grid:
tsr.grid = pyts.tsGrid(
    center=refht, ny=5, nz=5, height=5, width=9, time_sec=1000, dt=0.5)
# Now we define a mean 'profile model',
prof_model = pyts.profModels.h2l(Uref, refht, ustar)
# and assign it to the run object,
tsr.prof = prof_model
# These two steps can be completed in one as:
#tsr.profModel=pyts.profModels.h2l(U,refht,ustar)
# Next we define and assign a 'spectral model' to the run object,
tsr.spec = pyts.specModels.tidal(ustar, refht)
# ... and define/assign a 'coherence model',
tsr.cohere = pyts.cohereModels.nwtc()
# ... and define/assign a 'stress model',
tsr.stress = pyts.stressModels.tidal(ustar, refht)
# Now simply 'call' the run object to produce the TurbSim output.
turbsim_output = tsr()
# We can save the output in 'bladed' format,
turbsim_output.write_bladed('ExampleOutput.bl')
| [
37811,
198,
1212,
4226,
3769,
281,
1672,
8748,
286,
262,
9485,
51,
5945,
8890,
7824,
13,
198,
37811,
198,
2,
16623,
416,
33332,
262,
9485,
51,
5945,
8890,
7824,
25,
198,
11748,
12972,
912,
13,
15042,
355,
12972,
912,
198,
198,
2,
28... | 2.647332 | 431 |
from django.shortcuts import redirect, render
from Guardian.views import login
from django.contrib.auth.decorators import login_required
from Guardian.decorators import admin_only
from Oracle.forms import TaskForm
from .tasks import Task, TaskStatus
@login_required(login_url='/guardian/login/')
@admin_only
@login_required(login_url='/guardian/login/')
@admin_only
@login_required(login_url='/guardian/login/')
@login_required(login_url='/guardian/login/')
@admin_only | [
6738,
42625,
14208,
13,
19509,
23779,
1330,
18941,
11,
8543,
198,
6738,
8283,
13,
33571,
1330,
17594,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
12501,
273,
2024,
1330,
17594,
62,
35827,
198,
6738,
8283,
13,
12501,
273,
2024... | 3.145695 | 151 |
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers, backend, models, utils
from .utils import deduce_input_shape
def conv2d(input,
           filters,
           rows,
           columns,
           padding='same',
           strides=(1,1),
           name='conv',
           data_format='channels_last',
           batch_normalization=True,
           activation='relu'):
    """
    Constructs a convolutional layer with optional batch normalization
    and activation.

    Args:
        input: input tensor fed to the Conv2D layer (4-D image batch
            assumed -- confirm with callers).
        filters: number of convolution filters.
        rows, columns: kernel height and width.
        padding, strides, data_format: forwarded to keras.layers.Conv2D.
        name: base layer name; '_conv' and '_bn' suffixes are appended.
        batch_normalization: when truthy, append a BatchNormalization layer.
        activation: activation name appended last, or falsy to skip.

    Returns:
        The output tensor of the (conv [+ bn] [+ activation]) stack.
    """
    # Bias is disabled because batch normalization supplies the shift term.
    net = layers.Conv2D(
        filters,
        (rows, columns),
        strides=strides,
        padding=padding,
        use_bias=False,
        name=name + '_conv',
        data_format=data_format)(input)
    # get_channels_axis is defined elsewhere in this module -- presumably
    # maps 'channels_last'/'channels_first' to the BN axis; confirm.
    ch_axis = get_channels_axis(data_format)
    # Add batch normalization
    if batch_normalization:
        net = layers.BatchNormalization(axis=ch_axis,
                                        scale=False, name=name + '_bn')(net)
    # Add activation
    if activation:
        net = layers.Activation(activation, name=name)(net)
    # Return the combined network
    return net
| [
11748,
299,
32152,
355,
45941,
198,
11748,
11192,
273,
11125,
355,
48700,
198,
6738,
11192,
273,
11125,
1330,
41927,
292,
198,
6738,
11192,
273,
11125,
13,
6122,
292,
1330,
11685,
11,
30203,
11,
4981,
11,
3384,
4487,
198,
198,
6738,
764... | 2.460241 | 415 |
"""IBM TRANSLATOR"""
import os
from ibm_watson import LanguageTranslatorV3
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator
from dotenv import load_dotenv
# Load credentials from a local .env file into the process environment.
load_dotenv()
# Raises KeyError at import time when the credentials are missing.
apikey = os.environ['apikey']
url = os.environ['url']
authenticator = IAMAuthenticator(apikey)
# Module-level client shared by both translation functions below.
language_translator = LanguageTranslatorV3(
    version='2021-09-15',
    authenticator=authenticator
)
language_translator.set_service_url(url)
def english_to_french(english_text):
    """Translate English text to French via Watson Language Translator.

    Args:
        english_text: the text to translate.

    Returns:
        The French translation string, "" for empty/missing input, or
        None when the service rejects the request (ValueError).
    """
    # Guard falsy input up front: the original `len(english_text) >= 1`
    # check raised an uncaught TypeError for None instead of degrading.
    if not english_text:
        return ""
    try:
        response = language_translator.translate(
            english_text, model_id='en-fr'
        ).get_result()
        # Response schema: {"translations": [{"translation": ...}, ...]}
        return response["translations"][0]['translation']
    except ValueError:
        return None
def french_to_english(french_text):
    """Translate French text to English via Watson Language Translator.

    Args:
        french_text: the text to translate.

    Returns:
        The English translation string, "" for empty/missing input, or
        None when the service rejects the request (ValueError).
    """
    # Guard falsy input up front: the original `len(french_text) >= 1`
    # check raised an uncaught TypeError for None instead of degrading.
    if not french_text:
        return ""
    try:
        response = language_translator.translate(
            french_text, model_id='fr-en'
        ).get_result()
        # Response schema: {"translations": [{"translation": ...}, ...]}
        return response["translations"][0]['translation']
    except ValueError:
        return None
#print(englishToFrench('Hello'))
#print(frenchToEnglish('Bonjour'))
| [
37811,
9865,
44,
48213,
8634,
25633,
37811,
198,
198,
11748,
28686,
198,
6738,
24283,
76,
62,
86,
13506,
1330,
15417,
8291,
41880,
53,
18,
198,
6738,
24283,
76,
62,
17721,
62,
21282,
74,
62,
7295,
13,
41299,
44549,
1330,
314,
2390,
47... | 2.425 | 640 |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'GetRepositoryResult',
'AwaitableGetRepositoryResult',
'get_repository',
'get_repository_output',
]
@pulumi.output_type
# pylint: disable=using-constant-test
# NOTE: Pulumi-generated wrapper (see the file header); keep logic edits to
# the code generator, not this function.
def get_repository(id: Optional[str] = None,
                   opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetRepositoryResult:
    """
    Resource Type definition for AWS::CodeCommit::Repository

    :param id: repository identifier to look up (shadows the builtin
        `id` -- dictated by the generated provider schema).
    :param opts: invoke options; a default instance is created when omitted.
    """
    __args__ = dict()
    __args__['id'] = id
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        # Pin the provider version shipped with this SDK build.
        opts.version = _utilities.get_version()
    __ret__ = pulumi.runtime.invoke('aws-native:codecommit:getRepository', __args__, opts=opts, typ=GetRepositoryResult).value
    return AwaitableGetRepositoryResult(
        arn=__ret__.arn,
        clone_url_http=__ret__.clone_url_http,
        clone_url_ssh=__ret__.clone_url_ssh,
        code=__ret__.code,
        id=__ret__.id,
        name=__ret__.name,
        repository_description=__ret__.repository_description,
        repository_name=__ret__.repository_name,
        tags=__ret__.tags,
        triggers=__ret__.triggers)
# The decorator supplies the implementation by lifting get_repository into
# pulumi's Output machinery; the `...` body is intentional.
@_utilities.lift_output_func(get_repository)
def get_repository_output(id: Optional[pulumi.Input[str]] = None,
                          opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetRepositoryResult]:
    """
    Resource Type definition for AWS::CodeCommit::Repository
    """
    ...
| [
2,
19617,
28,
40477,
12,
23,
198,
2,
17202,
39410,
25,
428,
2393,
373,
7560,
416,
262,
21624,
12994,
26144,
35986,
13,
17202,
198,
2,
17202,
2141,
407,
4370,
416,
1021,
4556,
345,
821,
1728,
345,
760,
644,
345,
389,
1804,
0,
17202,
... | 2.477716 | 718 |
"""Logger module contains LogManager which sets up file and stream handler + formatting."""
import logging
import re
from pathlib import Path
import logredactor
from rich.logging import RichHandler
class LogManager:
    """Manages the logs formats and levels.
    We have 2 loggers: one to stdout and one to a logger file. General logger
    level is DEBUG and each handler is set dynamically based on log-level CLI args.
    """
    def __init__(
        self,
        # NOTE(review): Path.cwd() in the default is captured once, at import
        # time of this module -- confirm that is the intended base directory.
        log_file_path: Path = Path(Path.cwd(), "dbt_sugar_logs"),
        log_to_console: bool = True,
    ):
        """Log manager constructor. Can take and override log path + whether to log to stdout.
        Args:
            log_file_path (Path, optional): Custom path to logger file.
                Defaults to Path(Path.cwd(), "dbt_sugar_logs").
            log_to_console (bool, optional): When true logs will also be pushed to stdout.
                Defaults to True.
        """
        # Side effect: creates the log directory if it does not exist yet.
        Path(log_file_path).mkdir(parents=True, exist_ok=True)
        log_filename = Path(log_file_path, "dbt_sugar_log.log")
        logger = logging.getLogger("dbt-sugar logger")
        # set the logger to the lowest level (then each handler will have its level -- this ensures
        # that all logging always ends up in the file logger.)
        logger.setLevel(logging.DEBUG)
        # Create handlers
        f_handler = logging.FileHandler(log_filename)
        f_handler.setLevel(logging.DEBUG)
        # Create formatters and add it to handlers
        f_format = logging.Formatter(
            "%(asctime)s - %(name)s - %(levelname)s - %(funcName)s - %(message)s"
        )
        f_handler.setFormatter(f_format)
        # Add handlers to the logger
        logger.addHandler(f_handler)
        # if we want to print the log to console we're going to add a streamhandler
        if log_to_console:
            # Rich handler renders markup and pretty tracebacks on stdout.
            c_handler = RichHandler(
                rich_tracebacks=True,
                show_level=False,
                markup=True,
                enable_link_path=False,
                show_path=False,
            )
            c_handler.setLevel(logging.INFO)
            logger.addHandler(c_handler)
        # Mask anything between "password=" and " database" in log records so
        # connection strings never leak credentials into the log file.
        redact_patterns = [re.compile(r"(?<=password=).*(?= database)")]
        logger.addFilter(logredactor.RedactingFilter(redact_patterns, default_mask="'*hidden*'"))
        self.logger = logger
        self.f_format = f_format
    def set_debug(self):
        """Set all loggers handlers to debug level."""
        self.logger.setLevel(logging.DEBUG)
        for handler in self.logger.handlers:
            handler.setLevel(logging.DEBUG)
# Module-level singleton: every importer shares this one configured logger.
log_manager = LogManager()
GLOBAL_LOGGER = log_manager.logger
| [
37811,
11187,
1362,
8265,
4909,
5972,
13511,
543,
5621,
510,
2393,
290,
4269,
21360,
1343,
33313,
526,
15931,
198,
198,
11748,
18931,
198,
11748,
302,
198,
6738,
3108,
8019,
1330,
10644,
198,
198,
11748,
2604,
445,
11218,
198,
6738,
5527,... | 2.368745 | 1,139 |
"""
Helper classes for the management of subscription and unsubscription of the
Items handled by the Remote Data Adapter.
"""
from contextlib import contextmanager
import threading
from _collections import deque
from lightstreamer_adapter.protocol import RemotingException
from . import DATA_PROVIDER_LOGGER
class _ItemTaskManager():
    """Helper class which schedules the execution of the tasks relative to a
    specified Item.
    This class manages a queue of tasks to be executed for an Item. Tasks
    are dequeued by a single thread, which is submitted to the Executor
    configured by the Data Provider Server and then propagated to the
    Subscription Manager. The single thread ensures that each unique task is
    executed in a sequentialized way, in order to avoid any synchronisation
    issue that may affect the Item consistency.

    NOTE(review): the initializer is outside this chunk; attributes
    _lock, _tasks_deq, _queued, _isrunning, _last_subscribe_outcome,
    _code, _item_name and _subscription_mgr are presumably set there --
    confirm against the full module.
    """
    def inc_queued(self):
        """Increments the total number of tasks submitted to this manager.
        """
        self._queued += 1
    def add_task(self, task):
        """Add to the queue the provided task, to be run asynchronously by
        the Executor.
        """
        with self._lock:
            self._tasks_deq.append(task)
            # Starts only a single dequeuing task, which is submitted to the
            # Executor.
            if not self._isrunning:
                self._isrunning = True
                self._subscription_mgr.execute_task(self._deque)
    @property
    def code(self):
        """The current Request Id.
        """
        return self._code
    def _deque(self):
        """Dequeuing task submitted to the Executor.
        This task dequeues all _ItemTask instances submitted to this
        _ItemManager, and executes the wrapped task and 'late' task, the latter
        if required.
        """
        dequeued = 0
        last_subscribe_outcome = True
        while True:
            with self._lock:
                # On the first pass, resume from the outcome persisted by the
                # previous dequeuing run.
                if dequeued == 0:
                    last_subscribe_outcome = self._last_subscribe_outcome
                if len(self._tasks_deq) == 0:
                    # Queue drained: persist the outcome and mark the single
                    # dequeuing thread as finished before leaving.
                    self._isrunning = False
                    self._last_subscribe_outcome = last_subscribe_outcome
                    break
                # Gets the next _ItemTask.
                item_task = self._tasks_deq.popleft()
                islast = len(self._tasks_deq) == 0
                dequeued += 1
            try:
                if item_task.issubscribe:
                    # Current scheduled task is a Subscription
                    if not islast:
                        # A newer task supersedes this subscription: run the
                        # 'late' variant instead of the real one.
                        item_task.do_late_task()
                        last_subscribe_outcome = False
                    else:
                        with self._subscription_mgr.sync_items():
                            # The current Request Id is set to the one of
                            # the current scheduled task.
                            self._code = item_task.code
                            last_subscribe_outcome = item_task.do_task()
                else:
                    # Current scheduled task is an Unsubscription
                    if last_subscribe_outcome:
                        # Previous subscription succeeded, so execute the
                        # unsubscription task,
                        item_task.do_task()
                    else:
                        # Issue in the previous subscription, so execute the
                        # 'late task'.
                        item_task.do_late_task()
                    with self._subscription_mgr.sync_items():
                        # In case of unsubscription, putting the current
                        # Request Id to None indicates that no more updates are
                        # expected for this Item.
                        self._code = None
            except RemotingException:
                DATA_PROVIDER_LOGGER.error("Caught an exception")
        # Invokes the _dec_queued method through the SubscriptionManager,
        # while the RLock associated with the Subscription Manager is kept.
        with self._subscription_mgr.sync_items():
            self._dec_queued(dequeued)
    def _dec_queued(self, dequeued):
        """Decrements the total number of enqueued tasks, until it will be
        necessary to remove this _ItemTaskManager from the SubscriptionManager.
        """
        self._queued -= dequeued
        # No pending Request Id and nothing queued: this manager may be
        # removable, but only if the SubscriptionManager still maps the item
        # name to *this* instance.
        if not self._code and self._queued == 0:
            item_manager = self._subscription_mgr.get_item_mgr(self._item_name)
            if not item_manager:
                pass
            elif item_manager != self:
                pass
            else:
                self._subscription_mgr.del_active_item(self._item_name)
class ItemTask():
    """Wrapper around the execution of a task bound to a Request Id.

    Each instance carries both the regular task and a "late" task; the
    late one runs when the Lightstreamer Server requested the work too
    late or the regular task's outcome was a failure.
    """
    @property
    def code(self):
        """Request Id that originated this task execution."""
        return self._request_id
    @property
    def issubscribe(self):
        """True for a Subscription task, False for an Unsubscription."""
        return self._issubscribe
    def do_task(self):
        """Run the wrapped task and return its outcome."""
        return self._do_task()
    def do_late_task(self):
        """Run the wrapped 'late' task (no return value)."""
        self._do_late_task()
class SubscriptionManager():
    """Helper class for the subscription management.
    This class hides the complexity related with the synchronization required
    to handle in a proper way the subscription and unsubscription operations
    for the items.
    Subscription and unsubscription operations are managed asynchronously
    through the submission of related tasks to an Executor.

    NOTE(review): the initializer is outside this chunk; attributes
    _executor, _active_items and _active_items_lock are presumably set
    there -- confirm against the full module.
    """
    def execute_task(self, task):
        """Executes the provided task.
        The task is submitted to the Executor configured by the Data Provider
        Server and then propagated to this Subscription Manager.
        """
        self._executor.submit(task)
    def do_subscription(self, item_name, sub_task):
        """Schedules the execution of the 'sub_task' function, provided by the
        DataProvider server for managing the subscription of the 'item_name'
        Item.
        The sub_task is a sequence of operations which involve the Remote Data
        Adapter attached to the Data Provider Server.
        """
        with self._active_items_lock:
            if item_name not in self._active_items:
                # Initializes a new _ItemTaskManager for the provided
                # item_name.
                self._active_items[item_name] = _ItemTaskManager(item_name,
                                                                 self)
            item_manager = self._active_items[item_name]
            item_manager.inc_queued()
        # Submits the task to the _ItemTaskManager (outside the lock: the
        # manager serializes its own queue).
        item_manager.add_task(sub_task)
    def do_unsubscription(self, item_name, unsub_task):
        """Schedules the execution of the 'unsub_task' function, provided by
        the DataProvider server for managing the unsubscription of the
        'item_name' Item.
        The unsub_task is a sequence of operations which involve the Remote
        Data Adapter attached to the Data Provider Server.
        """
        with self._active_items_lock:
            if item_name not in self._active_items:
                # Unsubscribing an item that was never subscribed: log and bail.
                DATA_PROVIDER_LOGGER.error("Task list expected for item %s",
                                           item_name)
                return
            item_manager = self._active_items[item_name]
            item_manager.inc_queued()
        # Submits the task to the _ItemTaskManager.
        item_manager.add_task(unsub_task)
    @contextmanager
    def sync_items(self):
        """Defines the function for the 'with' statement, in order to execute a
        block while the RLock associated with the internal items dictionary
        is acquired.
        """
        with self._active_items_lock:
            yield
    def get_item_mgr(self, item_name):
        """Retrieves the _ItemTaskManager associated with the provided
        item_name.
        This method is used only internally by the _ItemTaskManager to decide
        whether to remove itself from the SubscriptionManager, through an
        invocation to the 'del_active_item' method.
        """
        return self._active_items.get(item_name)
    def get_active_item(self, item_name):
        """Retrieves the current Request Id for the 'item_name' Item, or
        None when the item is not active.
        """
        with self._active_items_lock:
            if item_name in self._active_items:
                item_manager = self._active_items[item_name]
                return item_manager.code
        return None
    def del_active_item(self, item_name):
        """Removes the 'item_name' Item from this Subscription Manager.
        """
        if item_name in self._active_items:
            del self._active_items[item_name]
| [
37811,
198,
47429,
6097,
329,
262,
4542,
286,
14569,
290,
32793,
33584,
286,
262,
198,
23022,
12118,
416,
262,
21520,
6060,
43721,
13,
198,
37811,
198,
6738,
4732,
8019,
1330,
4732,
37153,
198,
11748,
4704,
278,
198,
6738,
4808,
4033,
2... | 2.313223 | 3,978 |
import math
from collections import OrderedDict
import numpy as np
import scipy.signal
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.parameter import Parameter
# Entry-point guard kept as a placeholder; the module only defines imports.
if __name__ == '__main__':
    pass
| [
11748,
10688,
198,
6738,
17268,
1330,
14230,
1068,
35,
713,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
629,
541,
88,
13,
12683,
282,
198,
11748,
28034,
198,
11748,
28034,
13,
20471,
355,
299,
77,
198,
11748,
28034,
13,
20471,
13,
... | 3.175676 | 74 |
from Tkinter import *
from tkSimpleDialog import Dialog
import json
import csv
import tkFileDialog
class ErrorWindow(Dialog):
"""
Provided a list of error messages, shows them in a simple pop-up window.
"""
| [
6738,
309,
74,
3849,
1330,
1635,
198,
6738,
256,
74,
26437,
44204,
1330,
21269,
519,
198,
11748,
33918,
198,
11748,
269,
21370,
198,
11748,
256,
74,
8979,
44204,
198,
198,
4871,
13047,
27703,
7,
44204,
2599,
198,
220,
220,
220,
37227,
... | 3.217391 | 69 |
class Species(object):
""" A collection of genetically similar individuals."""
| [
4871,
28540,
7,
15252,
2599,
198,
220,
220,
220,
37227,
317,
4947,
286,
20807,
2092,
3925,
526,
15931,
198
] | 4.368421 | 19 |
from datetime import date, timedelta
from typing import List, Optional, Union, Iterator
import arrow
from sqlalchemy.orm import Session
from app.database.models import Event
from app.routers.event import sort_by_date
from app.routers.user import get_all_user_events
def get_events_per_dates(
        session: Session,
        user_id: int,
        start: Optional[date],
        end: Optional[date]
) -> Union[Iterator[Event], list]:
    """Read from the db. Return a list of all
    the user events between the relevant dates.

    Returns an empty list when the range is inverted, otherwise a lazy
    iterator over the user's events sorted by date and filtered to
    [start, end].
    """
    # NOTE(review): both bounds are typed Optional, but a None here makes
    # `start > end` (and filter_dates' comparisons) raise TypeError --
    # confirm callers never pass None or add explicit guards.
    if start > end:
        return []
    return (
        filter_dates(
            sort_by_date(
                get_all_user_events(session, user_id)
            ),
            start,
            end,
        )
    )
def build_arrow_delta_granularity(diff: timedelta) -> List[str]:
    """Return the granularity units for arrow's humanize() string.

    A unit ("day", "hour", "minute") is included only when the matching
    component of *diff* is non-zero.
    """
    hours, leftover_seconds = divmod(diff.seconds, 3600)
    minutes = leftover_seconds // 60
    components = [
        ("day", diff.days),
        ("hour", hours),
        ("minute", minutes),
    ]
    return [unit for unit, amount in components if amount > 0]
def get_time_delta_string(start: date, end: date) -> str:
    """Build a human-readable duration string (days, hours and minutes)."""
    diff = end - start
    # Only mention the units that are actually present in the difference.
    humanize_units = build_arrow_delta_granularity(diff)
    return arrow.get(end).humanize(
        arrow.get(start), only_distance=True, granularity=humanize_units
    )
def filter_dates(
        events: List[Event], start: Optional[date],
        end: Optional[date]) -> Iterator[Event]:
    """Yield only the events whose start date falls inside [start, end]."""
    for event in events:
        if start <= event.start.date() <= end:
            yield event
| [
6738,
4818,
8079,
1330,
3128,
11,
28805,
12514,
198,
6738,
19720,
1330,
7343,
11,
32233,
11,
4479,
11,
40806,
1352,
198,
198,
11748,
15452,
198,
6738,
44161,
282,
26599,
13,
579,
1330,
23575,
198,
198,
6738,
598,
13,
48806,
13,
27530,
... | 2.536585 | 738 |
r"""
===================================================
Cone / Cylinder DataBase (:mod:`desicos.conecylDB`)
===================================================
.. currentmodule:: desicos.conecylDB
The ``desicos.conecylDB`` module includes all the information about cones
and cylinders required to reproduce structures that were investigated
by many publications and in the context of DESICOS.
It also includes the tools necessary to work with the Imperfection
DataBase. Unfortunately, the files composing this database cannot be made
available with the repository, but all the tools required to post process
an imperfection file had been made available.
.. automodule:: desicos.conecylDB.conecylDB
:members:
.. automodule:: desicos.conecylDB.ccs
:members:
.. automodule:: desicos.conecylDB.laminaprops
:members:
.. automodule:: desicos.conecylDB.allowables
:members:
.. automodule:: desicos.conecylDB.fit_data
:members:
.. automodule:: desicos.conecylDB.interpolate
:members:
.. automodule:: desicos.conecylDB.read_write
:members:
"""
from __future__ import absolute_import
from .conecylDB import *
| [
81,
37811,
198,
10052,
4770,
18604,
198,
34,
505,
1220,
327,
2645,
5540,
6060,
14881,
357,
25,
4666,
25,
63,
8906,
291,
418,
13,
49180,
38801,
11012,
63,
8,
198,
10052,
4770,
18604,
198,
198,
492,
1459,
21412,
3712,
748,
291,
418,
1... | 3.372781 | 338 |
#coding=UTF-8
'''
Created on 2011-7-5
@author: Administrator
'''
import threading
import time
from spider import soufang
from spider import ganji
from spider import tongcheng58
from spider.threadpool import ThreadPool, makeRequests
import urllib2
import urllib
from spider.globalvars import fetch_quere
from spider.jjrlog import msglogger
import gc
import random
import spider
gc.enable()
#gc.set_debug(gc.DEBUG_COLLECTABLE | gc.DEBUG_UNCOLLECTABLE | gc.DEBUG_INSTANCES | gc.DEBUG_OBJECTS)
coctn=True
# for r in res[0]:
# fetch_quere.put({"link":r,"args":res[1]})
# print p.decode('gbk')
if __name__=="__main__":
data=[
# ["tongcheng58","su","1"],
# ["tongcheng58","su","2"],
["tongcheng58","cz","3"],
# ["tongcheng58","su","4"],
## [soufang,"su","1"],
# ["ganji","su","1"],
# ["ganji","su","2"],
# ["ganji","su","3"],
# ["ganji","su","4"],
]
# linksThead(data)
fl=fetchLinkThreadControl(data)
fl.start()
print ""
time.sleep(5)
fd=fetchDataThreadControl(100)
fd.setDaemon(True)
fd.start()
# linksThead(data)
# print getattr(spider,"tongcheng58")
# lf=file("link.log")
# idx=0
# for line in lf.readlines():
# lk=line.split('|')
# fetch_quere.put({"mod":"tongcheng58","link":lk[1],"citycode":"su","kind":lk[0]})
# idx=idx+1
# if idx%25==0:
# time.sleep(random.randint(1,30))
# try:
# ct=CThread("su",'1',3000,3)
# ct.start()
# except:
# pass | [
2,
66,
7656,
28,
48504,
12,
23,
198,
7061,
6,
198,
41972,
319,
2813,
12,
22,
12,
20,
198,
198,
31,
9800,
25,
22998,
198,
7061,
6,
198,
11748,
4704,
278,
198,
11748,
640,
198,
6738,
19230,
1330,
24049,
69,
648,
198,
6738,
19230,
... | 1.978235 | 827 |
"""
Layout: Footer of the dashboard
"""
# Third party imports
import dash_html_components as html
subfields = html.Div(
[
html.Span('Subfields of SED data used for this department: '),
html.Span(id='searchcom-search-subfields')
],
id='searchcom-subfields-footer',
className='mt-3 text-muted'
)
| [
37811,
198,
32517,
25,
7870,
263,
286,
262,
30415,
198,
37811,
198,
198,
2,
10467,
2151,
17944,
198,
11748,
14470,
62,
6494,
62,
5589,
3906,
355,
27711,
628,
198,
7266,
25747,
796,
27711,
13,
24095,
7,
198,
220,
220,
220,
685,
198,
... | 2.570313 | 128 |
import argparse
import os
import logging
import time
import espresso_ir
from espresso_ir.mods import (cloudtrail_api, s3_bucket_cloudtrail,
ssm_setup, s3_buckets_ir, memdump, flow_logs, get_logs, ec2_snapshot,
vpc
)
#List of args, Cases number, Region, EC2 instance ID, Dump memeory, Set up API reording, Flow logs, EC2 Snapshot, EC2 Isolation
t0 = time.time()
if __name__ == "__main__":
cli()
| [
11748,
1822,
29572,
201,
198,
11748,
28686,
201,
198,
11748,
18931,
201,
198,
11748,
640,
201,
198,
201,
198,
201,
198,
11748,
48247,
62,
343,
201,
198,
6738,
48247,
62,
343,
13,
24122,
1330,
357,
17721,
9535,
346,
62,
15042,
11,
264,... | 2.351064 | 188 |
#!/bin/python
import sys
totval = map(int,raw_input().split())
i=0
for val in totval:
val[i:len(totval)]
i = i + val
print i | [
2,
48443,
8800,
14,
29412,
198,
198,
11748,
25064,
628,
198,
83,
313,
2100,
796,
3975,
7,
600,
11,
1831,
62,
15414,
22446,
35312,
28955,
198,
198,
72,
28,
15,
198,
1640,
1188,
287,
2006,
2100,
25,
198,
220,
220,
220,
1188,
58,
72,... | 2.15873 | 63 |
import pathlib
import PIL
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.models import Sequential
sport_balls_data_url = 'https://github.com/jjz17/Sport-Ball-Image-Classifier/raw/main/data/sport_ball_images.zip'
data_dir = tf.keras.utils.get_file('images', sport_balls_data_url, extract=True)
# data_dir = pathlib.Path(data_dir)
data_dir = pathlib.Path('/Users/jasonzhang/.keras/datasets/sport_ball_images')
# dataset_url = "https://storage.googleapis.com/download.tensorflow.org/example_images/flower_photos.tgz"
# data_dir = tf.keras.utils.get_file('flower_photos', origin=dataset_url, untar=True)
# data_dir = pathlib.Path(data_dir)
# print(type(data_dir))
image_count = len(list(data_dir.glob('*/*.jpg')))
print(f'# of images: {image_count}')
basketball_count = len(list(data_dir.glob('basketball/*.jpg')))
print(f'# of basketballs: {basketball_count}')
soccer_count = len(list(data_dir.glob('soccer/*.jpg')))
print(f'# of soccerballs: {soccer_count}')
basketballs = list(data_dir.glob('basketball/*'))
PIL.Image.open(str(basketballs[0]))
'''
Load data using a Keras utility
Let's load these images off disk using the helpful `tf.keras.utils.image_dataset_from_directory` utility. This will take you from a directory of images on disk to a `tf.data.Dataset` in just a couple lines of code. If you like, you can also write your own data loading code from scratch by visiting the [Load and preprocess images](../load_data/images.ipynb) tutorial.
Create a dataset
Define some parameters for the loader:
'''
batch_size = 32
img_height = 180
img_width = 180
# It's good practice to use a validation split when developing your model.
# Let's use 80% of the images for training, and 20% for validation.
train_ds = tf.keras.utils.image_dataset_from_directory(
data_dir,
validation_split=0.2,
subset="training",
seed=123,
image_size=(img_height, img_width),
batch_size=batch_size)
val_ds = tf.keras.utils.image_dataset_from_directory(
data_dir,
validation_split=0.2,
subset="validation",
seed=123,
image_size=(img_height, img_width),
batch_size=batch_size)
# You can find the class names in the `class_names` attribute on these datasets.
# These correspond to the directory names in alphabetical order.
class_names = train_ds.class_names
print(f'Classes: {class_names}')
# Visualize the data
# Here are the first nine images from the training dataset:
plt.figure(figsize=(10, 10))
for images, labels in train_ds.take(1):
for i in range(9):
ax = plt.subplot(3, 3, i + 1)
plt.imshow(images[i].numpy().astype("uint8"))
plt.title(class_names[labels[i]])
plt.axis("off")
plt.show()
# You will train a model using these datasets by passing them to `Model.fit` in a moment.
# If you like, you can also manually iterate over the dataset and retrieve batches of images:
for image_batch, labels_batch in train_ds:
print(image_batch.shape)
print(labels_batch.shape)
break
# The `image_batch` is a tensor of the shape `(32, 180, 180, 3)`.
# This is a batch of 32 images of shape `180x180x3` (the last dimension refers to color channels RGB).
# The `label_batch` is a tensor of the shape `(32,)`, these are corresponding labels to the 32 images.
# You can call `.numpy()` on the `image_batch` and `labels_batch` tensors to convert them
# to a `numpy.ndarray`.
# Configure the dataset for performance
# Let's make sure to use buffered prefetching so you can yield data from disk without
# having I/O become blocking. These are two important methods you should use when loading data:
# - `Dataset.cache` keeps the images in memory after they're loaded off disk during the first epoch.
# This will ensure the dataset does not become a bottleneck while training your model.
# If your dataset is too large to fit into memory, you can also use this method to create a performant on-disk cache.
# - `Dataset.prefetch` overlaps data preprocessing and model execution while training.
# Interested readers can learn more about both methods, as well as how to cache data to disk in
# the *Prefetching* section of the [Better performance with the tf.data API](../../guide/data_performance.ipynb) guide.
AUTOTUNE = tf.data.AUTOTUNE
train_ds = train_ds.cache().shuffle(1000).prefetch(buffer_size=AUTOTUNE)
val_ds = val_ds.cache().prefetch(buffer_size=AUTOTUNE)
# Standardize the data
# The RGB channel values are in the `[0, 255]` range. This is not ideal for a neural network;
# in general you should seek to make your input values small.
# Here, you will standardize values to be in the `[0, 1]` range by using `tf.keras.layers.Rescaling`:
normalization_layer = layers.Rescaling(1. / 255)
# There are two ways to use this layer. You can apply it to the dataset by calling `Dataset.map`:
normalized_ds = train_ds.map(lambda x, y: (normalization_layer(x), y))
image_batch, labels_batch = next(iter(normalized_ds))
first_image = image_batch[0]
# Notice the pixel values are now in `[0,1]`.
print(np.min(first_image), np.max(first_image))
# Or, you can include the layer inside your model definition, which can simplify deployment.
# Let's use the second approach here.
# Note: You previously resized images using the `image_size` argument of `tf.keras.utils.image_dataset_from_directory`.
# If you want to include the resizing logic in your model as well, you can use the `tf.keras.layers.Resizing` layer.
# Create the model
# The [Sequential](https://www.tensorflow.org/guide/keras/sequential_model) model consists of
# three convolution blocks (`tf.keras.layers.Conv2D`) with a max pooling layer (`tf.keras.layers.MaxPooling2D`)
# in each of them. There's a fully-connected layer (`tf.keras.layers.Dense`) with 128 units on top of it that
# is activated by a ReLU activation function (`'relu'`). This model has not been tuned for high accuracy—the
# goal of this tutorial is to show a standard approach.
num_classes = len(class_names)
model = Sequential([
layers.Rescaling(1. / 255, input_shape=(img_height, img_width, 3)),
layers.Conv2D(16, 3, padding='same', activation='relu'),
layers.MaxPooling2D(),
layers.Conv2D(32, 3, padding='same', activation='relu'),
layers.MaxPooling2D(),
layers.Conv2D(64, 3, padding='same', activation='relu'),
layers.MaxPooling2D(),
layers.Flatten(),
layers.Dense(128, activation='relu'),
layers.Dense(num_classes)
])
# Compile the model
# For this tutorial, choose the `tf.keras.optimizers.Adam` optimizer and `tf.keras.losses.SparseCategoricalCrossentropy`
# loss function. To view training and validation accuracy for each training epoch, pass the `metrics` argument to
# `Model.compile`.
model.compile(optimizer='adam',
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=['accuracy'])
# Model summary
# View all the layers of the network using the model's `Model.summary` method:
model.summary()
# Train the model
epochs = 10
history = model.fit(
train_ds,
validation_data=val_ds,
epochs=epochs
)
## Visualize training results
# Create plots of loss and accuracy on the training and validation sets:
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs_range = range(epochs)
plt.figure(figsize=(8, 8))
plt.subplot(1, 2, 1)
plt.plot(epochs_range, acc, label='Training Accuracy')
plt.plot(epochs_range, val_acc, label='Validation Accuracy')
plt.legend(loc='lower right')
plt.title('Training and Validation Accuracy')
plt.subplot(1, 2, 2)
plt.plot(epochs_range, loss, label='Training Loss')
plt.plot(epochs_range, val_loss, label='Validation Loss')
plt.legend(loc='upper right')
plt.title('Training and Validation Loss')
plt.show()
# The plots show that training accuracy and validation accuracy are very close, and the model has
# achieved over 90% accuracy on the validation set.
# The plots show that training accuracy and validation accuracy are off by large margins, and the model has
# achieved only around 60% accuracy on the validation set.
# Let's inspect what went wrong and try to increase the overall performance of the model.
# %%
# Overfitting
# In the plots above, the training accuracy is increasing linearly over time, whereas validation accuracy stalls
# around 60% in the training process. Also, the difference in accuracy between training and validation accuracy
# is noticeable—a sign of [overfitting](https://www.tensorflow.org/tutorials/keras/overfit_and_underfit).
# When there are a small number of training examples, the model sometimes learns from noises or unwanted details
# from training examples—to an extent that it negatively impacts the performance of the model on new examples.
# This phenomenon is known as overfitting. It means that the model will have a difficult time generalizing on a
# new dataset.
# There are multiple ways to fight overfitting in the training process. In this tutorial, you'll use *data
# augmentation* and add *Dropout* to your model.
#
# Data augmentation
# Overfitting generally occurs when there are a small number of training examples.
# [Data augmentation](./data_augmentation.ipynb) takes the approach of generating additional training data
# from your existing examples by augmenting them using random transformations that yield believable-looking
# images. This helps expose the model to more aspects of the data and generalize better.
# You will implement data augmentation using the following Keras preprocessing layers: `tf.keras.layers.RandomFlip`,
# `tf.keras.layers.RandomRotation`, and `tf.keras.layers.RandomZoom`. These can be included inside your model like
# other layers, and run on the GPU.
data_augmentation = keras.Sequential(
[
layers.RandomFlip("horizontal",
input_shape=(img_height,
img_width,
3)),
layers.RandomRotation(0.1),
layers.RandomZoom(0.1),
]
)
# Let's visualize what a few augmented examples look like by applying data augmentation to the same image several times:
plt.figure(figsize=(10, 10))
for images, _ in train_ds.take(1):
for i in range(9):
augmented_images = data_augmentation(images)
ax = plt.subplot(3, 3, i + 1)
plt.imshow(augmented_images[0].numpy().astype("uint8"))
plt.axis("off")
plt.show()
# You will use data augmentation to train a model in a moment.
# Dropout
# Another technique to reduce overfitting is to introduce
# [dropout](https://developers.google.com/machine-learning/glossary#dropout_regularization) regularization to the
# network.
# When you apply dropout to a layer, it randomly drops out (by setting the activation to zero) a number of output
# units from the layer during the training process. Dropout takes a fractional number as its input value, in the
# form such as 0.1, 0.2, 0.4, etc. This means dropping out 10%, 20% or 40% of the output units randomly from the
# applied layer.
# Let's create a new neural network with `tf.keras.layers.Dropout` before training it using the augmented images:
model = Sequential([
data_augmentation,
layers.Rescaling(1. / 255),
layers.Conv2D(16, 3, padding='same', activation='relu'),
layers.MaxPooling2D(),
layers.Conv2D(32, 3, padding='same', activation='relu'),
layers.MaxPooling2D(),
layers.Conv2D(64, 3, padding='same', activation='relu'),
layers.MaxPooling2D(),
layers.Dropout(0.2),
layers.Flatten(),
layers.Dense(128, activation='relu'),
layers.Dense(num_classes)
])
# Compile and train the model
model.compile(optimizer='adam',
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=['accuracy'])
model.summary()
epochs = 15
history = model.fit(
train_ds,
validation_data=val_ds,
epochs=epochs
)
# Visualize training results
# After applying data augmentation and `tf.keras.layers.Dropout`, there is less overfitting than before, and
# training and validation accuracy are closer aligned:
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs_range = range(epochs)
plt.figure(figsize=(8, 8))
plt.subplot(1, 2, 1)
plt.plot(epochs_range, acc, label='Training Accuracy')
plt.plot(epochs_range, val_acc, label='Validation Accuracy')
plt.legend(loc='lower right')
plt.title('Training and Validation Accuracy')
plt.subplot(1, 2, 2)
plt.plot(epochs_range, loss, label='Training Loss')
plt.plot(epochs_range, val_loss, label='Validation Loss')
plt.legend(loc='upper right')
plt.title('Training and Validation Loss')
plt.show()
# Predict on new data
# Finally, let's use our model to classify an image that wasn't included in the training or validation sets.
# Note: Data augmentation and dropout layers are inactive at inference time.
# %%
# sunflower_url = "https://storage.googleapis.com/download.tensorflow.org/example_images/592px-Red_sunflower.jpg"
# sunflower_path = tf.keras.utils.get_file('Red_sunflower', origin=sunflower_url)
basketball_url = 'https://github.com/jjz17/Sport-Ball-Image-Classifier/raw/main/data/sport_ball_images/test_images/basketball.jpg'
# basketball_path = tf.keras.utils.get_file('basketball', origin=basketball_url)
soccerball_url = 'https://github.com/jjz17/Sport-Ball-Image-Classifier/raw/main/data/sport_ball_images/test_images/soccerball.jpg'
# soccerball_path = tf.keras.utils.get_file('soccerball', origin=soccerball_url)
# img = tf.keras.utils.load_img(
# sunflower_path, target_size=(img_height, img_width)
# )
# img_array = tf.keras.utils.img_to_array(img)
# img_array = tf.expand_dims(img_array, 0) # Create a batch
#
# predictions = model.predict(img_array)
# score = tf.nn.softmax(predictions[0])
#
# print(
# "This image most likely belongs to {} with a {:.2f} percent confidence."
# .format(class_names[np.argmax(score)], 100 * np.max(score))
# )
# img = tf.keras.utils.load_img(
# basketball_path, target_size=(img_height, img_width)
# )
# img_array = tf.keras.utils.img_to_array(img)
# img_array = tf.expand_dims(img_array, 0) # Create a batch
#
# predictions = model.predict(img_array)
# score = tf.nn.softmax(predictions[0])
#
# print(
# "This image most likely belongs to {} with a {:.2f} percent confidence."
# .format(class_names[np.argmax(score)], 100 * np.max(score))
# )
#
# img = tf.keras.utils.load_img(
# soccerball_path, target_size=(img_height, img_width)
# )
# img_array = tf.keras.utils.img_to_array(img)
# img_array = tf.expand_dims(img_array, 0) # Create a batch
#
# predictions = model.predict(img_array)
# score = tf.nn.softmax(predictions[0])
#
# print(
# "This image most likely belongs to {} with a {:.2f} percent confidence."
# .format(class_names[np.argmax(score)], 100 * np.max(score))
# )
predict_image(basketball_url, 'basketball')
predict_image(soccerball_url, 'soccerball') | [
11748,
3108,
8019,
198,
198,
11748,
350,
4146,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
11192,
273,
11125,
355,
48700,
198,
6738,
11192,
273,
11125,
1330,
41927,
292,
... | 2.938878 | 5,170 |
#!-*- coding: utf8 -*-
from selenium.webdriver.common.keys import Keys
from sklearn.naive_bayes import MultinomialNB
from selenium import webdriver
from bs4 import BeautifulSoup
import pandas
import time
import nltk
import os
class Message:
"""This class access the whatsapp, seek for unread messages and replies it.
"""
def get_unread(self):
"""This function gets the unread chats and click on it.
Returns
-------
"""
try:
unread_chat = self.driver.find_element_by_class_name('P6z4j')
unread_chat.click()
time.sleep(5)
self.get_last_message()
except Exception as e:
e.args
pass
def get_source_code(self):
"""This function gets the source code from whatsapp web and retunrn it.
Returns
-------
BeautifulSoup(html, 'html5lib') : bs4.BeautifulSoup
Parsed html.
"""
html = self.driver.page_source
return BeautifulSoup(html, 'html.parser')
def get_last_message(self):
"""This functions get the last unread message.
Returns
-------
"""
soup = self.get_source_code()
lst_msg = soup.find_all('span', {'class': 'selectable-text invisible-space copyable-text'})
try:
msg = lst_msg[-1].text
input_box = self.driver.find_element_by_xpath('//*[@id="main"]/footer/div[1]/div[2]/div/div[2]')
input_box.send_keys(self.nltk.pred(self.model, msg, self.librarian))
input_box.send_keys(Keys.ENTER)
except Exception as e:
e.args
pass
def __call__(self, *args, **kwargs):
"""Main function
Parameters
----------
args
kwargs
Returns
-------
"""
print('Starting API')
input()
while True:
self.get_unread()
class NLTK:
"""This class make the natural language processing for a given text input.
"""
# Used in main function
def cleaning_dict(self):
"""This function creates and fill a set of stem valid words.
Returns
-------
valid_words : dict
Dictionary with stem valid words.
"""
dictionary = set()
for i in self.df_token:
valid_words = [self.stemmer.stem(nxDF) for nxDF in i if nxDF not in self.stopwords]
dictionary.update(valid_words)
tuples = zip(dictionary, range(len(dictionary)))
return {word: i for word, i in tuples}
# Used in fit
def vectorise(self, txt, librarian):
"""This function vectorises a text input.
Parameters
----------
txt : str
Text input.
librarian : dict
Dictionary with stem valid words.
Returns
-------
vectorized_array : list
List with the frequency of the Text input.
"""
vectorized_array = [0] * len(librarian)
for word in txt:
if len(word) > 0:
stem = self.stemmer.stem(word)
if stem in librarian:
position = librarian[stem]
vectorized_array[position] += 1
return vectorized_array
def fit(self, librarian):
"""This function fits the chosen model.
Parameters
----------
librarian : dict
Dictionary with stem valid words.
Returns
-------
model : sklearn.Model
Fitted model.
"""
x = [self.vectorise(txt, librarian) for txt in self.df_token]
y = self.df_tags
return self.model.fit(x, y)
def pred(self, model, phrase, librarian):
"""This function makes prediction for the given text input.
Parameters
----------
model : sklearn.Model
Fitted model.
phrase : str
Inputted text.
librarian : dict
Dictionary with stem valid words.
Returns
-------
x[0] : str
Answer for the given text input.
"""
phrase_ = self.vectorise(nltk.tokenize.word_tokenize(phrase), librarian)
x = model.predict([phrase_])
return x[0]
def __call__(self, *args, **kwargs):
"""Main function
Parameters
----------
args
kwargs
Returns
-------
"""
self.__init__(MultinomialNB)
librarian = self.cleaning_dict()
model = self.fit(librarian)
while True:
phrase = input('Input a phrase: ')
print(self.pred(model, phrase, librarian))
if __name__ == '__main__':
Message().__call__()
| [
2,
0,
12,
9,
12,
19617,
25,
3384,
69,
23,
532,
9,
12,
198,
6738,
384,
11925,
1505,
13,
12384,
26230,
13,
11321,
13,
13083,
1330,
26363,
198,
6738,
1341,
35720,
13,
2616,
425,
62,
24406,
274,
1330,
7854,
259,
49070,
32819,
198,
673... | 2.147982 | 2,230 |
import numpy as np
import unittest
from linear_solver.core import solve_linear_system
from linear_solver.utils.general import get_fn
if __name__ == '__main__':
unittest.main()
| [
11748,
299,
32152,
355,
45941,
198,
11748,
555,
715,
395,
198,
198,
6738,
14174,
62,
82,
14375,
13,
7295,
1330,
8494,
62,
29127,
62,
10057,
198,
6738,
14174,
62,
82,
14375,
13,
26791,
13,
24622,
1330,
651,
62,
22184,
628,
198,
198,
... | 2.920635 | 63 |
'''
Created on Jan 19, 2015
@author: jcabezas
'''
import unittest
import figplotter.utils as orig
import figplotter.plot.defaults as orig_defaults
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main() | [
7061,
6,
198,
41972,
319,
2365,
678,
11,
1853,
198,
198,
31,
9800,
25,
474,
66,
397,
8471,
292,
198,
7061,
6,
198,
11748,
555,
715,
395,
198,
198,
11748,
2336,
29487,
353,
13,
26791,
355,
1796,
198,
11748,
2336,
29487,
353,
13,
29... | 2.578947 | 95 |
# -*- coding: utf-8 -*-
from __future__ import division, print_function
__all__ = ["test_trivial_solver", "test_basic_solver", "test_hodlr_solver"]
import numpy as np
import george
from george.utils import nd_sort_samples
from george import kernels
from george import TrivialSolver, BasicSolver, HODLRSolver
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
6738,
11593,
37443,
834,
1330,
7297,
11,
3601,
62,
8818,
198,
198,
834,
439,
834,
796,
14631,
9288,
62,
83,
15104,
498,
62,
82,
14375,
1600,
366,
9288,
62,
35487,... | 2.8125 | 112 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# conftest.py
#
# Copyright 2020 QuatroPe
#
# This file is part of ProperImage (https://github.com/quatrope/ProperImage)
# License: BSD-3-Clause
# Full Text: https://github.com/quatrope/ProperImage/blob/master/LICENSE.txt
#
"""
Pytest configuration
Written by Bruno SANCHEZ, JB Cabral
PhD of Astromoy - UNC
bruno@oac.unc.edu.ar
Instituto de Astronomia Teorica y Experimental (IATE) UNC
Cordoba - Argentina Of 301
"""
# =============================================================================
# IMPORTS
# =============================================================================
import numpy as np
from numpy.random import default_rng
from properimage import SingleImage, simtools
import pytest
# =============================================================================
# CONSTANTS
# =============================================================================
# FIX the random state
random = default_rng(seed=42)
# =============================================================================
# FIXTURES
# =============================================================================
@pytest.fixture
@pytest.fixture
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
198,
2,
220,
369,
701,
395,
13,
9078,
198,
2,
198,
2,
220,
15069,
12131,
2264,
47756,
6435,
198,
2,
198,
2,
77... | 4.07483 | 294 |
#!/usr/bin/python3
import ibm_db
import getopt
import sys
import os
from toposort import toposort_flatten
db = None
host = "localhost"
port = "50000"
user = None
pwd = None
outfile = None
targetdb = None
try:
opts, args = getopt.getopt(sys.argv[1:], "h:d:P:u:p:o:t:")
except getopt.GetoptError:
sys.exit(-1)
for o, a in opts:
if o == "-d":
db = a
if o == "-h":
host = a
if o == "-P":
port = a
if o == "-u":
user = a
if o == "-p":
pwd = a
if o == "-t":
targetdb = a
if db is None or user is None or pwd is None or targetdb is None:
print("Usage: DBMove.py [-h <host> -P <port>] -d <db> -u <user> -p <pwd> -t <target>")
sys.exit(1)
db = db.upper()
targetdb = targetdb.upper()
cfg = (db, host, port, user, pwd)
conn = ibm_db.connect("DATABASE=%s; HOSTNAME=%s; PORT=%s; PROTOCOL=TCPIP; UID=%s; PWD=%s" % cfg, "", "")
get_db_type = "values nya.get_db_type()"
find_edges = """
SELECT rtrim(t.tabschema) || '.' || rtrim(t.tabname)
, coalesce(rtrim(r.reftabschema) || '.' || rtrim(r.reftabname), 'dummy')
FROM syscat.tables t
LEFT JOIN syscat.references r
ON (t.tabschema, t.tabname) = (r.tabschema, r.tabname)
WHERE t.tabschema not like 'SYS%'
AND t.type = 'T'
AND rtrim(t.tabschema) not like 'NYA_%'
AND t.tabschema <> 'TMP'
ORDER BY 1
"""
identity_skip = """
select rtrim(tabschema) || '.' || rtrim(tabname) from syscat.columns
where identity = 'Y' and generated = 'D'
"""
stmt = ibm_db.prepare(conn, get_db_type)
ibm_db.execute(stmt, ())
tpl = ibm_db.fetch_tuple(stmt)
db_type = tpl[0]
edges = dict()
stmt = ibm_db.prepare(conn, find_edges)
ibm_db.execute(stmt, ())
tpl = ibm_db.fetch_tuple(stmt)
while tpl:
n1, n2 = tpl
try:
edges[n1].add(n2)
except KeyError:
edges[n1] = set()
edges[n1].add(n2)
tpl = ibm_db.fetch_tuple(stmt)
sorted_nodes = list(toposort_flatten(edges))
# print(sorted_nodes)
identity_skip_arr = []
edges = dict()
stmt = ibm_db.prepare(conn, identity_skip)
ibm_db.execute(stmt, ())
tpl = ibm_db.fetch_tuple(stmt)
while tpl:
identity_skip_arr.append(tpl[0])
tpl = ibm_db.fetch_tuple(stmt)
# print(identity_skip)
os.makedirs(db, exist_ok=True)
export_file = open("%s/export.sql" % db, "w")
load_file = open("%s/load.sql" % db, "w")
export_file.write("connect to %s;\n" % db)
load_file.write("connect to %s;\n" % targetdb)
if db_type == "N":
load_file.write("""set integrity for nya.person off;\n""")
load_file.write("""alter table nya.person
alter column EMAIL_UC drop generated
alter column NORMALIZED_FIRSTNAME drop generated
alter column NORMALIZED_LASTNAME drop generated;\n""")
load_file.write("""set integrity for nya.person immediate checked;\n""")
for t in sorted_nodes:
if t == "dummy":
continue
export_file.write("export to %s.ixf of ixf lobs to . modified by codepage=819 messages export_%s.msg select * from %s;\n" % (t,t,t))
identityskip = "identityoverride"
if t in identity_skip_arr:
identityskip = " "
load_file.write("load from %s.ixf of ixf lobs from . modified by generatedoverride %s messages load_%s.msg replace into %s;\n" % (t, identityskip, t, t))
if db_type == "N":
load_file.write("""set integrity for nya.person off;\n""")
load_file.write("""alter table nya.person
alter column EMAIL_UC set generated always as ( upper(email))
alter column NORMALIZED_FIRSTNAME set generated always as ( NYA.REMOVE_DIACRITICS( FIRSTNAME ) )
alter column NORMALIZED_LASTNAME set generated always as ( NYA.REMOVE_DIACRITICS( LASTNAME ) );\n""")
load_file.write("""set integrity for nya.person immediate checked force generated;\n""")
load_file.write("""echo set integrity for all tables;\n""")
export_file.write("connect reset;\n")
load_file.write("connect reset;\n")
export_file.close()
load_file.close()
| [
2,
48443,
14629,
14,
8800,
14,
29412,
18,
198,
198,
11748,
24283,
76,
62,
9945,
198,
11748,
651,
8738,
198,
11748,
25064,
198,
11748,
28686,
198,
6738,
1353,
418,
419,
1330,
1353,
418,
419,
62,
2704,
41769,
198,
198,
9945,
796,
6045,
... | 2.168365 | 1,865 |
from pyspark import SparkFiles
from pyspark.sql import SparkSession, DataFrameWriter
from pyspark.sql.functions import when, isnull, col, explode, split
import os | [
6738,
279,
893,
20928,
1330,
17732,
25876,
198,
6738,
279,
893,
20928,
13,
25410,
1330,
17732,
36044,
11,
6060,
19778,
34379,
198,
6738,
279,
893,
20928,
13,
25410,
13,
12543,
2733,
1330,
618,
11,
2125,
724,
11,
951,
11,
22818,
11,
66... | 3.543478 | 46 |
""" Image utilities. """
import numpy as np
import torch
import torch.nn as nn
import torchvision.utils
import torchvision.transforms.functional as F
def rgb2tensor(img, normalize=True):
""" Converts a RGB image to tensor.
Args:
img (np.array or list of np.array): RGB image of shape (H, W, 3) or a list of images
normalize (bool): If True, the tensor will be normalized to the range [-1, 1]
Returns:
torch.Tensor or list of torch.Tensor: The converted image tensor or a list of converted tensors.
"""
if isinstance(img, (list, tuple)):
return [rgb2tensor(o) for o in img]
tensor = F.to_tensor(img)
if normalize:
tensor = F.normalize(tensor, [0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
return tensor.unsqueeze(0)
def bgr2tensor(img, normalize=True):
""" Converts a BGR image to tensor.
Args:
img (np.array or list of np.array): BGR image of shape (H, W, 3) or a list of images
normalize (bool): If True, the tensor will be normalized to the range [-1, 1]
Returns:
torch.Tensor or list of torch.Tensor: The converted image tensor or a list of converted tensors.
"""
if isinstance(img, (list, tuple)):
return [bgr2tensor(o, normalize) for o in img]
return rgb2tensor(img[:, :, ::-1].copy(), normalize)
def unnormalize(tensor, mean, std):
"""Normalize a tensor image with mean and standard deviation.
See :class:`~torchvision.transforms.Normalize` for more details.
Args:
tensor (Tensor): Tensor image of size (C, H, W) to be normalized.
mean (sequence): Sequence of means for each channel.
std (sequence): Sequence of standard deviations for each channely.
Returns:
Tensor: Normalized Tensor image.
"""
for t, m, s in zip(tensor, mean, std):
t.mul_(s).add_(m)
return tensor
def tensor2rgb(img_tensor):
""" Convert an image tensor to a numpy RGB image.
Args:
img_tensor (torch.Tensor): Tensor image of shape (3, H, W)
Returns:
np.array: RGB image of shape (H, W, 3)
"""
output_img = unnormalize(img_tensor.clone(), [0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
output_img = output_img.squeeze().permute(1, 2, 0).cpu().numpy()
output_img = np.round(output_img * 255).astype('uint8')
return output_img
def tensor2bgr(img_tensor):
""" Convert an image tensor to a numpy BGR image.
Args:
img_tensor (torch.Tensor): Tensor image of shape (3, H, W)
Returns:
np.array: BGR image of shape (H, W, 3)
"""
output_img = tensor2rgb(img_tensor)
output_img = output_img[:, :, ::-1]
return output_img
def make_grid(*args, cols=8):
""" Create an image grid from a batch of images.
Args:
*args: (Tensor or list): 4D mini-batch Tensor of shape (B x C x H x W)
or a list of images all of the same size
cols: The maximum number of columns in the grid
Returns:
torch.Tensor: The grid of images.
"""
assert len(args) > 0, 'At least one input tensor must be given!'
imgs = torch.cat([a.cpu() for a in args], dim=2)
return torchvision.utils.make_grid(imgs, nrow=cols, normalize=True, scale_each=False)
def create_pyramid(img, n=1):
""" Create an image pyramid.
Args:
img (torch.Tensor): An image tensor of shape (B, C, H, W)
n (int): The number of pyramids to create
Returns:
list of torch.Tensor: The computed image pyramid.
"""
# If input is a list or tuple return it as it is (probably already a pyramid)
if isinstance(img, (list, tuple)):
return img
pyd = [img]
for i in range(n - 1):
pyd.append(nn.functional.avg_pool2d(pyd[-1], 3, stride=2, padding=1, count_include_pad=False))
return pyd
| [
37811,
7412,
20081,
13,
37227,
198,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
28034,
198,
11748,
28034,
13,
20471,
355,
299,
77,
198,
11748,
28034,
10178,
13,
26791,
198,
11748,
28034,
10178,
13,
7645,
23914,
13,
45124,
355,
376,
... | 2.454839 | 1,550 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
__FallbackSample__
Example of a report from a job that had xrootd fallback reads
"""
from WMCore.Configuration import ConfigSection
from WMCore.FwkJobReport.Report import Report
FrameworkJobReport = ConfigSection("FrameworkJobReport")
FrameworkJobReport.task = '/Run195530-PhotonHad-Run2012B-PromptReco-v1-PhotonHad/DataProcessing'
FrameworkJobReport.workload = 'Unknown'
FrameworkJobReport.section_('cmsRun1')
FrameworkJobReport.cmsRun1.status = 1
FrameworkJobReport.cmsRun1.section_('cleanup')
FrameworkJobReport.cmsRun1.cleanup.section_('unremoved')
FrameworkJobReport.cmsRun1.cleanup.section_('removed')
FrameworkJobReport.cmsRun1.cleanup.removed.fileCount = 0
FrameworkJobReport.cmsRun1.section_('errors')
FrameworkJobReport.cmsRun1.section_('logs')
FrameworkJobReport.cmsRun1.section_('parameters')
FrameworkJobReport.cmsRun1.parameters.ReadBranches = ''
FrameworkJobReport.cmsRun1.outputModules = []
FrameworkJobReport.cmsRun1.section_('site')
FrameworkJobReport.cmsRun1.section_('analysis')
FrameworkJobReport.cmsRun1.analysis.section_('files')
FrameworkJobReport.cmsRun1.analysis.files.fileCount = 0
FrameworkJobReport.cmsRun1.section_('performance')
FrameworkJobReport.cmsRun1.performance.section_('memory')
FrameworkJobReport.cmsRun1.performance.section_('storage')
FrameworkJobReport.cmsRun1.performance.storage.writeTotalMB = 0
FrameworkJobReport.cmsRun1.performance.storage.readPercentageOps = 2.38888888889
FrameworkJobReport.cmsRun1.performance.storage.readAveragekB = 7421.23591442
FrameworkJobReport.cmsRun1.performance.storage.readTotalMB = 311.63393
FrameworkJobReport.cmsRun1.performance.storage.readNumOps = 18.0
FrameworkJobReport.cmsRun1.performance.storage.readCachePercentageOps = 0.0
FrameworkJobReport.cmsRun1.performance.storage.readMBSec = 0.0135009760282
FrameworkJobReport.cmsRun1.performance.storage.readMaxMSec = 3325.76
FrameworkJobReport.cmsRun1.performance.storage.readTotalSecs = 0
FrameworkJobReport.cmsRun1.performance.storage.writeTotalSecs = 0
FrameworkJobReport.cmsRun1.performance.section_('summaries')
FrameworkJobReport.cmsRun1.performance.section_('cpu')
FrameworkJobReport.cmsRun1.section_('skipped')
FrameworkJobReport.cmsRun1.skipped.section_('files')
FrameworkJobReport.cmsRun1.skipped.files.fileCount = 0
FrameworkJobReport.cmsRun1.skipped.section_('events')
FrameworkJobReport.cmsRun1.section_('input')
FrameworkJobReport.cmsRun1.input.section_('source')
FrameworkJobReport.cmsRun1.input.source.section_('files')
FrameworkJobReport.cmsRun1.input.source.files.section_('file0')
FrameworkJobReport.cmsRun1.input.source.files.file0.section_('runs')
FrameworkJobReport.cmsRun1.input.source.files.file0.input_source_class = 'PoolSource'
FrameworkJobReport.cmsRun1.input.source.files.file0.input_type = 'primaryFiles'
FrameworkJobReport.cmsRun1.input.source.files.file0.lfn = '/store/data/Run2012D/SingleElectron/AOD/PromptReco-v1/000/207/279/D43A5B72-1831-E211-895D-001D09F24763.root'
FrameworkJobReport.cmsRun1.input.source.files.file0.pfn = 'root://xrootd.unl.edu//store/data/Run2012D/SingleElectron/AOD/PromptReco-v1/000/207/279/D43A5B72-1831-E211-895D-001D09F24763.root'
FrameworkJobReport.cmsRun1.input.source.files.file0.catalog = ''
FrameworkJobReport.cmsRun1.input.source.files.file0.module_label = 'source'
FrameworkJobReport.cmsRun1.input.source.files.file0.guid = 'D43A5B72-1831-E211-895D-001D09F24763'
FrameworkJobReport.cmsRun1.input.source.files.file0.events = 1215
FrameworkJobReport.cmsRun1.input.source.files.fileCount = 1
FrameworkJobReport.cmsRun1.section_('output')
FrameworkJobReport.cmsRun1.section_('fallback')
FrameworkJobReport.cmsRun1.fallback.section_('files')
FrameworkJobReport.cmsRun1.fallback.files.section_('file0')
FrameworkJobReport.cmsRun1.fallback.files.file0.PhysicalFileName = 'root://xrootd.unl.edu//store/data/Run2012D/SingleElectron/AOD/PromptReco-v1/000/207/279/D43A5B72-1831-E211-895D-001D09F24763.root'
FrameworkJobReport.cmsRun1.fallback.files.file0.LogicalFileName = '/store/data/Run2012D/SingleElectron/AOD/PromptReco-v1/000/207/279/D43A5B72-1831-E211-895D-001D09F24763.root'
FrameworkJobReport.cmsRun1.fallback.files.fileCount = 1
FrameworkJobReport.cmsRun1.id = None
FrameworkJobReport.workload = 'Unknown'
FrameworkJobReport.steps = ['cmsRun1']
report = Report()
report.data = FrameworkJobReport
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
834,
24750,
1891,
36674,
834,
198,
198,
16281,
286,
257,
989,
422,
257,
1693,
326,
550,
2124,
15763,
67,
... | 2.828349 | 1,538 |
from pybunpro import UserInformation
| [
6738,
12972,
65,
403,
1676,
1330,
11787,
21918,
628
] | 4.222222 | 9 |
#!/usr/bin/env python2
from math import sqrt, pow
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
17,
198,
6738,
10688,
1330,
19862,
17034,
11,
7182,
628,
628,
628,
628,
628,
198
] | 2.608696 | 23 |
'''
Copyright (c) 2015, Emmanuel Levijarvi
All rights reserved.
License BSD
'''
from datetime import datetime
from unittest import TestCase
import csv
import os
from iotrelay import Reading
from tests.tempodb_mock import Client, DataPoint
import iotrelay_tempodb
iotrelay_tempodb.Client = Client
iotrelay_tempodb.DataPoint = DataPoint
TIME_FMT = "%Y-%m-%d %H:%M:%S %z"
TIME_FMT = "%Y-%m-%d %H:%M:%S"
TEST_DATA = os.path.join(os.path.realpath(os.path.dirname(__file__)),
"test_data.csv")
| [
7061,
6,
198,
15269,
357,
66,
8,
1853,
11,
32390,
16042,
2926,
283,
8903,
198,
3237,
2489,
10395,
13,
198,
34156,
347,
10305,
198,
7061,
6,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
6738,
555,
715,
395,
1330,
6208,
20448,
198,
1... | 2.340909 | 220 |
from setuptools import setup, find_packages, Extension
REQUIRES = []
NAME = "pidevices"
VERSION = "0.0.1"
DEPENDENCIES = ['pyalsaaudio==0.8.4', 'picamera==1.13',
'rpi-ws281x==4.2.2', 'pygame==1.9.6',
'evdev==1.2.0', 'omxplayer-wrapper==0.3.2',
'RPi.GPIO==0.7.0', 'smbus2==0.2.3',
'python-periphery==1.1.2', 'spidev==3.4','pigpio==1.44']
vl53l1x_path = 'pidevices/sensors/vl53l1x/'
extension = Extension(
'vl53l1x_python',
define_macros=[],
extra_compile_args=['-std=c99'],
include_dirs=[vl53l1x_path,
vl53l1x_path + 'api/core',
vl53l1x_path + 'api/platform'],
libraries=[],
library_dirs=[],
sources=[vl53l1x_path + 'api/core/vl53l1_api_calibration.c',
vl53l1x_path + 'api/core/vl53l1_core.c',
vl53l1x_path + 'api/core/vl53l1_core_support.c',
vl53l1x_path + 'api/core/vl53l1_api_core.c',
vl53l1x_path + 'api/core/vl53l1_api_preset_modes.c',
vl53l1x_path + 'api/core/vl53l1_silicon_core.c',
vl53l1x_path + 'api/core/vl53l1_register_funcs.c',
vl53l1x_path + 'api/core/vl53l1_wait.c',
vl53l1x_path + 'api/core/vl53l1_error_strings.c',
vl53l1x_path + 'api/core/vl53l1_api_strings.c',
vl53l1x_path + 'api/core/vl53l1_api.c',
vl53l1x_path + 'api/platform/vl53l1_platform.c',
vl53l1x_path + 'python_lib/vl53l1x_python.c'])
# Build vl531l
# Lib sdl install
setup(
name=NAME,
version=VERSION,
packages=find_packages(),
# Install required packages
install_requires=DEPENDENCIES,
ext_modules=[extension],
# Metadata
author="Iasonas Paraskevopoulos",
author_email="iaswnparaskev@gmail.com",
description="Drivers for sensors and actuators for the raspberry pi board.",
url=" ",
)
| [
6738,
900,
37623,
10141,
1330,
9058,
11,
1064,
62,
43789,
11,
27995,
198,
198,
2200,
10917,
4663,
1546,
796,
17635,
198,
20608,
796,
366,
79,
485,
85,
1063,
1,
198,
43717,
796,
366,
15,
13,
15,
13,
16,
1,
198,
198,
46162,
10619,
2... | 1.828215 | 1,042 |
from typing import cast
from aries_cloudagent.core.profile import ProfileSession
from aries_cloudagent.indy.holder import IndyHolderError
from aries_cloudagent.indy.models.requested_creds import (
IndyRequestedCredsRequestedAttrSchema,
IndyRequestedCredsRequestedPredSchema,
)
from aries_cloudagent.ledger.error import LedgerError
from aries_cloudagent.messaging.base_handler import BaseResponder, RequestContext
from aries_cloudagent.messaging.models.base import BaseModelError
from aries_cloudagent.protocols.present_proof.v1_0.manager import PresentationManager
from aries_cloudagent.protocols.present_proof.v1_0.models.presentation_exchange import (
V10PresentationExchange as PresExRecord,
)
from aries_cloudagent.storage.error import StorageError, StorageNotFoundError
from aries_cloudagent.wallet.error import WalletNotFoundError
from marshmallow import fields
from ....util import (
ExceptionReporter,
InvalidConnection,
admin_only,
expand_message_class,
get_connection,
log_handling,
)
from ..error import InvalidPresentationExchange
from .base import AdminHolderMessage
from .pres_sent import PresSent
@expand_message_class
class PresRequestApprove(AdminHolderMessage):
"""Approve presentation request."""
message_type = "presentation-request-approve"
class Fields:
"""Fields on pres request approve message."""
presentation_exchange_id = fields.Str(required=True)
self_attested_attributes = fields.Dict(
description="Self-attested attributes to build into proof",
required=True,
keys=fields.Str(example="attr_name"), # marshmallow/apispec v3.0 ignores
values=fields.Str(
example="self_attested_value",
description=(
"Self-attested attribute values to use in requested-credentials "
"structure for proof construction"
),
),
)
requested_attributes = fields.Dict(
description=(
"Nested object mapping proof request attribute referents to "
"requested-attribute specifiers"
),
required=True,
keys=fields.Str(
example="attr_referent"
), # marshmallow/apispec v3.0 ignores
values=fields.Nested(IndyRequestedCredsRequestedAttrSchema()),
)
requested_predicates = fields.Dict(
description=(
"Nested object mapping proof request predicate referents to "
"requested-predicate specifiers"
),
required=True,
keys=fields.Str(
example="pred_referent"
), # marshmallow/apispec v3.0 ignores
values=fields.Nested(IndyRequestedCredsRequestedPredSchema()),
)
comment = fields.Str(
required=False,
description="Optional comment.",
example="Nothing to see here.",
)
@staticmethod
async def get_pres_ex_record(
session: ProfileSession, pres_ex_id: str
) -> PresExRecord:
"""Retrieve a presentation exchange record and validate its state."""
try:
pres_ex_record = await PresExRecord.retrieve_by_id(session, pres_ex_id)
pres_ex_record = cast(PresExRecord, pres_ex_record)
except StorageNotFoundError as err:
raise InvalidPresentationExchange(
"Presentation exchange ID not found"
) from err
if pres_ex_record.state != (PresExRecord.STATE_REQUEST_RECEIVED):
raise InvalidPresentationExchange(
"Presentation must be in request received state"
)
return pres_ex_record
@log_handling
@admin_only
async def handle(self, context: RequestContext, responder: BaseResponder):
"""Handle presentation request approved message."""
async with context.session() as session:
async with ExceptionReporter(
responder, InvalidPresentationExchange, context.message
):
pres_ex_record = await self.get_pres_ex_record(
session, self.presentation_exchange_id
)
async with ExceptionReporter(responder, InvalidConnection, context.message):
conn_record = await get_connection(
session, pres_ex_record.connection_id
)
presentation_manager = PresentationManager(context.profile)
async with ExceptionReporter(
responder,
(
BaseModelError,
IndyHolderError,
LedgerError,
StorageError,
WalletNotFoundError,
),
context.message,
):
pres_ex_record, message = await presentation_manager.create_presentation(
pres_ex_record,
{
"self_attested_attributes": self.self_attested_attributes,
"requested_attributes": self.requested_attributes,
"requested_predicates": self.requested_predicates,
},
comment=self.comment,
)
await responder.send(message, connection_id=conn_record.connection_id)
presentation_sent = PresSent(record=pres_ex_record)
presentation_sent.assign_thread_from(self)
await responder.send_reply(presentation_sent)
| [
6738,
19720,
1330,
3350,
198,
198,
6738,
257,
1678,
62,
17721,
25781,
13,
7295,
13,
13317,
1330,
13118,
36044,
198,
6738,
257,
1678,
62,
17721,
25781,
13,
521,
88,
13,
13829,
1330,
26023,
39,
19892,
12331,
198,
6738,
257,
1678,
62,
17... | 2.304965 | 2,397 |
import itertools
if __name__ == '__main__':
main()
| [
11748,
340,
861,
10141,
628,
628,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
1388,
3419,
198
] | 2.458333 | 24 |
import datetime
from django.core.urlresolvers import reverse
from django.utils.text import slugify
from neomodel import IntegerProperty, StructuredNode, StringProperty, db, DateProperty, RelationshipFrom, \
RelationshipTo, StructuredRel, Relationship, BooleanProperty
EDITABLE_PROPERTIES = {
# labels: {property-name, ...},
# Nodes
':Company': ['name'],
':CV': ['name', 'date', 'spec'],
':Experience': ['title', 'date', 'publish_date', 'summary', 'body'],
':Link': ['title', 'url', 'publish_date', 'summary'],
':Note': ['text', 'publish_date'],
':Person': ['name', 'contact_info', 'image_url'],
':Project': ['name', 'description'],
':Role': ['name', 'description', 'hidden'],
':Topic': ['name', 'description'],
# Relationships
'(:Person)-[:CONTRIBUTED_TO]->(:Project)': ['start_date', 'end_date'],
'(:Person)-[:PERFORMED]->(:Role)': ['start_date', 'end_date'],
}
| [
11748,
4818,
8079,
198,
198,
6738,
42625,
14208,
13,
7295,
13,
6371,
411,
349,
690,
1330,
9575,
198,
6738,
42625,
14208,
13,
26791,
13,
5239,
1330,
31065,
1958,
198,
6738,
497,
296,
375,
417,
1330,
34142,
21746,
11,
32112,
1522,
19667,
... | 2.663842 | 354 |
##############################################################################
# Collection Manipulators
# ============================================================================
##############################################################################
from typing import Mapping, Sequence
| [
29113,
29113,
7804,
4242,
2235,
198,
2,
12251,
35045,
24325,
198,
2,
38093,
2559,
18604,
198,
29113,
29113,
7804,
4242,
2235,
198,
198,
6738,
19720,
1330,
337,
5912,
11,
45835,
628
] | 9.741935 | 31 |
# coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from .vk import VKIE
from ..utils import (
HEADRequest,
int_or_none,
)
| [
2,
19617,
25,
3384,
69,
12,
23,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
198,
6738,
764,
11321,
1330,
14151,
11627,
40450,
198,
6738,
764,
85,
74,
1330,
45917,
10008,
198,
6738,
11485,
26791,
1330,
357,
19... | 2.803279 | 61 |
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Defines TestPackageExecutable to help run stand-alone executables."""
import logging
import os
import posixpath
import sys
import tempfile
from pylib import cmd_helper
from pylib import constants
from pylib import pexpect
from pylib.device import device_errors
from pylib.gtest import gtest_test_instance
from pylib.gtest.test_package import TestPackage
class TestPackageExecutable(TestPackage):
"""A helper class for running stand-alone executables."""
_TEST_RUNNER_RET_VAL_FILE = 'gtest_retval'
def __init__(self, suite_name):
"""
Args:
suite_name: Name of the test suite (e.g. base_unittests).
"""
TestPackage.__init__(self, suite_name)
self.suite_path = os.path.join(constants.GetOutDirectory(), suite_name)
self._symbols_dir = os.path.join(constants.GetOutDirectory(),
'lib.target')
#override
@staticmethod
#override
#override
#override
#override
#override
| [
2,
15069,
357,
66,
8,
2321,
383,
18255,
1505,
46665,
13,
1439,
2489,
10395,
13,
198,
2,
5765,
286,
428,
2723,
2438,
318,
21825,
416,
257,
347,
10305,
12,
7635,
5964,
326,
460,
307,
198,
2,
1043,
287,
262,
38559,
24290,
2393,
13,
1... | 2.81592 | 402 |
import os
if __name__ == "__main__":
run_tests() | [
11748,
28686,
198,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
1057,
62,
41989,
3419
] | 2.409091 | 22 |
from abc import ABCMeta
from django.urls import reverse
from lib.tests.utils import ClientTest, sample_image_as_file
from ..models import LabelGroup, Label
# Abstract class
| [
6738,
450,
66,
1330,
9738,
48526,
198,
198,
6738,
42625,
14208,
13,
6371,
82,
1330,
9575,
198,
198,
6738,
9195,
13,
41989,
13,
26791,
1330,
20985,
14402,
11,
6291,
62,
9060,
62,
292,
62,
7753,
198,
6738,
11485,
27530,
1330,
36052,
132... | 3.54 | 50 |
def convert_kwargs_to_cmd_line_args(kwargs):
"""
Helper function to build command line arguments out of dict.
"""
args = []
for k in sorted(kwargs.keys()):
v = kwargs[k]
args.append('-{}'.format(k))
if v is not None:
args.append('{}'.format(v))
return args
def get_list_attribute(_object):
"""
Return value list without built-in attribute.
"""
return [v for k, v in _object.__dict__.items() if not k.startswith("__")]
| [
198,
198,
4299,
10385,
62,
46265,
22046,
62,
1462,
62,
28758,
62,
1370,
62,
22046,
7,
46265,
22046,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
5053,
525,
2163,
284,
1382,
3141,
1627,
7159,
503,
286,
8633,
13,
198,
220,
220... | 2.338028 | 213 |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'addambiencesimpledialog.ui'
#
# Created by: PyQt4 UI code generator 4.11.1
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
try:
_encoding = QtGui.QApplication.UnicodeUTF8
except AttributeError:
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
2,
5178,
7822,
7560,
422,
3555,
334,
72,
2393,
705,
2860,
4131,
10035,
320,
10137,
498,
519,
13,
9019,
6,
198,
2,
198,
2,
15622,
416,
25,
9485,
48,
83,
19,
12... | 2.689655 | 145 |
from django.shortcuts import render
| [
6738,
42625,
14208,
13,
19509,
23779,
1330,
8543,
198
] | 4 | 9 |
import tensorflow as tf
from keras import layers, losses
from keras.utils.generic_utils import register_keras_serializable
from keras.utils.losses_utils import ReductionV2 as Reduction
from keras.utils.tf_utils import shape_type_conversion
from .sample import point_sample
@register_keras_serializable(package='SegMe>PointRend')
| [
11748,
11192,
273,
11125,
355,
48700,
198,
6738,
41927,
292,
1330,
11685,
11,
9089,
198,
6738,
41927,
292,
13,
26791,
13,
41357,
62,
26791,
1330,
7881,
62,
6122,
292,
62,
46911,
13821,
198,
6738,
41927,
292,
13,
26791,
13,
22462,
274,
... | 3.484211 | 95 |
import logging
from datetime import datetime, timedelta, date
from xml.sax.saxutils import unescape
from epg2xml.providers import EPGProvider, EPGProgram
from epg2xml.providers import ParserBeautifulSoup as BeautifulSoup
from epg2xml.utils import request_data
log = logging.getLogger(__name__.rsplit(".", maxsplit=1)[-1].upper())
today = date.today()
# TODO: better to parsing desktop page?
| [
11748,
18931,
201,
198,
6738,
4818,
8079,
1330,
4818,
8079,
11,
28805,
12514,
11,
3128,
201,
198,
6738,
35555,
13,
82,
897,
13,
82,
897,
26791,
1330,
555,
41915,
201,
198,
201,
198,
6738,
2462,
70,
17,
19875,
13,
15234,
4157,
1330,
... | 2.914286 | 140 |
''' using length to validate input
# ask for a six digit number and print to the console
flag = True
while flag == True:
try:
usr_num = input("Please enter a six digit number: ")
if float(usr_num):
if len(usr_num) > 6 :
print("Number is too big!")
elif len(usr_num) < 6:
print("Number is too small!")
else:
print(usr_num)
flag = False
except:
print("you did not enter a number")
pass
'''
'''Superficial string traversal
fruit = 'banana'
index = 0
while index < len(fruit):
letter = fruit[index]
print(index, letter)
index = index + 1
fruit = 'Banana'
for letter in fruit :
print(letter)
'''
''' counting
word = 'banana'
count = 0
for letter in word :
if letter == 'a':
count = count + 1
print(count)
'''
''' counting vowles in a string
my_string = input("Insert a string: ")
count = 0
for letter in my_string:
if letter in ['a', 'o', 'u', 'i', 'e']:
count = count + 1
print(count)
'''
''' Check for existence with keyword in
fruit = 'oranged
if 'g' in fruit:
print('Might be grapefruit!')
elif 'o' in fruit:
print('Might be an oange')
'''
'''slice a string
my_string = input("Insert a string: ")
for letter in range(len(my_string)):
if my_string[letter] == '@':
new_string = my_string[letter + 1:]
break
'''
'''slice a string pt 2'''
my_string = input("Insert a string: ")
for letter in range(len(my_string)):
if my_string[letter] == '@':
break
new_string = ""
for letter2 in range (letter + 1, len(my_string)):
if my_string[letter2] == '@':
new_string = my_string[letter + 1: letter2]
break
print(new_string) | [
7061,
6,
1262,
4129,
284,
26571,
5128,
198,
2,
1265,
329,
257,
2237,
16839,
1271,
290,
3601,
284,
262,
8624,
198,
32109,
796,
6407,
198,
4514,
6056,
6624,
6407,
25,
198,
220,
220,
220,
1949,
25,
198,
220,
220,
220,
220,
220,
220,
... | 2.306789 | 766 |
# This code is part of Qiskit.
#
# (C) Copyright IBM 2020, 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Tests of BOPES Sampler."""
import unittest
from functools import partial
import numpy as np
from qiskit.algorithms import NumPyMinimumEigensolver
from qiskit.utils import algorithm_globals
from qiskit_nature.algorithms import GroundStateEigensolver, BOPESSampler
from qiskit_nature.algorithms.pes_samplers import MorsePotential
from qiskit_nature.drivers import Molecule
from qiskit_nature.drivers.second_quantization import PySCFDriver
from qiskit_nature.mappers.second_quantization import ParityMapper
from qiskit_nature.converters.second_quantization import QubitConverter
from qiskit_nature.problems.second_quantization import ElectronicStructureProblem
class TestBOPES(unittest.TestCase):
"""Tests of BOPES Sampler."""
def test_h2_bopes_sampler(self):
"""Test BOPES Sampler on H2"""
seed = 50
algorithm_globals.random_seed = seed
# Molecule
dof = partial(Molecule.absolute_distance, atom_pair=(1, 0))
m = Molecule(
geometry=[["H", [0.0, 0.0, 1.0]], ["H", [0.0, 0.45, 1.0]]],
degrees_of_freedom=[dof],
)
mapper = ParityMapper()
converter = QubitConverter(mapper=mapper, two_qubit_reduction=True)
driver = PySCFDriver(molecule=m)
problem = ElectronicStructureProblem(driver)
solver = NumPyMinimumEigensolver()
me_gss = GroundStateEigensolver(converter, solver)
# BOPES sampler
sampler = BOPESSampler(gss=me_gss)
# absolute internuclear distance in Angstrom
points = [0.7, 1.0, 1.3]
results = sampler.sample(problem, points)
points_run = results.points
energies = results.energies
np.testing.assert_array_almost_equal(points_run, [0.7, 1.0, 1.3])
np.testing.assert_array_almost_equal(
energies, [-1.13618945, -1.10115033, -1.03518627], decimal=2
)
def test_potential_interface(self):
"""Tests potential interface."""
seed = 50
algorithm_globals.random_seed = seed
stretch = partial(Molecule.absolute_distance, atom_pair=(1, 0))
# H-H molecule near equilibrium geometry
m = Molecule(
geometry=[
["H", [0.0, 0.0, 0.0]],
["H", [1.0, 0.0, 0.0]],
],
degrees_of_freedom=[stretch],
masses=[1.6735328e-27, 1.6735328e-27],
)
mapper = ParityMapper()
converter = QubitConverter(mapper=mapper)
driver = PySCFDriver(molecule=m)
problem = ElectronicStructureProblem(driver)
solver = NumPyMinimumEigensolver()
me_gss = GroundStateEigensolver(converter, solver)
# Run BOPESSampler with exact eigensolution
points = np.arange(0.45, 5.3, 0.3)
sampler = BOPESSampler(gss=me_gss)
res = sampler.sample(problem, points)
# Testing Potential interface
pot = MorsePotential(m)
pot.fit(res.points, res.energies)
np.testing.assert_array_almost_equal([pot.alpha, pot.r_0], [2.235, 0.720], decimal=3)
np.testing.assert_array_almost_equal([pot.d_e, pot.m_shift], [0.2107, -1.1419], decimal=3)
if __name__ == "__main__":
unittest.main()
| [
2,
770,
2438,
318,
636,
286,
1195,
1984,
270,
13,
198,
2,
198,
2,
357,
34,
8,
15069,
19764,
12131,
11,
33448,
13,
198,
2,
198,
2,
770,
2438,
318,
11971,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
13,
921,
743,
198,
2,
733... | 2.35746 | 1,575 |
import os
import django
from channels.routing import get_default_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE','locallibrarysettings')
django.setup()
application = get_default_application()
| [
11748,
28686,
198,
198,
11748,
42625,
14208,
198,
198,
6738,
9619,
13,
81,
13660,
1330,
651,
62,
12286,
62,
31438,
198,
198,
418,
13,
268,
2268,
13,
2617,
12286,
10786,
35028,
1565,
11230,
62,
28480,
51,
20754,
62,
33365,
24212,
41707,
... | 3.25 | 64 |