content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
from email import header
import json
from flask import Flask, render_template, jsonify, request
from dotenv import load_dotenv
import os
import sys
import requests
# database intialization
from modules.database import NetworkDB
db = NetworkDB()
db.create_tables()
from modules.graph import Graphs
from modules.handler import NodeHandler
app = Flask(__name__)
defaultSession = "86ab42ee-8b2e-4b00-b306-e8483698ef2f"
@app.route("/")
@app.route("/save_nodes", methods=["POST"])
@app.route("/deploy_nodes", methods=["POST"])
@app.route("/destroy_nodes", methods=["POST"])
@app.route("/send_packet", methods=["POST"])
@app.route("/set_status", methods=["POST"])
@app.route("/get_status", methods=["GET"])
if __name__ == "__main__":
app.run(debug=True, host='0.0.0.0', port=8080) | [
6738,
3053,
1330,
13639,
198,
11748,
33918,
198,
6738,
42903,
1330,
46947,
11,
8543,
62,
28243,
11,
33918,
1958,
11,
2581,
198,
6738,
16605,
24330,
1330,
3440,
62,
26518,
24330,
198,
11748,
28686,
198,
11748,
25064,
198,
11748,
7007,
198,... | 2.739583 | 288 |
#!/usr/bin/env python3
import numpy
import pylab
import pickle
import itertools
a_arr, m_hist, cor_hist=pickle.load(open("results.pickle","rb"))
e=numpy.loadtxt("eigenval.csv", delimiter=",")
v=numpy.loadtxt("eigenvec.csv", delimiter=",")
v = v @ numpy.diag(e)
group_ans = numpy.loadtxt("group_hier.csv", delimiter=",", dtype=int)
if group_ans.ndim==1:
group_ans = group_ans.reshape([len(group_ans),1])
N, Nhier = group_ans.shape
Nsep = numpy.max(group_ans)+1
#representational similarity within communities
#LAM
matching_LAM=numpy.zeros([len(a_arr),Nhier+1])
for a_ind in range(len(a_arr)):
matching_LAM[a_ind,0] = calc_matching_all(cor_hist[a_ind])
for h in range(1,Nhier+1):
matching_LAM[a_ind,h] = calc_matching(cor_hist[a_ind], h)
#GL
p=numpy.min([50, v.shape[1]-1])
matching_GL=numpy.zeros([p,Nhier+1])
for i in range(p):
matching_GL[a_ind,0] = calc_matching_all(numpy.corrcoef(v[:,:i+2]))
for h in range(1,Nhier+1):
matching_GL[i,h] = calc_matching(numpy.corrcoef(v[:,:i+2]), h)
#plot
colors = ["gray", "red", "blue", "green"]
pylab.figure(figsize=(2.5,2.5))
pylab.plot(a_arr, matching_LAM[:,0],".-",label="All pairs",color=colors[0])
for h in range(1,Nhier+1):
pylab.plot(a_arr, matching_LAM[:,h],".-",label="Level "+str(h),color=colors[h])
pylab.xlim([-1,3])
pylab.xticks([-1,0,1,2])
pylab.xlabel(r"$\alpha$")
pylab.ylabel("Average pattern correlation")
pylab.tight_layout()
pylab.savefig("cor_cluster_LAM_nolegend.pdf")
pylab.legend()
pylab.savefig("cor_cluster_LAM.pdf")
pylab.close()
pylab.figure(figsize=(2.5,2.5))
pylab.plot(range(2,p+2), matching_GL[:,0],".-",label="All pairs",color=colors[0])
for h in range(1,Nhier+1):
pylab.plot(range(2,p+2), matching_GL[:,h],".-",label="Level "+str(h),color=colors[h])
pylab.xlim([0,p+2])
pylab.xlabel("Dimension of representations")
pylab.ylabel("Average pattern correlation")
pylab.tight_layout()
pylab.savefig("cor_cluster_GL_nolegend.pdf")
pylab.legend()
pylab.savefig("cor_cluster_GL.pdf")
pylab.close()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
11748,
299,
32152,
198,
11748,
279,
2645,
397,
198,
11748,
2298,
293,
198,
11748,
340,
861,
10141,
198,
198,
64,
62,
3258,
11,
285,
62,
10034,
11,
1162,
62,
10034,
28,
27729... | 2.144681 | 940 |
import database
import csv
import sklearn
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import xlsxwriter as xl
import dill as pickle
from io import BytesIO
# This function is used to predict any model the user uploads to the flask app.
# Depending on user's settings the function returns a file with predicted values or
# displays the predicted values in a webpage.
| [
11748,
6831,
201,
198,
11748,
269,
21370,
201,
198,
11748,
1341,
35720,
201,
198,
11748,
19798,
292,
355,
279,
67,
201,
198,
11748,
299,
32152,
355,
45941,
220,
201,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
220,
2... | 3.223077 | 130 |
#!/bin/env python
import argparse
if __name__ == "__main__":
main()
| [
2,
48443,
8800,
14,
24330,
21015,
198,
11748,
1822,
29572,
198,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
1388,
3419,
198
] | 2.517241 | 29 |
from PIL import Image
if __name__ == "__main__":
print("Starting main")
main()
| [
6738,
350,
4146,
1330,
7412,
198,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
3601,
7203,
22851,
1388,
4943,
198,
220,
1388,
3419,
198
] | 2.896552 | 29 |
"""For evaluating ARMA terms using an emulation
For evaluating ARMA terms using an emulation. Two classes, Arma and
ArmaForecast. Arma is for general use, where ARMA terms before time point
zero are zero. ArmaForecast is used for forecasting and continuing on a
time series, uses the previous time series when elevating time points
before zero (relatively).
Arma <- ArmaForecast
"""
import numpy as np
class Arma(object):
"""Evaluates ARMA terms
Evaluates ARMA terms. To be owned by instances of Parameter so that their
methods ar(), ma(), d_reg_ar() and d_reg_ma() can be called here. AR
and MA terms at the start of the time series are evaluated to be zero.
Attributes:
parameter: a Parameter instance which owns this arma
time_series: a TimeSeries instance which owns parameter
n_ar: number of autocorrelation terms
n_ma: number of moving average terms
"""
def ar_term(self, index):
"""AR term at a time step
Returns the AR term (vector) at a given time step. Each element
correspond to each lag [lag 1, lag 2, ... lag n_ar]
Args:
index: time step (t)
Returns:
vector of length self.n_ar
"""
ar = np.zeros(self.n_ar)
for i in range(self.n_ar):
index_lag = index-i-1 # lag term, eg one step behind for i=0
if index_lag >= 0:
ar[i] = self.parameter.ar(index_lag)
else:
ar[i] = 0
return ar
def ma_term(self, index):
"""MA term at a time step
Returns the MA term (vector) at a given time step. Each element
correspond to each lag [lag 1, lag 2, ... lag n_ma]
Args:
index: time step (t)
Returns:
vector of length self.n_ma
"""
time_series = self.time_series
ma = np.zeros(self.n_ma)
for i in range(self.n_ma):
index_lag = index-i-1 # lag term, eg one step behind for i=0
if index_lag >= 0:
ma[i] = self.parameter.ma(
time_series[index_lag],
time_series.z_array[index_lag],
time_series.poisson_rate[index_lag],
time_series.gamma_mean[index_lag],
time_series.gamma_dispersion[index_lag])
else:
ma[i] = 0
return ma
def d_reg_ar_term(self, index, key):
"""Derivative of the AR term with respect to a regression parameter
Return the derivative of the AR term \\sum_{i=1}^p\\phi_{i}\\Phi(t-i)
with respect to a regression parameter (eg \\beta, \\phi, \\theta,
k). The return value is a vector of the same shape as the
regression parameter
Args:
index: time step (t)
key: name of the regression parameter (eg "reg", "AR", "MA",
"const")
"""
grad = [] # array of gradient vectors, one for each AR lag
parameter = self.parameter
# for each AR lag
for i in range(self.n_ar):
index_lag = index - i - 1 # lag term, eg one step behind for i=0
# work out gradient if there is a term
if index_lag >= 0:
grad.append(
parameter["AR"][i] * parameter.d_reg_ar(index_lag, key))
# zero gradient if there is no term
else:
grad.append(np.zeros_like(parameter[key]))
# sum gradient over all AR lags
grad = np.asarray(grad)
grad = np.sum(grad, 0)
return grad
def d_reg_ma_term(self, index, key):
"""Derivative of the MA term with respect to a regression parameter
Return the derivative of the MA term
\\sum_{i=1}^q\\theta_{i}\\Theta(t-i) with respect to a regression
parameter (eg \\beta, \\phi, \\theta, k). The return value is a
vector of the same shape as the regression parameter
Args:
index: time step (t)
key: name of the regression parameter (eg "reg", "AR", "MA",
"const")
"""
grad = [] # array of gradient vectors, one for each AR lag
parameter = self.parameter
# for each MA lag
for i in range(self.n_ma):
index_lag = index - i - 1 # lag term, eg one step behind for i=0
# work out gradient if there is a term
if index_lag >= 0:
grad.append(
parameter["MA"][i] * parameter.d_reg_ma(index_lag, key))
# zero gradient if there is no term
else:
grad.append(np.zeros_like(parameter[key]))
grad = np.asarray(grad)
grad = np.sum(grad, 0)
return grad
class ArmaForecast(Arma):
"""Evaluates ARMA terms for forecasting
Evaluates ARMA terms. To be owned by instances of Parameter so that their
methods ar(index) and ma(index) can be called here. AR and MA terms at
the start of the time series are evaluated using the past (fitted) time
series.
"""
| [
37811,
1890,
22232,
5923,
5673,
2846,
1262,
281,
47065,
198,
198,
1890,
22232,
5923,
5673,
2846,
1262,
281,
47065,
13,
4930,
6097,
11,
943,
2611,
290,
198,
220,
220,
220,
943,
2611,
16351,
2701,
13,
943,
2611,
318,
329,
2276,
779,
11,... | 2.212884 | 2,344 |
import importlib
from channels import channel_layers
class Parser(object):
"""
Takes module_path as arguments path.module and also it has the function to
check the in-memory layer.
"""
| [
11748,
1330,
8019,
198,
6738,
9619,
1330,
6518,
62,
75,
6962,
628,
198,
4871,
23042,
263,
7,
15252,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
33687,
8265,
62,
6978,
355,
7159,
3108,
13,
21412,
290,
635,
340,
468,
262,
216... | 3.327869 | 61 |
from dhis2 import Api, RequestException, setup_logger, logger
"""
Add "(updated)" to all Data Elements that contain "ANC" in its name.
Uses the method PUT.
Print errors if it failed.
"""
# Create a Api object
api = Api("play.dhis2.org/dev", "admin", "district")
# setup the logger
setup_logger()
if __name__ == "__main__":
main()
| [
6738,
288,
14363,
17,
1330,
5949,
72,
11,
19390,
16922,
11,
9058,
62,
6404,
1362,
11,
49706,
198,
198,
37811,
198,
4550,
30629,
43162,
16725,
284,
477,
6060,
26632,
326,
3994,
366,
20940,
1,
287,
663,
1438,
13,
198,
5842,
274,
262,
... | 2.881356 | 118 |
from Bio import SeqIO
import argparse
import os
parser = argparse.ArgumentParser(description='# This program renames sequences and trims primers from a sequence.')
parser.add_argument('-i','--in', dest='IN', help='Input folder.')
parser.add_argument('-o','--out', dest='OUT', help='Output folder.')
parser.add_argument('-r','--remove', dest='REMOVE', help='A an original file name part, that should be replaced by a suffix "_renamed.fastq".')
args = parser.parse_args()
################################################################################
# If there wasn't one already, adding forward slash '/'
if args.OUT[-1] != '/':
args.OUT = ''.join(args.OUT + '/')
if args.IN[-1] != '/':
args.IN = ''.join(args.IN + '/')
# If output folder doesn't exist, create one
if not os.path.exists(args.OUT):
os.makedirs(args.OUT)
os.chdir(args.IN)
files = os.listdir()
for entry in files:
renameSeq(entry, args)
| [
6738,
16024,
1330,
1001,
80,
9399,
198,
11748,
1822,
29572,
198,
11748,
28686,
198,
198,
48610,
796,
1822,
29572,
13,
28100,
1713,
46677,
7,
11213,
11639,
2,
770,
1430,
8851,
1047,
16311,
290,
220,
491,
12078,
2684,
364,
422,
257,
8379,... | 3.069079 | 304 |
# -*- coding: utf-8 -*-
# vim: sw=4 ts=4 fenc=utf-8
# =============================================================================
# $Id: template.py 2 2006-08-26 17:51:50Z s0undt3ch $
# =============================================================================
# $URL: http://ispmanccp.ufsoft.org/svn/branches/PythonPerl/ispmanccp/controllers/template.py $
# $LastChangedDate: 2006-08-26 18:51:50 +0100 (Sat, 26 Aug 2006) $
# $Rev: 2 $
# $LastChangedBy: s0undt3ch $
# =============================================================================
# Copyright (C) 2006 Ufsoft.org - Pedro Algarvio <ufs@ufsoft.org>
#
# Please view LICENSE for additional licensing information.
# =============================================================================
from ispmanccp.lib.base import *
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
43907,
25,
1509,
28,
19,
40379,
28,
19,
277,
12685,
28,
40477,
12,
23,
198,
2,
38093,
25609,
198,
2,
720,
7390,
25,
11055,
13,
9078,
362,
4793,
12,
2919,
12,
20... | 3.4125 | 240 |
import os
from pathlib import Path
import parfive
| [
198,
11748,
28686,
198,
6738,
3108,
8019,
1330,
10644,
198,
11748,
1582,
13261,
198
] | 3.642857 | 14 |
"""Dict with attribute-like access."""
from typing import Any, Dict, KeysView
from python_awair import const
class AttrDict(Dict[str, Any]):
"""Dict with attribute-like access.
For example, given an AttrDict *foo*, we could
access its values via *foo["bar"]* or *foo.bar*.
This is the parent class for the Sensors and Indices
classes, and as such it renames some properties to
friendlier names on initialization (but not anytime after).
"""
def __init__(self, attrs: Dict[str, Any]) -> None:
"""Initialize, hiding known sensor aliases."""
new_attrs = dict(attrs)
for key, value in attrs.items():
if key in const.SENSOR_TO_ALIAS:
new_attrs[const.SENSOR_TO_ALIAS[key]] = value
del new_attrs[key]
super().__init__(new_attrs)
def __getattr__(self, name: str) -> Any:
"""Return things in the dict via dot-notation."""
if name in self:
return self[name]
raise AttributeError()
def __setattr__(self, name: str, value: Any) -> None:
"""Set values in the dict via dot-notation."""
self[name] = value
def __delattr__(self, name: str) -> None:
"""Remove values from the dict via dot-notation."""
del self[name]
def __dir__(self) -> KeysView[str]:
"""Return dict keys as dir attributes."""
return self.keys()
| [
37811,
35,
713,
351,
11688,
12,
2339,
1895,
526,
15931,
198,
198,
6738,
19720,
1330,
4377,
11,
360,
713,
11,
26363,
7680,
198,
198,
6738,
21015,
62,
707,
958,
1330,
1500,
628,
198,
4871,
3460,
81,
35,
713,
7,
35,
713,
58,
2536,
11... | 2.483363 | 571 |
import cv2
img = cv2.imread("./data/test_images/straight_lines1.jpg")
cv2.imshow("Get Co-Ords", img)
cv2.waitKey()
| [
11748,
269,
85,
17,
198,
198,
9600,
796,
269,
85,
17,
13,
320,
961,
7,
1911,
14,
7890,
14,
9288,
62,
17566,
14,
42729,
62,
6615,
16,
13,
9479,
4943,
198,
33967,
17,
13,
320,
12860,
7203,
3855,
1766,
12,
35422,
82,
1600,
33705,
8... | 2.188679 | 53 |
import pytest
from osp.citations.jstor_record import JSTOR_Record
@pytest.mark.parametrize('inputs,surname', [
# Single author.
(
[
('David', 'McClure'),
],
'McClure'
),
# Multiple authors.
(
[
('David', 'McClure'),
('Joe', 'Karaganis'),
('Dennis', 'Tenen'),
],
'McClure'
),
# No authors.
(
[],
None
),
])
| [
198,
198,
11748,
12972,
9288,
198,
198,
6738,
267,
2777,
13,
66,
20597,
13,
73,
301,
273,
62,
22105,
1330,
449,
2257,
1581,
62,
23739,
628,
198,
31,
9078,
9288,
13,
4102,
13,
17143,
316,
380,
2736,
10786,
15414,
82,
11,
82,
700,
4... | 1.74717 | 265 |
from .. import bar_func | [
6738,
11485,
1330,
2318,
62,
20786
] | 3.833333 | 6 |
from marshmallow import Schema
from marshmallow.fields import Integer, String
| [
6738,
22397,
42725,
1330,
10011,
2611,
198,
6738,
22397,
42725,
13,
25747,
1330,
34142,
11,
10903,
628
] | 4.647059 | 17 |
#!/usr/bin/python3
from Adafruit_MotorHAT import Adafruit_MotorHAT, Adafruit_DCMotor
from time import sleep
if __name__ == "__main__":
motor_driver = Adafruit_MotorHAT(addr=0x60)
left_motor = motor_driver.getMotor(1)
right_motor = motor_driver.getMotor(2)
left_motor.setSpeed(255)
right_motor.setSpeed(255)
left_motor.run(Adafruit_MotorHAT.FORWARD)
right_motor.run(Adafruit_MotorHAT.FORWARD)
sleep(5)
left_motor.setSpeed(200)
right_motor.setSpeed(200)
left_motor.run(Adafruit_MotorHAT.BACKWARD)
right_motor.run(Adafruit_MotorHAT.BACKWARD)
sleep(5)
left_motor.run(Adafruit_MotorHAT.RELEASE)
right_motor.run(Adafruit_MotorHAT.RELEASE)
| [
2,
48443,
14629,
14,
8800,
14,
29412,
18,
198,
198,
6738,
1215,
1878,
4872,
62,
34919,
39,
1404,
1330,
1215,
1878,
4872,
62,
34919,
39,
1404,
11,
1215,
1878,
4872,
62,
9697,
34919,
198,
6738,
640,
1330,
3993,
628,
198,
361,
11593,
3... | 2.381625 | 283 |
import json
import scipy.io as scio
| [
11748,
33918,
198,
11748,
629,
541,
88,
13,
952,
355,
629,
952,
628
] | 2.846154 | 13 |
#1 Upper and Lower
ss = 'Hello, World'
print(ss.upper())
tt = ss.lower()
print(tt)
print(ss)
#2 Count, Strip, Replace
ss = ' Hello,World '
els = ss.count('l')
print(els)
print('***'+ss.strip()+'***') #Strip tira os espaços desnecessários do começo e do final
news = ss.replace ('o', '***')
print(news)
| [
2,
16,
20390,
290,
16048,
198,
198,
824,
796,
705,
15496,
11,
2159,
6,
198,
4798,
7,
824,
13,
45828,
28955,
198,
198,
926,
796,
37786,
13,
21037,
3419,
198,
4798,
7,
926,
8,
198,
4798,
7,
824,
8,
198,
198,
2,
17,
2764,
11,
185... | 2.348148 | 135 |
from tree_intersection import tree_intersection
from tree import BinaryTree, Node
| [
6738,
5509,
62,
3849,
5458,
1330,
5509,
62,
3849,
5458,
198,
6738,
5509,
1330,
45755,
27660,
11,
19081,
198
] | 4.315789 | 19 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# author: Rico Sennrich
#
# This file is part of moses. Its use is licensed under the GNU Lesser General
# Public License version 2.1 or, at your option, any later version.
"""Add flexibility scores to a phrase table half.
You usually don't have to call this script directly; to add flexibility
scores to your model, run train-model.perl with the option
"--flexibility-score" (will only affect steps 5 and 6).
Usage:
python flexibility_score.py extract.context(.inv).sorted \
[--Inverse] [--Hierarchical] < phrasetable > output_file
"""
from __future__ import division
from __future__ import unicode_literals
import sys
import gzip
from collections import defaultdict
if __name__ == '__main__':
if len(sys.argv) < 1:
sys.stderr.write(
"Usage: "
"python flexibility_score.py extract.context(.inv).sorted "
"[--Inverse] [--Hierarchical] < phrasetable > output_file\n")
exit()
flexfile = sys.argv[1]
if '--Inverse' in sys.argv:
inverted = True
else:
inverted = False
if '--Hierarchical' in sys.argv:
hierarchical = True
else:
hierarchical = False
FS = FlexScore(inverted, hierarchical)
FS.main(sys.stdin, gzip.open(flexfile, 'r'), sys.stdout)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
2,
1772,
25,
16707,
311,
1697,
7527,
198,
2,
198,
2,
770,
2393,
318,
636,
286,
285,
4629,
13,
220,
6363,
779,
... | 2.631683 | 505 |
import pandas
from pandas.io.json import json_normalize
import matplotlib.pyplot
data_es = pandas.read_json('dataset_es.json')
data_en = pandas.read_json('dataset_en.json')
data_de = pandas.read_json('dataset_de.json')
data_it = pandas.read_json('dataset_it.json')
data = [juntarEstadisticas(data_es), juntarEstadisticas(data_en), juntarEstadisticas(data_de), juntarEstadisticas(data_it)]
df = pandas.DataFrame(data, columns = ["Liga", "Cantidad de Fechas", "GL", "GV", "T. Goles", "T. Partidos", "EGF", "EGP"])
print(df)
df.plot(x ='Liga', y='GL', kind = 'barh')
matplotlib.pyplot.title("Total de goles locales")
matplotlib.pyplot.xlabel("Liga")
matplotlib.pyplot.ylabel("Goles")
matplotlib.pyplot.show() | [
11748,
19798,
292,
198,
6738,
19798,
292,
13,
952,
13,
17752,
1330,
33918,
62,
11265,
1096,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
198,
198,
7890,
62,
274,
796,
19798,
292,
13,
961,
62,
17752,
10786,
19608,
292,
316,
62,
274,... | 2.398649 | 296 |
import itertools
import json
import hashlib
import requests
from django.utils.text import slugify
url = 'http://refugee-info-api-v2.prod.rescueapp.org/v2'
# url = 'http://localhost:8000/v2'
headers = {
'content-type': 'application/json',
'ServiceInfoAuthorization': 'token 3a4a15e8eef71821b75834d179be732368026c78'
}
countries = requests.get('{}/region/?level=1&simple'.format(url), headers=headers).json()
countries = dict({(c['slug'], c['id']) for c in countries})
regions = requests.get('http://api.refugee.info/v1/region').json()
print("Importing", len(regions))
regions = [r for r in regions if r['full_slug'].split('--')[0] == 'serbia']
regions = [r for r in regions if not r['hidden']]
for r in regions:
for c in r['content']:
c['page_slug'] = r['slug']
c['country_slug'] = r['full_slug'].split('--')[0]
c['hash'] = hashlib.sha224(c['section'].encode('utf-8')).hexdigest()
important = []
for c in r['important_information']:
if 'content' in c and c['content']:
main = c['content'][0]
main['sub'] = c['content'][1:]
main['slug'] = c['slug'] if 'slug' in c else slugify(c['title'])
main['__important'] = True
main['index'] = 10000
main['page_slug'] = r['slug']
main['country_slug'] = r['full_slug'].split('--')[0]
main['hash'] = hashlib.sha224(main['section'].encode('utf-8')).hexdigest()
important.append(main)
r['content'] += important
reduced = list(itertools.chain.from_iterable([r['content'] for r in regions]))
by_hash = {}
all_current = requests.get("{}/page/?status=staging".format(url), headers=headers).json()
all_current = dict({(c['slug'], json.dumps(c)) for c in all_current})
assignments = {}
for section in reduced:
# @print (section)
# print ('\n\n')
if 'slug' not in section or not section['slug']:
slug = section['anchor_name'] or slugify(section['title'])
# slug = slugify(section['title'])
section['slug'] = slug
else:
slug = section['slug']
if section['country_slug'] not in assignments:
assignments[section['country_slug']] = dict()
if section['page_slug'] not in assignments[section['country_slug']]:
assignments[section['country_slug']][section['page_slug']] = []
if section['hash'] in by_hash:
by_d = dict(assignments[section['country_slug']][section['page_slug']])
if not section['hash'] in by_d:
print("Assign {} to {}".format(slug, section['page_slug']))
assignments[section['country_slug']][section['page_slug']].append((section['hash'], section['index']))
else:
if slug in all_current:
current = json.loads(all_current[slug])
try:
current_html = "\n".join([c['content_object']['text'] for c in current['content_items']])
current_hash = hashlib.sha224(current_html.encode('utf-8')).hexdigest()
by_hash[current_hash] = current
if current_hash != section['hash']:
old_slug_len = len(
[k for k in [z['slug'] for z in by_hash.values()] if str(k).startswith(slug)])
if old_slug_len:
new_slug = "{}-{}".format(slug, old_slug_len)
else:
new_slug = slug
if new_slug not in all_current:
section['slug'] = new_slug
by_hash[section['hash']] = section
print("Add {} to db and assign to to {}".format(new_slug, section['page_slug']))
db = add_to_db(section)
all_current[new_slug] = json.dumps(db)
assignments[section['country_slug']][section['page_slug']].append(
(section['hash'], section['index']))
else:
try:
current = json.loads(all_current[new_slug])
current_html = "\n".join(
[c['content_object']['text'] for c in current['content_items']])
current_hash = hashlib.sha224(current_html.encode('utf-8')).hexdigest()
by_hash[current_hash] = current
section['slug'] = new_slug
print("Assign {} to {}".format(new_slug, section['page_slug']))
assignments[section['country_slug']][section['page_slug']].append(
(current_hash, section['index']))
except Exception as e:
print(e)
else:
section['slug'] = slug
print("Assign {} to {}".format(slug, section['page_slug']))
assignments[section['country_slug']][section['page_slug']].append(
(current_hash, section['index']))
except Exception as e1:
print(e1)
else:
print("Add new {} to db and assign to to {}".format(slug, section['page_slug']))
by_hash[section['hash']] = section
db = add_to_db(section)
all_current[slug] = json.dumps(db)
assignments[section['country_slug']][section['page_slug']].append((section['hash'], section['index']))
for section in [c for c in reduced if c['hide_from_toc']]:
if section['hash'] in by_hash:
db = by_hash[section['hash']]
if 'pop_up' not in db or db['pop_up'] != section['hide_from_toc']:
update_db(section)
for country_slug, p in assignments.items():
print(country_slug)
# country_slug= 'greece'
# p = assignments['greece']
for page_slug, items in p.items():
r = requests.get("{}/page_by_region/{}/?status=staging".format(url, page_slug), headers=headers).json()
sor = [dict(**by_hash[a[0]]) for a in sorted(items, key=lambda o: o[1])]
for i, s in enumerate(sor):
p = json.loads(all_current[s['slug']])
if 'content_items' in s:
del s['content_items']
s['id'] = p['id']
s['index'] = i
if 'id' in s:
s['page'] = s['id']
else:
print(s)
r['pages'] = sor
r2 = requests.put("{}/page_by_region/{}/?status=staging".format(url, country_slug), data=json.dumps(r),
headers=headers)
print(r2.status_code)
"""
# ONCE DONE WITH IMPORT RUN THIS CODE WHERE YOU HAVE ACCESS TO THE DB
from cms import models
from cms import utils
for p in models.Page.objects.all():
try:
p.publish()
#utils.push_to_transifex(p.slug)
except:
pass
r = p.regions_with_order.all()
c = set([list(a.region.parents)[-1].slug for a in r if len(list(a.region.parents)) > 0])
if len(c) > 1:
if p.limited_to:
p.limited_to = None
print('Limited to none', p.id)
p.save()
"""
| [
11748,
340,
861,
10141,
198,
11748,
33918,
198,
198,
11748,
12234,
8019,
198,
11748,
7007,
198,
6738,
42625,
14208,
13,
26791,
13,
5239,
1330,
31065,
1958,
198,
198,
6371,
796,
705,
4023,
1378,
5420,
2217,
68,
12,
10951,
12,
15042,
12,
... | 2.012655 | 3,556 |
"""
problem 3
The prime factors of 13195 are 5, 7, 13 and 29.
What is the largest prime factor of the number 600851475143 ?
"""
# print(is_prime(42))
# print the prime factors
for k in generate_prime_factors(600851475143):
print(k)
| [
37811,
198,
198,
45573,
513,
198,
464,
6994,
5087,
286,
1511,
22186,
389,
642,
11,
767,
11,
1511,
290,
2808,
13,
198,
2061,
318,
262,
4387,
6994,
5766,
286,
262,
1271,
10053,
5332,
1415,
2425,
21139,
5633,
198,
198,
37811,
198,
220,
... | 2.242188 | 128 |
from django.contrib import admin
from home.models import Contact,Quotation
# Register your models here.
admin.site.register(Contact, ContactAdmin)
admin.site.register(Quotation) | [
6738,
42625,
14208,
13,
3642,
822,
1330,
13169,
198,
6738,
1363,
13,
27530,
1330,
14039,
11,
4507,
14221,
198,
2,
17296,
534,
4981,
994,
13,
198,
220,
220,
220,
220,
198,
220,
220,
220,
220,
198,
28482,
13,
15654,
13,
30238,
7,
1782... | 3.280702 | 57 |
from dsn.s_expr.clef import (
BecomeAtom,
SetAtom,
BecomeList,
Insert,
Extend,
)
| [
6738,
288,
16184,
13,
82,
62,
31937,
13,
2375,
69,
1330,
357,
198,
220,
220,
220,
31114,
2953,
296,
11,
198,
220,
220,
220,
5345,
2953,
296,
11,
198,
220,
220,
220,
31114,
8053,
11,
198,
220,
220,
220,
35835,
11,
198,
220,
220,
... | 2.019231 | 52 |
########################################################################
# Copyright 2018 FireEye Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
########################################################################
import argparse
import cwmi
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--path', help='Full path to executable', required=True)
parsed_args = parser.parse_args()
create_process(parsed_args.path)
| [
29113,
29113,
7804,
198,
2,
15069,
2864,
3764,
24876,
3457,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
... | 3.923695 | 249 |
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
#!/usr/bin/env python2.7
"""Send JPEG image to tensorflow_model_server loaded with inception model.
"""
from __future__ import print_function
# This is a placeholder for a Google-internal import.
from grpc.beta import implementations
import tensorflow as tf
from tensorflow.python.framework import tensor_util
# from tensorflow_serving.apis import tomtest_pb2
# from tensorflow_serving.apis import tomtest_grpc_pb2
from tensorflow_serving.apis import predict_pb2
from tensorflow_serving.apis import prediction_service_pb2
from tensorflow_serving.apis import olympian_master_grpc_pb2
from tensorflow_serving.apis import olympian_worker_grpc_pb2
import time
import numpy as np
import logging
logging.basicConfig()
from concurrent import futures
import grpc
_ONE_DAY_IN_SECONDS = 60 * 60 * 24
MAX_MESSAGE_LENGTH = 1024 * 1024 * 64
# Master Class
if __name__ == '__main__':
tf.app.run() | [
2,
15069,
1584,
3012,
3457,
13,
1439,
6923,
33876,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
... | 3.494457 | 451 |
import codecs
import os
import argparse
import sys
if __name__ == '__main__':
main()
| [
11748,
40481,
82,
198,
11748,
28686,
198,
11748,
1822,
29572,
198,
11748,
25064,
198,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
... | 2.183673 | 49 |
# coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""SwedMedNER: A Named Entity Recognition Dataset on medical texts in Swedish"""
import re
import datasets
_CITATION = """\
@inproceedings{almgrenpavlovmogren2016bioner,
title={Named Entity Recognition in Swedish Medical Journals with Deep Bidirectional Character-Based LSTMs},
author={Simon Almgren, Sean Pavlov, Olof Mogren},
booktitle={Proceedings of the Fifth Workshop on Building and Evaluating Resources for Biomedical Text Mining (BioTxtM 2016)},
pages={1},
year={2016}
}
"""
_DESCRIPTION = """\
SwedMedNER is a dataset for training and evaluating Named Entity Recognition systems on medical texts in Swedish.
It is derived from medical articles on the Swedish Wikipedia, Läkartidningen, and 1177 Vårdguiden.
"""
_LICENSE = """\
Creative Commons Attribution-ShareAlike 4.0 International Public License (CC BY-SA 4.0)
See http://creativecommons.org/licenses/by-sa/4.0/ for the summary of the license.
"""
_URL = "https://github.com/olofmogren/biomedical-ner-data-swedish"
_DATA_URL = "https://raw.githubusercontent.com/olofmogren/biomedical-ner-data-swedish/master/"
class SwedishMedicalNerConfig(datasets.BuilderConfig):
"""BuilderConfig for SwedMedNER"""
def __init__(self, **kwargs):
"""
Args:
**kwargs: keyword arguments forwarded to super.
"""
super(SwedishMedicalNerConfig, self).__init__(**kwargs)
class SwedishMedicalNer(datasets.GeneratorBasedBuilder):
"""SwedMedNER: A Named Entity Recognition Dataset on medical texts in Swedish"""
VERSION = datasets.Version("1.0.0")
BUILDER_CONFIGS = [
datasets.BuilderConfig(name="wiki", version=VERSION, description="The Swedish Wikipedia part of the dataset"),
datasets.BuilderConfig(name="lt", version=VERSION, description="The Läkartidningen part of the dataset"),
datasets.BuilderConfig(name="1177", version=VERSION, description="The 1177 Vårdguiden part of the dataset"),
]
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
urls_to_download = {
"wiki": _DATA_URL + "Wiki_annotated_60.txt",
"lt": _DATA_URL + "LT_annotated_60.txt",
"1177": _DATA_URL + "1177_annotated_sentences.txt",
}
downloaded_files = dl_manager.download_and_extract(urls_to_download)
if self.config.name == "wiki":
return [
datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["wiki"]})
]
elif self.config.name == "lt":
return [
datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["lt"]})
]
elif self.config.name == "1177":
return [
datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["1177"]})
]
def _generate_examples(self, filepath):
"""Yields examples as (key, example) tuples."""
pattern = r"\[([^\[\]()]+)\]|\(([^\[\]()]+)\)|\{([^\[\]()]+)\}"
with open(filepath, encoding="utf-8") as f:
for id_, row in enumerate(f):
sentence = row.replace("\n", "")
if self.config.name == "1177":
targets = [
{
"start": m.start(0),
"end": m.end(0),
"text": sentence[m.start(0) + 2 : m.end(0) - 2],
"type": find_type(sentence[m.start(0)], sentence[m.end(0) - 1]),
}
for m in re.finditer(pattern, sentence)
]
yield id_, {
"sid": self.config.name + "_" + str(id_),
"sentence": sentence,
"entities": targets if targets else [],
}
else:
targets = [
{
"start": m.start(0),
"end": m.end(0),
"text": sentence[m.start(0) + 1 : m.end(0) - 1],
"type": find_type(sentence[m.start(0)], sentence[m.end(0) - 1]),
}
for m in re.finditer(pattern, sentence)
]
yield id_, {
"sid": self.config.name + "_" + str(id_),
"sentence": sentence,
"entities": targets if targets else [],
}
| [
2,
19617,
28,
40477,
12,
23,
198,
2,
15069,
12131,
383,
12905,
2667,
32388,
16092,
292,
1039,
46665,
290,
262,
1459,
27039,
4226,
18920,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
3415... | 2.189627 | 2,410 |
import os, discord
from pathlib import Path
from dotenv import load_dotenv
from discord.ext import commands
env_path = Path('.') / '.env'
load_dotenv(dotenv_path=env_path)
client = commands.Bot(
command_prefix=os.getenv('BOT_PREFIX'),
owner_id=os.getenv('BOT_OWNER'),
case_insensitive=True,
help_command=None,
intents=discord.Intents.all(),
)
@client.event
client.run(os.getenv('BOT_TOKEN')) | [
11748,
28686,
11,
36446,
198,
6738,
3108,
8019,
1330,
10644,
198,
6738,
16605,
24330,
1330,
3440,
62,
26518,
24330,
198,
6738,
36446,
13,
2302,
1330,
9729,
198,
198,
24330,
62,
6978,
796,
10644,
10786,
2637,
8,
1220,
45302,
24330,
6,
19... | 2.530488 | 164 |
# -*- coding: utf-8 -*-
"""
direct expansion units
"""
from __future__ import division
from scipy.interpolate import interp1d
from math import log, ceil
import pandas as pd
import numpy as np
from cea.constants import HEAT_CAPACITY_OF_WATER_JPERKGK
__author__ = "Shanshan Hsieh"
__copyright__ = "Copyright 2015, Architecture and Building Systems - ETH Zurich"
__credits__ = ["Shanshan Hsieh"]
__license__ = "MIT"
__version__ = "0.1"
__maintainer__ = "Daren Thomas"
__email__ = "cea@arch.ethz.ch"
__status__ = "Production"
# FIXME: this model is simplified, and required update
PRICE_DX_PER_W = 1.6 #USD FIXME: to be moved to database
# operation costs
# investment and maintenance costs
def calc_Cinv_DX(Q_design_W):
"""
Assume the same cost as gas boilers.
:type Q_design_W : float
:param Q_design_W: Design Load of Boiler in [W]
:param gV: globalvar.py
:rtype InvCa : float
:returns InvCa: Annualized investment costs in CHF/a including Maintenance Cost
"""
Capex_a = 0
Opex_fixed = 0
if Q_design_W > 0:
InvC = Q_design_W * PRICE_DX_PER_W
Inv_IR = 5 / 100
Inv_LT = 25
Inv_OM = 5 / 100
Capex_a = InvC * (Inv_IR) * (1 + Inv_IR) ** Inv_LT / ((1 + Inv_IR) ** Inv_LT - 1)
Opex_fixed = Capex_a * Inv_OM
return Capex_a, Opex_fixed | [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
12942,
7118,
4991,
198,
37811,
198,
198,
6738,
11593,
37443,
834,
1330,
7297,
198,
6738,
629,
541,
88,
13,
3849,
16104,
378,
1330,
987,
79,
16,
67,
198,
67... | 2.456722 | 543 |
from tkinter import *
from tkinter import filedialog
from PIL import ImageTk, Image
# Declare some global variables to use
img_list = []
page = None
root = Tk()
root.title('Opening files')
r = 1
c = '3'
def open_file():
"""
Opens a list in a list.
Args:
"""
# Open images with GUI
global page
global img_list
global r
global c
root.filename = filedialog.askopenfilenames(initialdir="images/",
title="Select a file",
filetypes=(("All files", "*.*"),))
file_list = list(root.filename)
for name in file_list:
page = Image.open(name)
page = page.convert("RGB")
img_list.append(page)
my_label = Label(text=name, anchor='w').grid(row=r, column=str(c))
r += 1
def generate_pdf():
"""
Generate a pdf
Args:
"""
# Generate PDF
global page
global img_list
page = img_list[0]
page.save(r' '+ str(e.get()) + '.pdf', save_all=True, append_images=img_list[1:])
# Initialize Button widgets for tkinter
add_file_button = Button(root, text="Add file", command=open_file, width=15)
generate_button = Button(root, text="Generate PDF", command=generate_pdf, width=15)
file_name = Label(root, text="Enter your file name: ")
e = Entry(root)
# Align Buttons
add_file_button.grid(row=1, column='1')
generate_button.grid(row=2, column='1')
file_name.grid(row=3, column='1')
e.grid(row=3, column='2')
if __name__ == "__main__":
root.mainloop() | [
6738,
256,
74,
3849,
1330,
1635,
198,
6738,
256,
74,
3849,
1330,
5717,
498,
519,
198,
6738,
350,
4146,
1330,
7412,
51,
74,
11,
7412,
198,
198,
2,
16691,
533,
617,
3298,
9633,
284,
779,
198,
9600,
62,
4868,
796,
17635,
198,
7700,
7... | 2.521277 | 564 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import numpy as np
import argparse
import os
import json
import glob
import random
import collections
import math
import time
from ops import *
| [
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
198,
6738,
11593,
37443,
834,
1330,
7297,
198,
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
198,
11748,
11192,
273,
11125,
355,
48700,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
... | 3.985714 | 70 |
import pandas as pd
import numpy as np
import io
import requests
| [
11748,
19798,
292,
355,
279,
67,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
33245,
198,
11748,
7007,
628
] | 3.473684 | 19 |
"""
Root of the SkPy module. Classes from all submodules are imported here for convenience.
"""
from skpy.core import SkypeObj, SkypeObjs, SkypeEnum, SkypeException, SkypeApiException, SkypeAuthException
from skpy.util import SkypeUtils
from skpy.main import Skype, SkypeEventLoop, SkypeSettings, SkypeTranslator
from skpy.conn import SkypeConnection, SkypeAuthProvider, SkypeAPIAuthProvider, SkypeLiveAuthProvider, \
SkypeSOAPAuthProvider, SkypeRefreshAuthProvider, SkypeGuestAuthProvider, SkypeEndpoint
from skpy.user import SkypeUser, SkypeContact, SkypeBotUser, SkypeContacts, SkypeContactGroup, SkypeRequest
from skpy.chat import SkypeChat, SkypeSingleChat, SkypeGroupChat, SkypeChats
from skpy.msg import SkypeMsg, SkypeTextMsg, SkypeContactMsg, SkypeLocationMsg, SkypeCardMsg, \
SkypeFileMsg, SkypeImageMsg, SkypeCallMsg, SkypeMemberMsg, \
SkypeAddMemberMsg, SkypeChangeMemberMsg, SkypeRemoveMemberMsg
from skpy.event import SkypeEvent, SkypePresenceEvent, SkypeEndpointEvent, SkypeTypingEvent, \
SkypeMessageEvent, SkypeNewMessageEvent, SkypeEditMessageEvent, SkypeCallEvent, \
SkypeChatUpdateEvent, SkypeChatMemberEvent
| [
37811,
198,
30016,
286,
262,
3661,
20519,
8265,
13,
220,
38884,
422,
477,
850,
18170,
389,
17392,
994,
329,
15607,
13,
198,
37811,
198,
198,
6738,
1341,
9078,
13,
7295,
1330,
25254,
49201,
11,
25254,
5944,
8457,
11,
25254,
4834,
388,
... | 3.110276 | 399 |
from equals import equals
if __name__ == '__main__':
print(any(equals, 3, [1, 2, 3, 4])) | [
6738,
21767,
1330,
21767,
220,
198,
220,
220,
220,
220,
220,
220,
220,
220,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
3601,
7,
1092,
7,
4853,
874,
11,
513,
11,
685,
16,
11,
362,
11,
513,
... | 2.125 | 48 |
import datetime
import json
import urllib
import numpy as np
import fix_yahoo_finance
import pandas as pd
import plotly
import plotly.graph_objs as go
from flask import Flask, jsonify, render_template, request,session
from pandas_datareader import data as pdr
from datetime import timedelta
from Store import dataStore
from flask_session import Session
app = Flask(__name__)
app.secret_key = 'render_templateqweaqdfsafASDAS,smaCount=smaCount)'
app.config['SESSION_PERMANENT'] = True
app.config['SESSION_TYPE'] = 'filesystem'
app.config['PERMANENT_SESSION_LIFETIME'] = timedelta(hours=5)
# The maximum number of items the session stores
# before it starts deleting some, default 500
app.config['SESSION_FILE_THRESHOLD'] = 100
Session(app)
@app.route('/')
@app.route('/rule')
@app.route('/sma')
@app.route('/plotSMAs',methods=['POST'])
@app.route('/plotBoll',methods=['POST'])
@app.route('/getSymbols',methods=['POST'])
@app.route('/runRules',methods=['POST'])
if __name__=='__main__':
app.run()
| [
11748,
4818,
8079,
198,
11748,
33918,
198,
11748,
2956,
297,
571,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
4259,
62,
40774,
62,
69,
14149,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
7110,
306,
198,
11748,
7110,
306,
13,
... | 2.824022 | 358 |
from graph import BCGraph
from .star_type import StarType
from fimath.geodesic import Geodesic
from fimath.constants import *
import logging
logging.basicConfig(format='%(levelname)s:%(message)s', filename='log.log', level=logging.DEBUG)
| [
6738,
4823,
1330,
11843,
37065,
198,
6738,
764,
7364,
62,
4906,
1330,
2907,
6030,
198,
6738,
277,
320,
776,
13,
469,
4147,
291,
1330,
2269,
4147,
291,
198,
6738,
277,
320,
776,
13,
9979,
1187,
1330,
1635,
198,
198,
11748,
18931,
198,
... | 3.116883 | 77 |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE in the project root
# for license information.
from __future__ import absolute_import, division, print_function, unicode_literals
import ctypes
from ctypes.wintypes import BOOL, DWORD, HANDLE, LARGE_INTEGER, LPCSTR, UINT
from debugpy.common import log
JOBOBJECTCLASS = ctypes.c_int
LPDWORD = ctypes.POINTER(DWORD)
LPVOID = ctypes.c_void_p
SIZE_T = ctypes.c_size_t
ULONGLONG = ctypes.c_ulonglong
JobObjectExtendedLimitInformation = JOBOBJECTCLASS(9)
JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE = 0x00002000
PROCESS_TERMINATE = 0x0001
PROCESS_SET_QUOTA = 0x0100
kernel32 = ctypes.windll.kernel32
kernel32.AssignProcessToJobObject.errcheck = _errcheck()
kernel32.AssignProcessToJobObject.restype = BOOL
kernel32.AssignProcessToJobObject.argtypes = (HANDLE, HANDLE)
kernel32.CreateJobObjectA.errcheck = _errcheck(lambda result: result == 0)
kernel32.CreateJobObjectA.restype = HANDLE
kernel32.CreateJobObjectA.argtypes = (LPVOID, LPCSTR)
kernel32.OpenProcess.errcheck = _errcheck(lambda result: result == 0)
kernel32.OpenProcess.restype = HANDLE
kernel32.OpenProcess.argtypes = (DWORD, BOOL, DWORD)
kernel32.QueryInformationJobObject.errcheck = _errcheck()
kernel32.QueryInformationJobObject.restype = BOOL
kernel32.QueryInformationJobObject.argtypes = (
HANDLE,
JOBOBJECTCLASS,
LPVOID,
DWORD,
LPDWORD,
)
kernel32.SetInformationJobObject.errcheck = _errcheck()
kernel32.SetInformationJobObject.restype = BOOL
kernel32.SetInformationJobObject.argtypes = (HANDLE, JOBOBJECTCLASS, LPVOID, DWORD)
kernel32.TerminateJobObject.errcheck = _errcheck()
kernel32.TerminateJobObject.restype = BOOL
kernel32.TerminateJobObject.argtypes = (HANDLE, UINT)
| [
2,
15069,
357,
66,
8,
5413,
10501,
13,
1439,
2489,
10395,
13,
201,
198,
2,
49962,
739,
262,
17168,
13789,
13,
4091,
38559,
24290,
287,
262,
1628,
6808,
201,
198,
2,
329,
5964,
1321,
13,
201,
198,
201,
198,
6738,
11593,
37443,
834,
... | 2.598303 | 707 |
# Problem 17 : Number letter counts
liste_digit_letter = ['one', 'two', 'three', 'four', 'five', 'six', 'seven', 'eight', 'nine', 'ten',
'eleven', 'twelve', 'thirteen', 'fourteen', 'fifteen', 'sixteen', 'seventeen', 'eighteen', 'nineteen']
liste_ten_letter = ['twenty', 'thirty', 'forty',
'fifty', 'sixty', 'seventy', 'eighty', 'ninety']
liste_other_letter = ['hundred', 'and', 'thousand']
def number_to_value(number, liste_ten_letter=liste_ten_letter, liste_digit_letter=liste_digit_letter, liste_other_letter=liste_other_letter):
"""
This function takes a number, and 3 lists of words, and calculates
for this number the number of letter in its writting.
To do so, the function split the number into units, tens and hundreds
and do a case disjunction.
"""
if number == 0:
return 0
elif 1 <= number <= 19:
return len(liste_digit_letter[number-1])
elif 20 <= number <= 999:
number_unit = number % 10
number_ten = number // 10 % 10
number_hundreds = number // 100
if number_ten == 1:
value = number_to_value(number_ten*10+number_unit)
else:
value = number_to_value(number_unit)
if number_ten not in [0, 1]:
value += len(liste_ten_letter[number_ten-2])
if number_hundreds != 0:
value += len(liste_other_letter[0]) + \
len(liste_digit_letter[number_hundreds-1])
if number_hundreds != 0 and (number_ten != 0 or number_unit != 0):
value += len(liste_other_letter[1])
return value
else:
return len(liste_other_letter[2]) + len(liste_digit_letter[0])
def number_letter_counts():
"""
This function calculates the sum of every number's value, for number beetwen
1 and 1000. It uses the function number_to_value defined above.
"""
count = 0
for i in range(1, 1001):
print("i : ", i, "value : ", number_to_value(i))
count += number_to_value(i)
return count
print(number_letter_counts())
# Result 21124
| [
2,
20647,
1596,
1058,
7913,
3850,
9853,
628,
198,
4868,
68,
62,
27003,
62,
9291,
796,
37250,
505,
3256,
705,
11545,
3256,
705,
15542,
3256,
705,
14337,
3256,
705,
13261,
3256,
705,
19412,
3256,
705,
26548,
3256,
705,
26022,
3256,
705,
... | 2.392405 | 869 |
"""empty message
Revision ID: 3cbc86a0a9d7
Revises: 77894fcde804
Create Date: 2018-09-27 12:25:31.893545
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '3cbc86a0a9d7'
down_revision = '77894fcde804'
branch_labels = None
depends_on = None
| [
37811,
28920,
3275,
198,
198,
18009,
1166,
4522,
25,
513,
66,
15630,
4521,
64,
15,
64,
24,
67,
22,
198,
18009,
2696,
25,
767,
3695,
5824,
16072,
2934,
36088,
198,
16447,
7536,
25,
2864,
12,
2931,
12,
1983,
1105,
25,
1495,
25,
3132,
... | 2.430894 | 123 |
'''
CRUD = Create Read Update Delete
Add necessary sql functions here to use throughout the project.
'''
import pandas as pd
from sqlalchemy import MetaData, Table, Column, Integer, Float, String
from sqlalchemy.exc import IntegrityError
from sqlalchemy.orm import Session
from sqlalchemy.sql import select
import app.models as models
import app.schemas as schemas
from app.database import engine, inspector, metadata
# import models
# import schemas
# from database import engine, inspector, metadata
# TODO Add security check fastapi
### Reading Data ###
def get_tables_by_name(db: Session):
"""
Get all table names from database
:param db: Current database session
:return: List[String] of all table names
"""
return inspector.get_table_names()
# Get only emg and eeg tables (filter patients and datasets)
def get_session_tables(db: Session):
"""
Get only emg and eeg sessions names from database.
:param db: Current database session
:return: List[String] of emg and eeg table names
"""
allTables = get_tables_by_name(db)
a = [k for k in allTables if 'session' in k]
return a
### Creating data ###
# TODO:
# Edit patient
### Creating result ###
### Delete patient ###
## REPLACED WITH send_data() for a general usecase
# def create_emg_table(db: Session, tablename: String):
# metadata = MetaData()
# # name = String("{}_{}_{}".format(type, id, session))
# name = "emg_" + tablename
# table = Table(name, metadata,
# Column('timestamp', Integer, primary_key=True),
# Column('Channel 1', Float, nullable=False),
# Column('Channel 2', Float, nullable=False),
# Column('Channel 3', Float, nullable=False),
# Column('Channel 4', Float, nullable=False))
# metadata.create_all(engine)
# return name
# Add csv data to specified table
| [
7061,
6,
198,
9419,
8322,
796,
13610,
4149,
10133,
23520,
198,
4550,
3306,
44161,
5499,
994,
284,
779,
3690,
262,
1628,
13,
198,
7061,
6,
198,
198,
11748,
19798,
292,
355,
279,
67,
198,
6738,
44161,
282,
26599,
1330,
30277,
6601,
11,
... | 2.826023 | 684 |
import json
from collections import OrderedDict
from django.contrib.gis.gdal import GDALException
from django.contrib.gis.geos import GEOSException, GEOSGeometry
from django.core.exceptions import ValidationError
from django.utils.translation import gettext_lazy as _
from rest_framework.fields import Field, SerializerMethodField
__all__ = ['GeometryField', 'GeometrySerializerMethodField']
class GeometryField(Field):
"""
A field to handle GeoDjango Geometry fields
"""
type_name = 'GeometryField'
def __init__(
self, precision=None, remove_duplicates=False, auto_bbox=False, **kwargs
):
"""
:param auto_bbox: Whether the GeoJSON object should include a bounding box
"""
self.precision = precision
self.auto_bbox = auto_bbox
self.remove_dupes = remove_duplicates
super().__init__(**kwargs)
self.style.setdefault('base_template', 'textarea.html')
def _recursive_round(self, value, precision):
"""
Round all numbers within an array or nested arrays
value: number or nested array of numbers
precision: integer valueue of number of decimals to keep
"""
if hasattr(value, '__iter__'):
return tuple(self._recursive_round(v, precision) for v in value)
return round(value, precision)
def _rm_redundant_points(self, geometry, geo_type):
"""
Remove redundant coordinate pairs from geometry
geometry: array of coordinates or nested-array of coordinates
geo_type: GeoJSON type attribute for provided geometry, used to
determine structure of provided `geometry` argument
"""
if geo_type in ('MultiPoint', 'LineString'):
close = geo_type == 'LineString'
output = []
for coord in geometry:
coord = tuple(coord)
if not output or coord != output[-1]:
output.append(coord)
if close and len(output) == 1:
output.append(output[0])
return tuple(output)
if geo_type in ('MultiLineString', 'Polygon'):
return [self._rm_redundant_points(c, 'LineString') for c in geometry]
if geo_type == 'MultiPolygon':
return [self._rm_redundant_points(c, 'Polygon') for c in geometry]
return geometry
class GeoJsonDict(OrderedDict):
"""
Used for serializing GIS values to GeoJSON values
TODO: remove this when support for python 2.6/2.7 will be dropped
"""
def __init__(self, *args, **kwargs):
"""
If a string is passed attempt to pass it through json.loads,
because it should be a geojson formatted string.
"""
if args and isinstance(args[0], str):
try:
geojson = json.loads(args[0])
args = (geojson,)
except ValueError:
pass
super().__init__(*args, **kwargs)
def __str__(self):
"""
Avoid displaying strings like
``{ 'type': u'Point', 'coordinates': [12, 32] }``
in DRF browsable UI inputs (python 2.6/2.7)
see: https://github.com/openwisp/django-rest-framework-gis/pull/60
"""
return json.dumps(self)
| [
11748,
33918,
198,
6738,
17268,
1330,
14230,
1068,
35,
713,
198,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
70,
271,
13,
21287,
282,
1330,
27044,
1847,
16922,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
70,
271,
13,
469,
418,
1330,
... | 2.36812 | 1,399 |
# access csv budget_data csv
import os
import csv
from collection import defaultdict
csvpath = os.path.join('python-challenge.', 'election_data.csv')
with open(csvpath,'r', newline='') as csvfile:
csvreader = csv.reader(csvfile, delimiter=',')
csv_header = next(csvreader)
print(f"CSV Header: {csv_header}")
# Create lists to input the votes and candidates
total_votes = 0
candidates = set()
votes_num= []
ballots =[]
#Iterate to search for number of vote and candidates with votes
for row in csvreader:
total_votes+= 1
candidates.add(row[2])
votes_num.append()
if ballot.get(int(row[2]))is None:
ballots(int(row[2])) = 0
else:
ballots(row[2])+= 1
print(total_votes)
print(candidates)
print(ballots)
#Printing percent per candidate
for candidate in candidates:
candidate_vote = ballots.get(candidate)
percent = f"{(candidate_votes / total_votes*100)}"
print(f'{candidate} {percent}% {ballots[candidate]}')
#Printing winner
print(f"Winner:{max(ballot2, key=ballots.get)}")
#Export as a txt file
with open(f"{dir_path}/results.txt","w") as f:
f.write(f"Election Results\n")
f.write(f"--------------------------\n")
f.write("Total Votes:{(total_votes}\n")
for candidate in candidates:
candidate_vote = ballots.get(candidate)
percent = (f"{ballots.get(candidate/total_votes*100:)})
print(f'{candidate}%{percent}'%{ballots[candidates]})')
f.write(f"Winner: {max(ballots, key=ballots.get)}\n")
| [
2,
1895,
269,
21370,
4466,
62,
7890,
269,
21370,
198,
11748,
28686,
198,
11748,
269,
21370,
198,
6738,
4947,
1330,
4277,
11600,
198,
40664,
6978,
796,
28686,
13,
6978,
13,
22179,
10786,
29412,
12,
36747,
3540,
2637,
11,
705,
14300,
62,
... | 2.495935 | 615 |
import copy
from unittest import mock
import pytest
from pylon.aws import sqs
@pytest.mark.parametrize(
'useFixtures',
[
('testMessage_inline', 'rawMessage_inline'),
('testMessage_s3', 'rawMessage_s3')
],
indirect=['useFixtures']
)
@pytest.fixture
| [
11748,
4866,
198,
6738,
555,
715,
395,
1330,
15290,
198,
198,
11748,
12972,
9288,
198,
198,
6738,
279,
15158,
13,
8356,
1330,
19862,
82,
198,
198,
31,
9078,
9288,
13,
4102,
13,
17143,
316,
380,
2736,
7,
198,
220,
220,
220,
705,
1904... | 2.373984 | 123 |
from __future__ import print_function
from itertools import chain
import torch
import torch.nn as nn
from .base_model import BaseModel
from .backbone import create_backbone
from .pa_pool import PAPool
from .pcb_pool import PCBPool
from .global_pool import GlobalPool
from .ps_head import PartSegHead
from ..utils.model import create_embedding
from ..utils.model import init_classifier
| [
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
6738,
340,
861,
10141,
1330,
6333,
198,
11748,
28034,
198,
11748,
28034,
13,
20471,
355,
299,
77,
198,
6738,
764,
8692,
62,
19849,
1330,
7308,
17633,
198,
6738,
764,
1891,
15992,
1330,... | 3.574074 | 108 |
# -*- coding: utf-8 -*-
# #############################################################################
#
# The MIT License (MIT)
#
# Copyright (c) 2016 Michell Stuttgart
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
###############################################################################
from unittest import TestCase
from sigep.sigep.plp import XmlPLP
from sigep.sigep.plp import TagPLP
from sigep.sigep.plp import TagRemetente
from sigep.sigep.plp import TagObjetoPostal
from sigep.sigep.plp import TagDestinatario
from sigep.sigep.plp import TagNacional
from sigep.sigep.plp import TagServicoAdicional
from sigep.sigep.plp import TagDimesaoObjeto
from sigep.sigep.plp import TagDimensionTipoObjeto
from sigep.sigep.plp import TagDimensionAlturaLargura
from sigep.sigep.plp import TagDimensionComprimento
from sigep.sigep.plp import TagDimensionDiametro
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
1303,
29113,
29113,
7804,
4242,
198,
2,
198,
2,
383,
17168,
13789,
357,
36393,
8,
198,
2,
198,
2,
15069,
357,
66,
8,
1584,
7631,
12758,
520,
15318,
41651,
198,
2,... | 3.438182 | 550 |
"""
Technical Analysis (TA) Plotter
Library of functions to plot various technical indicator charts.
@author: eyu
"""
import os
import logging
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import constants as c
class TAPlot(object):
"""
Class for TA plots.
"""
| [
37811,
198,
45638,
14691,
357,
5603,
8,
28114,
353,
198,
198,
23377,
286,
5499,
284,
7110,
2972,
6276,
16916,
15907,
13,
198,
198,
31,
9800,
25,
1926,
84,
198,
37811,
198,
198,
11748,
28686,
198,
11748,
18931,
198,
11748,
2603,
29487,
... | 3.23913 | 92 |
#!/usr/bin/env python3
from reportlab.platypus import SimpleDocTemplate
from reportlab.platypus import Paragraph, Spacer, Table, Image
from reportlab.lib.styles import getSampleStyleSheet
from reportlab.lib import colors
from reportlab.graphics.shapes import Drawing
from reportlab.graphics.charts.barcharts import VerticalBarChart
from reportlab.graphics.charts.piecharts import Pie
import locale, operator
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
6738,
989,
23912,
13,
489,
265,
4464,
385,
1330,
17427,
23579,
30800,
198,
6738,
989,
23912,
13,
489,
265,
4464,
385,
1330,
2547,
6111,
11,
1338,
11736,
11,
8655,
11,
7412,
... | 3.525862 | 116 |
"""
Simple training loop; Boilerplate that could apply to any arbitrary neural network,
so nothing in this file really has anything to do with GPT specifically.
"""
import math
import logging
from tqdm import tqdm
import numpy as np
import torch
import torch.optim as optim
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data.dataloader import DataLoader
from torch.utils.tensorboard import SummaryWriter
writer = SummaryWriter(log_dir='./train_log')
logger = logging.getLogger(__name__)
| [
37811,
198,
26437,
3047,
9052,
26,
3248,
5329,
6816,
326,
714,
4174,
284,
597,
14977,
17019,
3127,
11,
198,
568,
2147,
287,
428,
2393,
1107,
468,
1997,
284,
466,
351,
402,
11571,
5734,
13,
198,
37811,
198,
198,
11748,
10688,
198,
1174... | 3.534722 | 144 |
# Employee is a sub-class of Person and adds the attribute staffnumber
x = Person("Marge", "Simpson")
y = Employee("Homer", "Simpson", "1007")
print(x.Name())
print(y.GetEmployee())
| [
198,
2,
36824,
318,
257,
850,
12,
4871,
286,
7755,
290,
6673,
262,
11688,
3085,
17618,
628,
198,
87,
796,
7755,
7203,
44,
1376,
1600,
366,
8890,
8430,
4943,
198,
88,
796,
36824,
7203,
39,
12057,
1600,
366,
8890,
8430,
1600,
366,
443... | 3 | 62 |
# Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.math.approx_max_k and tf.math.approx_min_k."""
import itertools
from absl.testing import parameterized
import numpy as np
from tensorflow.python.eager import backprop
from tensorflow.python.eager.def_function import function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.platform import test
@test_util.run_all_in_graph_and_eager_modes
if __name__ == '__main__':
test.main()
| [
2,
15069,
33160,
383,
309,
22854,
37535,
46665,
13,
1439,
6923,
33876,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,... | 3.62973 | 370 |
"""Script parses changelog from git based on 2 latest tags, formates changelog to .md format, creates/updates release on gitlab
"""
import subprocess
import os
import re
import json
def run_command( bash_command ):
"""Sends command to system.
"""
process = subprocess.Popen(bash_command.split(), stdout=subprocess.PIPE)
output, error = process.communicate()
if error is not None :
print(error)
print("Terminating...")
quit()
return output.strip('\n')
def parse_raw_changelog(non_formatted_text ) :
"""Parses raw changelog extracted from git.
Returns map {'issue_type': ['issues_array']}
"""
mapped_issues = {}
for line in non_formatted_text.splitlines() :
#skipping empty lines, non related to issues description lines, etc...
if line == "" :
continue
if line.startswith("See merge request") or line.startswith("This reverts commit"):
continue
if len(line)<=11 and not " " in line:
continue
categorized_issue = False
for issue_type in issue_types:
issue_prefix = issue_type + ": "
#checking lower cased strings to prevent skipping misnamed issues
if line.strip(" ").lower().startswith(issue_prefix.lower()) :
categorized_issue = True
line = line.replace(issue_prefix, "")
if issue_type not in mapped_issues :
mapped_issues.update({issue_type : [line]})
else:
mapped_issues[issue_type].append(line)
break
if categorized_issue :
continue
#if code reach that line - means issue type is not in issue_types -> typo or uncategorized issuetype
if uncategorized_issueType not in mapped_issues :
mapped_issues.update({uncategorized_issueType : [line]})
else:
mapped_issues[uncategorized_issueType].append(line)
continue
return mapped_issues
line_breaker = "\n"
issue_types = {"Enhancement","Fix", "Feature", "Ongoing", "Checkmark",
"Related", "Lab", "Live", "Refactor", "Nochangelog", "Technical"}
uncategorized_issueType = "Uncategorized"
def convert_changelog_text_to_md(non_formatted_text , header ) :
"""Returns .MD formatted changelog based on raw formatted text.
Header - 'title' for set of issues in that changelog
"""
mapped_issues = parse_raw_changelog(non_formatted_text)
if len(mapped_issues) == 0 :
return ""
res = ""
if not (not header or header == ""):
res += build_header_project(header) + line_breaker
res += build_changelog_body(mapped_issues)
return res
if __name__ == "__main__":
main()
| [
37811,
7391,
13544,
274,
1488,
417,
519,
422,
17606,
1912,
319,
362,
3452,
15940,
11,
1296,
689,
1488,
417,
519,
284,
764,
9132,
5794,
11,
8075,
14,
929,
19581,
2650,
319,
17606,
23912,
198,
37811,
198,
11748,
850,
14681,
198,
11748,
... | 2.456889 | 1,125 |
import pykazoo.queues
import pykazoo.restrequest
from unittest import TestCase
from unittest.mock import create_autospec
mock_rest_request = create_autospec(pykazoo.restrequest.RestRequest)
| [
11748,
12972,
74,
1031,
2238,
13,
4188,
947,
198,
11748,
12972,
74,
1031,
2238,
13,
2118,
25927,
198,
6738,
555,
715,
395,
1330,
6208,
20448,
198,
6738,
555,
715,
395,
13,
76,
735,
1330,
2251,
62,
2306,
418,
43106,
198,
198,
76,
735... | 2.909091 | 66 |
import numpy as np
from utils import discretize_states
class Agent():
"""Agent base class."""
def __init__(self, action_space, observation_space):
"""Initializes agent class."""
self.action_space = action_space
self.observation_space = observation_space
def act(self, observation, reward, done):
"""Perform an action according to a random policy."""
return self.action_space.sample()
class QLearningAgent(Agent):
    """Tabular Q-learning agent over a discretized 2-D observation space.

    Continuous observations are mapped to integer grid indices (see
    ``_discretize``); a Q-table of shape
    (num_states_0, num_states_1, n_actions) is learned online.
    """

    def __init__(self, action_space, observation_space, epsilon=0.8, alpha=0.2, gamma=0.9):
        """Initializes Q-learning agent class.

        Args:
            action_space: discrete action space exposing ``n`` actions.
            observation_space: box space with ``low``/``high`` bounds.
            epsilon: exploration parameter (see NOTE in ``act``).
            alpha: learning rate for the Q-table update.
            gamma: discount factor for future rewards.
        """
        super().__init__(action_space, observation_space)
        # NOTE(review): redundant — super().__init__ already stored this.
        self.observation_space = observation_space
        self.epsilon = epsilon
        self.alpha = alpha
        self.gamma = gamma
        # Per-dimension resolution of the discretization grid.
        # NOTE(review): [1, 10] assumes exactly two observation dimensions —
        # confirm against the target environment.
        self.scales = np.array([1, 10])
        print(f'Obs low: {self.observation_space.low}')
        # Discretize state space
        # state_space = discretize_states(
        #     self.observation_space.high, self.observation_space,
        #     np.array([1, 10]))
        state_space = (self.observation_space.high - self.observation_space.low) * self.scales
        num_states = np.round(state_space, 0).astype(int) + 1
        print(f'Num states: {num_states}')
        # Q-table initialized with uniform noise in [-1, 1).
        self.Q = np.random.uniform(low=-1, high=1, size=(num_states[0], num_states[1], self.action_space.n))

    def _discretize(self, observation):
        """Map a continuous observation to integer grid indices."""
        observation = (observation - self.observation_space.low) * self.scales
        return np.round(observation, 0).astype(int)

    def act(self, observation, reward=0, done=False):
        """Performs an action according to an epsilon-greedy policy.

        NOTE(review): the greedy (argmax) branch fires with probability
        1 - epsilon, so the default epsilon=0.8 explores ~80% of the
        time — epsilon here reads as an exploration rate, not the usual
        exploitation rate. Confirm intent.
        """
        observation = self._discretize(observation)
        if np.random.random() < 1 - self.epsilon:
            return np.argmax(self.Q[observation[0], observation[1]])
        else:
            return np.random.randint(0, self.action_space.n)

    def update_Q_table(self, observation, new_observation, action, reward):
        """Apply the one-step Q-learning (TD) update for (s, a, r, s')."""
        observation = self._discretize(observation)
        new_observation = self._discretize(new_observation)
        # Q(s,a) += alpha * (r + gamma * max_a' Q(s',a') - Q(s,a))
        self.Q[observation[0], observation[1], action] += (
            self.alpha * (
                reward + self.gamma * np.max(self.Q[new_observation[0], new_observation[1]]) -
                self.Q[observation[0], observation[1], action]
            )
        )
if __name__ == "__main__":
pass
| [
11748,
299,
32152,
355,
45941,
198,
198,
6738,
3384,
4487,
1330,
1221,
1186,
1096,
62,
27219,
628,
198,
4871,
15906,
33529,
198,
220,
220,
220,
37227,
36772,
2779,
1398,
526,
15931,
628,
220,
220,
220,
825,
11593,
15003,
834,
7,
944,
... | 2.350184 | 1,088 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division, print_function)
from ansible.module_utils.basic import AnsibleModule
__metaclass__ = type
DOCUMENTATION = '''
---
module: public_cloud_instance_info
short_description: Retrieve all info for a OVH public cloud instnace
description:
- This module retrieves all info from a OVH public cloud instance
author: Synthesio SRE Team
requirements:
- ovh >= 0.5.0
options:
service_name:
required: true
description: The service_name
instance_id:
required: true
description: The instance uuid
'''
EXAMPLES = '''
synthesio.ovh.public_cloud_instance_info:
instance_id: "{{ instance_id }}"
service_name: "{{ service_name }}"
delegate_to: localhost
'''
RETURN = ''' # '''
from ansible_collections.synthesio.ovh.plugins.module_utils.ovh import ovh_api_connect, ovh_argument_spec
try:
from ovh.exceptions import APIError
HAS_OVH = True
except ImportError:
HAS_OVH = False
if __name__ == '__main__':
main()
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
6738,
11593,
37443,
834,
1330,
357,
48546,
62,
11748,
11,
7297,
11,
3601,
62,
8818,
8,
198,
198,
6738,
9093,
856,
13,
... | 2.665829 | 398 |
from __future__ import unicode_literals
from django.apps import AppConfig
| [
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
198,
6738,
42625,
14208,
13,
18211,
1330,
2034,
16934,
628
] | 3.619048 | 21 |
#
# _/_/_/ _/_/_/ _/_/_/ _/_/_/ _/ _/ _/_/_/
# _/ _/ _/ _/ _/ _/ _/ _/ _/
# _/_/_/ _/_/_/ _/ _/ _/_/ _/_/
# _/ _/ _/ _/ _/ _/ _/ _/ _/
# _/_/_/ _/ _/ _/_/_/ _/_/_/ _/ _/ _/_/_/
#
# By Vlad Ivanov, 2018.
# By Nikita Ivanov, 2018
#
# Email: vlad94568@gmail.com
from src.common import *
# Sparkling star.
# Initializes this star.
# Draws this star.
| [
2,
198,
2,
220,
220,
220,
220,
220,
4808,
47835,
47835,
14,
220,
220,
220,
4808,
47835,
47835,
14,
220,
220,
220,
4808,
47835,
47835,
14,
220,
220,
220,
4808,
47835,
47835,
14,
220,
4808,
14,
220,
220,
220,
4808,
14,
220,
220,
220... | 1.560897 | 312 |
import argparse
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
import codecs
import getpass
diagram = DbMap()
diagram.run()
| [
11748,
1822,
29572,
198,
6738,
44161,
282,
26599,
1330,
2251,
62,
18392,
198,
6738,
44161,
282,
26599,
13,
579,
1330,
6246,
10297,
198,
11748,
40481,
82,
198,
11748,
651,
6603,
628,
198,
10989,
6713,
796,
360,
65,
13912,
3419,
198,
1098... | 3.391304 | 46 |
# Generated by Django 3.2 on 2021-04-11 19:39
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
513,
13,
17,
319,
33448,
12,
3023,
12,
1157,
678,
25,
2670,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 2.966667 | 30 |
from functools import partial
from tornado import gen
from tornado.gen import UnknownKeyError
from tornado.ioloop import IOLoop
version_tuple = (0, 1, '+')
version = '.'.join(map(str, version_tuple))
"""Current version of YieldPoints."""
__all__ = [
'TimeoutException', 'WaitAny', 'WithTimeout', 'Timeout', 'Cancel',
'CancelAll'
]
class WaitAny(gen.YieldPoint):
"""Wait for several keys, and continue when the first of them is complete.
Inspired by Ben Darnell in `a conversation on the Tornado mailing list
<https://groups.google.com/d/msg/python-tornado/PCHidled01M/B7sDjNP2OpQJ>`_.
"""
class WithTimeout(gen.YieldPoint):
"""Wait for a YieldPoint or a timeout, whichever comes first.
:Parameters:
- `deadline`: A timestamp or timedelta
- `yield_point`: A ``gen.YieldPoint`` or a key
- `io_loop`: Optional custom ``IOLoop`` on which to run timeout
"""
class Cancel(gen.YieldPoint):
"""Cancel a key so ``gen.engine`` doesn't raise a LeakedCallbackError
"""
class CancelAll(gen.YieldPoint):
"""Cancel all keys for which the current coroutine has registered callbacks
"""
| [
6738,
1257,
310,
10141,
1330,
13027,
198,
198,
6738,
33718,
1330,
2429,
198,
6738,
33718,
13,
5235,
1330,
16185,
9218,
12331,
198,
6738,
33718,
13,
1669,
11224,
1330,
314,
3535,
11224,
628,
198,
9641,
62,
83,
29291,
796,
357,
15,
11,
... | 2.895 | 400 |
import re
import types
from collections import defaultdict
from itertools import izip
from nearpy.storage.storage import Storage
class MemoryStorage(Storage):
    """ Storage in memory.

    Buckets are kept in ``self.buckets`` (a dict), with every bucket key
    namespaced by ``self.keyprefix`` (both inherited from ``Storage``).
    NOTE(review): uses ``types.ListType`` and is therefore Python 2 code.
    """

    def clear(self, bucketkeys):
        """
        Delete the given buckets from the in-memory store.

        Parameters
        ----------
        bucketkeys: string or iterable of string
            keys of the buckets to delete

        Return
        ------
        count: int
            number of buckets cleared
        """
        # Accept a single key as well as a list/generator of keys.
        # NOTE(review): a tuple or set of keys would NOT be recognized here
        # and would be wrapped as a single (unhashable) key — confirm the
        # intended input types.
        if not isinstance(bucketkeys, types.ListType) and not isinstance(bucketkeys, types.GeneratorType):
            bucketkeys = [bucketkeys]
        count = 0
        for bucketkey in bucketkeys:
            # Bucket names are namespaced with the storage-wide key prefix.
            key = self.keyprefix + "_" + bucketkey
            if key in self.buckets:
                del self.buckets[key]
                count += 1
        return count

    def count(self, bucketkeys):
        """
        Parameters
        ----------
        bucketkeys: iterable of string
            keys from which to retrieve values

        Return
        ------
        counts: list of int
            size of each given bucket
        """
        counts = []
        # NOTE(review): unlike clear(), every lookup here appends a
        # "_patch" suffix to the bucket key, and a missing bucket raises
        # KeyError instead of counting 0 — confirm both are intended.
        suffix = "_patch"
        for bucketkey in bucketkeys:
            key = self.keyprefix + "_" + bucketkey + suffix
            counts.append(len(self.buckets[key]))
        return counts
| [
11748,
302,
198,
11748,
3858,
198,
198,
6738,
17268,
1330,
4277,
11600,
198,
6738,
340,
861,
10141,
1330,
220,
528,
541,
198,
6738,
1474,
9078,
13,
35350,
13,
35350,
1330,
20514,
628,
198,
4871,
14059,
31425,
7,
31425,
2599,
198,
220,
... | 2.280193 | 621 |
from .chunked_embedding import chunked_embedding, script_chunked_embedding
from .generate_prefetching_rule import generate_prefetching_rule
from .lazy_loading import lazy_loading
| [
6738,
764,
354,
2954,
276,
62,
20521,
12083,
1330,
16058,
276,
62,
20521,
12083,
11,
4226,
62,
354,
2954,
276,
62,
20521,
12083,
198,
6738,
764,
8612,
378,
62,
3866,
69,
7569,
278,
62,
25135,
1330,
7716,
62,
3866,
69,
7569,
278,
62,... | 3.196429 | 56 |
#!/usr/bin/env python
from distutils.core import setup
import sys, os
# For some commands, use setuptools
if len(set(['develop', 'sdist', 'release', 'bdist_egg', 'bdist_rpm', 'bdist',
'bdist_dumb', 'bdist_wininst', 'install_egg_info', 'build_sphinx',
'egg_info', 'easy_install', 'upload']).intersection(sys.argv)) > 0:
from setupegg import extra_setuptools_args
# extra_setuptools_args is injected by the setupegg.py script, for
# running the setup with setuptools.
if not 'extra_setuptools_args' in globals():
extra_setuptools_args = dict()
LONG_DESCRIPTION = """
PyXNAT
======
**pyxnat** provides an API to access data on XNAT (see http://xnat.org)
servers.
Visit https://pyxnat.github.io/pyxnat for more information.
"""
setup(name='pyxnat',
version=get_version(),
summary='XNAT in Python',
author='Yannick Schwartz',
author_email='yannick.schwartz@cea.fr',
url='http://packages.python.org/pyxnat/',
packages=['pyxnat'],
package_data={'pyxnat': ['core/*.py', '*.py'], },
description="""XNAT in Python""",
long_description=LONG_DESCRIPTION,
license='BSD',
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'Intended Audience :: Education',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Topic :: Scientific/Engineering',
'Topic :: Utilities',
'Topic :: Internet :: WWW/HTTP',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 2.7',
],
platforms='any',
install_requires=['lxml>=4.3', 'requests>=2.20', 'requests[security]'],
**extra_setuptools_args)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
6738,
1233,
26791,
13,
7295,
1330,
9058,
198,
11748,
25064,
11,
28686,
198,
198,
2,
1114,
617,
9729,
11,
779,
900,
37623,
10141,
198,
361,
18896,
7,
2617,
7,
17816,
16244,
3256,
... | 2.445013 | 782 |
import asyncio
import aiohttp
import random
import requests
import time
from settings import string, TOKEN
ID = []
# with open('message.txt', mode='r', encoding='utf8'):
asyncio.run(main())
| [
11748,
30351,
952,
198,
11748,
257,
952,
4023,
198,
11748,
4738,
198,
11748,
7007,
198,
11748,
640,
198,
6738,
6460,
1330,
4731,
11,
5390,
43959,
198,
198,
2389,
796,
17635,
198,
198,
2,
351,
1280,
10786,
20500,
13,
14116,
3256,
4235,
... | 3.126984 | 63 |
import requests
import pandas as pd
import datetime as dt
from bs4 import BeautifulSoup
def my_to_datetime(date_str):
    """Parse a 'dd/mm/YYYY_HH:MM' string, mapping hour 24 to the next day.

    CETESB exports label hours 01-24 while pandas expects 00-23, so a
    '24:MM' timestamp is rewritten as '00:MM' and shifted forward one day.
    Based on: https://stackoverflow.com/questions/43359479/pandas-parsing-2400-instead-of-0000

    Parameters
    ----------
    date_str : str
        Timestamp formatted as '%d/%m/%Y_%H:%M'.

    Returns
    -------
    Timestamp
        The parsed date in the 00-23 hour convention.
    """
    fmt = '%d/%m/%Y_%H:%M'
    hour = date_str[11:13]
    if hour == '24':
        # Replace the hour field in place and roll over to the next day.
        shifted = date_str[:11] + '00' + date_str[13:]
        return pd.to_datetime(shifted, format=fmt) + dt.timedelta(days=1)
    return pd.to_datetime(date_str, format=fmt)
def cetesb_data_download(cetesb_login, cetesb_password,
                         start_date, end_date,
                         parameter, station, file_name=None, csv=False):
    """
    Download a parameter for one Air Quality Station station
    from CETESB AQS network

    Parameters
    ----------
    cetesb_login : str
        cetesb qualar username.
    cetesb_password : str
        cetesb qualar username's password.
    start_date : str
        date to start download in %dd/%mm/%YYYY.
    end_date : str
        date to end download in %dd/%mm/%YYYY.
    parameter : int
        parameter code.
    station : int
        AQS code.
    file_name : str, optional
        Name of csv file.
    csv : Bool, optional
        Export to csv file. The default is False.

    Returns
    -------
    dat_complete : pandas DataFrame
        DataFrame with a column with date and parameter values.
        Only returned when ``csv`` is False; with ``csv=True`` the data
        is written to disk and nothing is returned.
    """
    # Credentials for the Qualar login form.
    login_data = {
        'cetesb_login': cetesb_login,
        'cetesb_password': cetesb_password
    }
    # Field names mirror the Qualar "exportaDados" search form.
    search_data = {
        'irede': 'A',
        'dataInicialStr': start_date,
        'dataFinalStr': end_date,
        'iTipoDado': 'P',
        'estacaoVO.nestcaMonto': station,
        'parametroVO.nparmt': parameter
    }
    with requests.Session() as s:
        # Log in first so the session cookie authorizes the export request;
        # the login response itself is not inspected.
        url = "https://qualar.cetesb.sp.gov.br/qualar/autenticador"
        r = s.post(url, data=login_data)
        # The search response carries the results as an HTML table.
        url2 = "https://qualar.cetesb.sp.gov.br/qualar/exportaDados.do?method=pesquisar"
        r = s.post(url2, data=search_data)
    # Scrape the results table (id='tbl'); the first two rows are headers.
    soup = BeautifulSoup(r.content, 'lxml')
    data = []
    table = soup.find('table', attrs={'id': 'tbl'})
    rows = table.find_all('tr')
    row_data = rows[2:]
    for row in row_data:
        cols = row.find_all('td')
        cols = [ele.text.strip() for ele in cols]
        # Drop empty cells so each row keeps only populated columns.
        data.append([ele for ele in cols if ele])
    dat = pd.DataFrame(data)
    # Creating a complete df with all dates
    day1 = pd.to_datetime(start_date, format='%d/%m/%Y')
    # One extra day so the 24:00 -> 00:00 rollover of the last day fits.
    day2 = pd.to_datetime(end_date, format='%d/%m/%Y') + dt.timedelta(days=1)
    all_date = pd.DataFrame(index=pd.date_range(day1.strftime('%m/%d/%Y'),
                                                day2.strftime('%m/%d/%Y'),
                                                freq='H'))
    if len(dat) <= 1:
        # Empty scrape: build an empty frame with the expected columns so
        # the transformations and join below still work.
        dat = pd.DataFrame(columns=['day', 'hour', 'name', 'pol_name', 'units', 'val'])
    else:
        # Keep day, hour, station name, parameter name, units and value.
        dat = dat[[3, 4, 6, 7, 8, 9]]
        dat.columns = ['day', 'hour', 'name', 'pol_name', 'units', 'val']
    dat['date'] = dat.day + '_' + dat.hour
    # Changing date type to string to datestamp
    dat['date'] = dat.date.apply(my_to_datetime)
    # Changing val type from string/object to numeric
    # (Qualar uses a decimal comma).
    dat['val'] = dat.val.str.replace(',', '.').astype(float)
    # Filling empty dates
    dat.set_index('date', inplace=True)
    dat_complete = all_date.join(dat)
    dat_complete = dat_complete.drop(['day', 'hour'], axis=1)
    if file_name:
        file_name = file_name + '.csv'
    else:
        # NOTE(review): the default '<parameter>_<station>' name is written
        # without a '.csv' extension, unlike the user-supplied name —
        # confirm this asymmetry is intended.
        file_name = str(parameter) + '_' + str(station)
    if csv:
        dat_complete.to_csv(file_name, index_label='date')
    else:
        return dat_complete
def all_met(cetesb_login, cetesb_password, start_date, end_date, station,
            in_k=False, rm_flag=True, file_name=None, csv_met=False):
    """
    Download meteorological parameters

    Parameters
    ----------
    cetesb_login : str
        cetesb qualar username.
    cetesb_password : str
        cetesb qualar username's password.
    start_date : str
        date to start download in %dd/%mm/%YYYY.
    end_date : str
        date to end download in %dd/%mm/%YYYY.
    station : int
        AQS code.
    in_k : Bool, optional
        Temperature in Kelvin. The default is False.
    rm_flag : Bool, optional
        Filter wind calm and no values wind direction. The default is True.
    file_name : str, optional
        Name of csv file.
    csv_met : Bool, optional
        Export to csv file. The default is False.

    Returns
    -------
    all_met_df : pandas DataFrame
        Data Frame with date index (America/Sao_Paulo),
        TC, RH, WS and WD columns.
    """
    # Qualar parameter codes, per the variable each is stored in:
    # 25 -> temperature (tc), 28 -> relative humidity (rh),
    # 24 -> wind speed (ws), 23 -> wind direction (wd).
    tc = cetesb_data_download(cetesb_login, cetesb_password,
                              start_date, end_date, 25, station)
    rh = cetesb_data_download(cetesb_login, cetesb_password,
                              start_date, end_date, 28, station)
    ws = cetesb_data_download(cetesb_login, cetesb_password,
                              start_date, end_date, 24, station)
    wd = cetesb_data_download(cetesb_login, cetesb_password,
                              start_date, end_date, 23, station)
    # Optional Celsius -> Kelvin conversion; K = 0 leaves values unchanged.
    if in_k:
        K = 273.15
    else:
        K = 0
    all_met_df = pd.DataFrame({
        't2': tc.val + K,
        'rh2': rh.val,
        'ws': ws.val,
        'wd': wd.val
    }, index=tc.index)
    # all_met_df.index = all_met_df.index.tz_localize('America/Sao_Paulo')
    # Filtering 777 and 888 values
    # (flag values above 360 degrees mark calm wind / missing direction;
    # they are replaced with NaN, only in the 'wd' column).
    if rm_flag:
        filter_flags = all_met_df['wd'] <= 360
        all_met_df['wd'].where(filter_flags, inplace=True)
    if file_name:
        file_name = file_name + '.csv'
    else:
        file_name = 'all_met_' + str(station) + '.csv'
    # Export to csv
    if csv_met:
        all_met_df.to_csv(file_name, index_label='date')
    else:
        return all_met_df
| [
11748,
7007,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
4818,
8079,
355,
288,
83,
198,
6738,
275,
82,
19,
1330,
23762,
50,
10486,
628,
198,
4299,
616,
62,
1462,
62,
19608,
8079,
7,
4475,
62,
2536,
2599,
198,
220,
220,
220,
... | 2.083105 | 2,924 |
from dataclasses import dataclass
from bindings.gmd.time_topology_complex_type import TimeTopologyComplexType
__NAMESPACE__ = "http://www.opengis.net/gml"
@dataclass
class TimeTopologyComplex(TimeTopologyComplexType):
    """A temporal topology complex shall be the connected acyclic directed
    graph composed of temporal topological primitives, i.e. time nodes and time
    edges.

    Because a time edge may not exist without two time nodes on its
    boundaries, static features have time edges from a temporal topology
    complex as the values of their temporal properties, regardless of
    explicit declarations. A temporal topology complex expresses a
    linear or a non-linear graph. A temporal linear graph, composed of a
    sequence of time edges, provides a lineage described only by
    “substitution” of feature instances or feature element values. A
    time node as the start or the end of the graph connects with at
    least one time edge. A time node other than the start and the end
    shall connect to at least two time edges: one of starting from the
    node, and another ending at the node. A temporal topological complex
    is a set of connected temporal topological primitives. The member
    primtives are indicated, either by reference or by value, using the
    primitive property.
    """
    # All fields are inherited from TimeTopologyComplexType.
    # NOTE(review): this looks like generated XML-binding code (see the
    # module-level __NAMESPACE__) — prefer fixing the generator over
    # hand-editing this class.
| [
6738,
4818,
330,
28958,
1330,
4818,
330,
31172,
198,
6738,
34111,
13,
70,
9132,
13,
2435,
62,
4852,
1435,
62,
41887,
62,
4906,
1330,
3862,
9126,
1435,
5377,
11141,
6030,
198,
198,
834,
45,
29559,
47,
11598,
834,
796,
366,
4023,
1378,
... | 3.70195 | 359 |
import logging
from aiohttp import web
log = logging.getLogger(__name__)
routes = web.RouteTableDef()
@routes.get(r'/fonts/{fontstack:\s+}/{range:\s+}.pbf', name='tiles')
@routes.get(r'/tiles/{z:\d+}/{x:\d+}/{y:\d+}.pbf', name='tiles')
| [
11748,
18931,
198,
198,
6738,
257,
952,
4023,
1330,
3992,
628,
198,
6404,
796,
18931,
13,
1136,
11187,
1362,
7,
834,
3672,
834,
8,
198,
81,
448,
274,
796,
3992,
13,
43401,
10962,
7469,
3419,
628,
198,
31,
81,
448,
274,
13,
1136,
7... | 2.131579 | 114 |
from .bitbucket import Bitbucket
from .github import Github
from .gitlab import Gitlab
from .gogs import Gogs
from .gitea import Gitea
| [
6738,
764,
2545,
27041,
316,
1330,
4722,
27041,
316,
198,
6738,
764,
12567,
1330,
38994,
198,
6738,
764,
18300,
23912,
1330,
15151,
23912,
198,
6738,
764,
70,
18463,
1330,
402,
18463,
198,
6738,
764,
70,
578,
64,
1330,
402,
578,
64,
1... | 3.214286 | 42 |
"""
Plot input and reconstruction statistics, as generated by calculate_encodings.py
with the --statistics flag, with output file pd_friendly_stats.txt.
Usage:
"""
import argparse
from pathlib import Path
import numpy as np
import pandas as pd
import seaborn as sns
from matplotlib import rcParams as rc
from utilities.gnina_functions import Timer, format_time
def read_channel_map(filename):
    """Gives a function mapping channel number to atom type.

    Arguments:
        filename: path to ligmap or recmap to be read; None yields the
            identity mapping with a channel count of -1.

    Returns:
        Tuple containing the number of channels in the atom map, and a
        function which maps from a channel number (int or str) to its
        atom type as specified in the atom map; unknown channels are
        returned unchanged.
    """
    if filename is None:
        return -1, lambda x: x
    path = Path(filename).expanduser().resolve()
    with open(path, 'r') as handle:
        lines = handle.read().strip().split('\n')
    # Index each line's whitespace-joined atom types by channel number,
    # under both the string and the integer form of the channel.
    by_str = {}
    by_int = {}
    for channel, name in enumerate(lines):
        label = '\n'.join(name.split())
        by_str[str(channel)] = label
        by_int[channel] = label
    return len(by_str), lambda x: by_str.get(x, by_int.get(x, x))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('input_dirs', type=str, nargs='?',
help='Directories containing, at some level, '
'files named "pd_friendly_stats.txt"')
parser.add_argument('--output_filename', type=str,
default='~/hist.png', help='Name of output plot')
parser.add_argument('--recmap', '-r', type=str, required=False,
help='Atom map with different smina types on each line')
parser.add_argument('--ligmap', '-l', type=str, required=False,
help='Atom map with different smina types on each line')
parser.add_argument('--reconstructions_only', '-o', action='store_true',
help='Only plot reconstruction stats (useful for '
'binary inputs)')
args = parser.parse_args()
main(args)
| [
37811,
198,
43328,
5128,
290,
25056,
7869,
11,
355,
7560,
416,
15284,
62,
12685,
375,
654,
13,
9078,
198,
4480,
262,
1377,
14269,
3969,
6056,
11,
351,
5072,
2393,
279,
67,
62,
13120,
62,
34242,
13,
14116,
13,
198,
198,
28350,
25,
19... | 2.483081 | 857 |
from app.conf.environ import env
NOTION_TOKEN = env('NOTION_TOKEN', cast=str, default='')
| [
6738,
598,
13,
10414,
13,
268,
2268,
1330,
17365,
198,
198,
11929,
2849,
62,
10468,
43959,
796,
17365,
10786,
11929,
2849,
62,
10468,
43959,
3256,
3350,
28,
2536,
11,
4277,
28,
7061,
8,
198
] | 2.676471 | 34 |
# Introduction
# In this lesson we saw how to build an image classifier by attaching a head
# of dense layers to a pretrained base. The base we used came from a model
# called VGG16. We saw that the VGG16 architecture was prone to overfitting
# this dataset. Over this course you will learn a number of ways to improve
# on this initial attempt.
# The first way you will see is to use a base more appropriate for the
# dataset. The base this model comes from is called InceptionV1 (also known
# as GoogLeNet). InceptionV1 was one of the early winners of the ImageNet
# competition. One of its successors, InceptionV4, is among today's state of
# the art.

# Setup feedback system
from learntools.core import binder
binder.bind(globals())
from learntools.computer_vision.ex1 import *

# Imports
import os, warnings
import matplotlib.pyplot as plt
from matplotlib import gridspec
import numpy as np
import tensorflow as tf
from tensorflow.keras.preprocessing import image_dataset_from_directory

# Reproducibility
set_seed()

# Set Matplotlib defaults
plt.rc('figure', autolayout=True)
plt.rc('axes', labelweight='bold', labelsize='large',
       titleweight='bold', titlesize=18, titlepad=10)
plt.rc('image', cmap='magma')
warnings.filterwarnings("ignore") # to clean up output cells

# Load training and validation sets
ds_train_ = image_dataset_from_directory(
    '../input/car-or-truck/train',
    labels='inferred',
    label_mode='binary',
    image_size=[128, 128],
    interpolation='nearest',
    batch_size=64,
    shuffle=True,
)
ds_valid_ = image_dataset_from_directory(
    '../input/car-or-truck/valid',
    labels='inferred',
    label_mode='binary',
    image_size=[128, 128],
    interpolation='nearest',
    batch_size=64,
    shuffle=False,
)

# Data Pipeline
AUTOTUNE = tf.data.experimental.AUTOTUNE
ds_train = (
    ds_train_
    .map(convert_to_float)
    .cache()
    .prefetch(buffer_size=AUTOTUNE)
)
ds_valid = (
    ds_valid_
    .map(convert_to_float)
    .cache()
    .prefetch(buffer_size=AUTOTUNE)
)

# The InceptionV1 model pretrained on ImageNet is available in the
# TensorFlow Hub repository, but we will load it from a local copy. Run this
# cell to load InceptionV1 for your base.
import tensorflow_hub as hub

pretrained_base = tf.keras.models.load_model(
    '../input/cv-course-models/cv-course-models/inceptionv1'
)

# 1) Define the pretrained base
# Now that you have a pretrained base to extract features with, decide
# whether this base should be trainable or not.
# YOUR_CODE_HERE
pretrained_base.trainable = False
# Correct: when doing transfer learning it is generally not a good idea to
# retrain the entire base — at least not without some care. The reason is
# that the random weights in the head initially produce large gradient
# updates, which propagate back into the base layers and destroy much of
# the pretraining. Using techniques known as fine-tuning it is possible to
# further train the base on new data, but this requires some care.

# 2) Attach a head
# Now that the base is defined for feature extraction, create a head of
# dense layers to perform the classification.
# Hint: you need to add two new dense layers. The first should have 6 units
# and relu activation. The second should have 1 unit and 'sigmoid'
# activation.
from tensorflow import keras
from tensorflow.keras import layers

model = keras.Sequential([
    pretrained_base,
    layers.Flatten(),
    # YOUR CODE HERE. Attach a head of dense layers.
    layers.Dense(6, activation='relu'),
    layers.Dense(1, activation='sigmoid'),
])

# 3) Training
# Before training a model in Keras you need to specify an optimizer to
# perform gradient descent, a loss function to be minimized, and
# (optionally) any performance metrics. The optimization algorithm we will
# use in this course is called 'Adam', which generally performs well
# regardless of the problem you are trying to solve.
# The loss and metrics, however, must match the problem you are trying to
# solve. Ours is a binary classification problem: cars are coded as 0 and
# trucks as 1. Choose an appropriate loss and an appropriate accuracy
# metric for binary classification.
# NOTE(review): the Adam instance below (with epsilon=0.01) is never used —
# compile() is passed the string 'adam', which creates a default Adam
# optimizer. Confirm which configuration is intended.
optimizer = tf.keras.optimizers.Adam(epsilon=0.01)
model.compile(
    optimizer='adam',
    loss='binary_crossentropy',
    metrics=['binary_accuracy'],
)

# train
history = model.fit(
    ds_train,
    validation_data=ds_valid,
    epochs=30,
)

# analysis
import pandas as pd

history_frame = pd.DataFrame(history.history)
history_frame.loc[:, ['loss', 'val_loss']].plot()
history_frame.loc[:, ['binary_accuracy', 'val_binary_accuracy']].plot();

# 4) Examine loss and accuracy
# Do you notice a difference between these learning curves and the curves
# for VGG16 from the tutorial? What does that difference tell you about
# what this model (InceptionV2) learned compared to VGG16? Are there ways
# in which one is better than the other? Worse?
# Once you have thought about it, run the cell below to see the answer.

# That the training loss and validation loss stay fairly close is evidence
# that the model is not simply memorizing the training data, but rather
# learning general properties of the two classes. But because this model
# converges at a higher loss than the VGG16 model did, it is likely that it
# is underfitting somewhat and could benefit from some extra capacity.

# Conclusion
# In this first lesson you learned the basics of convolutional image
# classifiers, which consist of a base for extracting features from images
# and a head that uses the features to determine the image's class. You
# also saw how to build a classifier with transfer learning on a
# pretrained base.
| [
2,
12466,
240,
38857,
16843,
43666,
16843,
22177,
18849,
16843,
198,
2,
12466,
240,
220,
141,
235,
20375,
25443,
120,
220,
35072,
21169,
25443,
118,
16843,
12466,
120,
45035,
220,
35072,
38857,
18849,
43666,
16843,
30143,
18849,
11,
12466,
... | 1.255448 | 4,772 |
# Uses python3
import sys
input = sys.stdin.read()
n = int(input)
sequence = list(optimal_sequence(n))
print(len(sequence) - 1)
for x in sequence:
print(x, end=' ')
| [
2,
36965,
21015,
18,
198,
11748,
25064,
198,
198,
15414,
796,
25064,
13,
19282,
259,
13,
961,
3419,
198,
77,
796,
493,
7,
15414,
8,
198,
43167,
796,
1351,
7,
8738,
4402,
62,
43167,
7,
77,
4008,
198,
4798,
7,
11925,
7,
43167,
8,
... | 2.575758 | 66 |
import cv2
import numpy as np
def get_boundary_point(y, x, angle, H, W):
    '''
    Given point (y, x) and an angle, return the two points where the line
    through (y, x) at that angle crosses the boundary of an image of shape
    [H, W].

    Parameters:
        y, x: coordinates (row, column) of a point on the line.
        angle: line angle in radians; -pi/2 is vertical, 0.0 is horizontal.
        H, W: image height and width.

    Returns:
        (point1, point2): boundary points, each as an (x, y) tuple.
        point2 duplicates point1 when only one distinct boundary pixel is
        hit; point1 is None when the line misses the image entirely.
    '''
    point1 = None
    point2 = None
    if angle == -np.pi / 2:
        # Vertical line: tan() is undefined here, so handle explicitly.
        # NOTE(review): +pi/2 is not special-cased and falls through to the
        # tan() branch — confirm callers normalize angles to [-pi/2, pi/2).
        point1 = (x, 0)
        point2 = (x, H - 1)
    elif angle == 0.0:
        # Horizontal line: slope would be 0 and the x - y/k terms below
        # would divide by zero.
        point1 = (0, y)
        point2 = (W - 1, y)
    else:
        k = np.tan(angle)
        # Left border (x = 0): intercept is y - k*x.
        if 0 <= y - k * x < H:
            if point1 is None:
                point1 = (0, int(y - k * x))
            elif point2 is None:
                point2 = (0, int(y - k * x))
            if point2 == point1:
                point2 = None
        # Right border (x = W-1).
        if 0 <= k * (W - 1) + y - k * x < H:
            if point1 is None:
                point1 = (W - 1, int(k * (W - 1) + y - k * x))
            elif point2 is None:
                point2 = (W - 1, int(k * (W - 1) + y - k * x))
            if point2 == point1:
                point2 = None
        # Top border (y = 0): x-intercept is x - y/k.
        if 0 <= x - y / k < W:
            if point1 is None:
                point1 = (int(x - y / k), 0)
            elif point2 is None:
                point2 = (int(x - y / k), 0)
            if point2 == point1:
                point2 = None
        # Bottom border (y = H-1).
        if 0 <= x - y / k + (H - 1) / k < W:
            if point1 is None:
                point1 = (int(x - y / k + (H - 1) / k), H - 1)
            elif point2 is None:
                point2 = (int(x - y / k + (H - 1) / k), H - 1)
            if point2 == point1:
                point2 = None
    # A line touching the image in a single pixel (e.g. a corner) yields
    # only one point; duplicate it so callers always get a pair.
    if point2 is None:
        point2 = point1
    return point1, point2
| [
11748,
269,
85,
17,
198,
11748,
299,
32152,
355,
45941,
628,
198,
4299,
651,
62,
7784,
560,
62,
4122,
7,
88,
11,
2124,
11,
9848,
11,
367,
11,
370,
2599,
198,
220,
220,
220,
705,
7061,
198,
220,
220,
220,
11259,
966,
331,
11,
87,... | 1.686492 | 992 |
from test_case import *
import numpy as np
import sail
import time
import unittest
elementwise_options = [(12,), (512, 128), (3, 14, 2), (8, 12, 12, 12), (3, 1, 5, 6), (13, 14)]
broadcasted_options = [(512, 128), (3, 14, 2), (8, 12, 12, 12), (3, 1, 5, 6), (13, 14)]
unary_elementwise_options = [(12,), (32, 12), (3, 14, 2), (8, 12, 12, 12), (3, 1, 5, 6), (13, 14)]
unary_broadcasted_options = [(32, 12), (3, 14, 2), (8, 12, 12, 12), (3, 1, 5, 6), (13, 14)]
grad_options = [(32, 3, 5), (3), (1), (2, 33, 2, 5)]
## 1
| [
6738,
1332,
62,
7442,
1330,
1635,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
14936,
198,
11748,
640,
198,
11748,
555,
715,
395,
198,
198,
30854,
3083,
62,
25811,
796,
47527,
1065,
11,
828,
357,
25836,
11,
13108,
828,
357,
18,
11,... | 2.189076 | 238 |
import re
from django.conf import settings
from django.http import HttpResponsePermanentRedirect, HttpResponseRedirect
from django.utils.timezone import now
from .models import PageNotFoundEntry
IGNORED_404S = getattr(settings, 'IGNORED_404S', [
r'^/static/',
r'^/favicon.ico'
])
| [
11748,
302,
198,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
6738,
42625,
14208,
13,
4023,
1330,
367,
29281,
31077,
47,
30312,
7738,
1060,
11,
367,
29281,
31077,
7738,
1060,
198,
6738,
42625,
14208,
13,
26791,
13,
2435,
11340,
... | 2.90099 | 101 |
# -*- coding: utf-8 -*-
"""This file imports Python modules that registers scaffolders."""
from l2tscaffolder.scaffolders import plaso_sqlite
from l2tscaffolder.scaffolders import timesketch_index
from l2tscaffolder.scaffolders import timesketch_sketch
from l2tscaffolder.scaffolders import turbinia
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
1212,
2393,
17944,
11361,
13103,
326,
28441,
41498,
727,
364,
526,
15931,
198,
6738,
300,
17,
912,
66,
2001,
19892,
13,
1416,
2001,
727,
364,
1330,
458,
292,
78,
... | 2.884615 | 104 |
""" UTPM == Univariate Taylor Polynomial of Matrices.
UTPM arithmetic means to apply functions to
..math::
[A]_D = \sum_{d=0}^{D-1} A_d t^d
A_d = \frac{d^d}{dt^d}|_{t=0} \sum_{k=0}^{D-1} A_k t^k
The underlying data structure is a numpy.array of shape (D,P,N,M)
where
D: D number of coefficients, i.e. D-1 is the degree of the polynomial
P: number of directions
N: number of rows of the matrix A
M: number of cols of the matrix A
The data structure is stored in the attribute UTPM.data and can be accessed.
Module Structure:
~~~~~~~~~~~~~~~~~
utpm.algorithms:
algorithms that operate directly on the (D,P,N,M) numpy.array.
utpm.utpm:
Implementation of the class UTPM that makes is a thin wrapper for the
algorithms implemented in utpm.algorithms.
"""
from .utpm import *
| [
37811,
19255,
5868,
6624,
791,
42524,
8121,
12280,
26601,
498,
286,
6550,
45977,
13,
198,
198,
3843,
5868,
34768,
1724,
284,
4174,
5499,
284,
198,
492,
11018,
3712,
198,
220,
220,
220,
685,
32,
60,
62,
35,
796,
3467,
16345,
23330,
67,... | 2.74744 | 293 |
from setuptools import find_packages, setup
setup(
name='painterbot',
packages=find_packages(where="src/painterbot"),
use_scm_version={
"write_to": "src/painterbot/_version.py",
'fallback_version': "0.0.0-server",
},
license='Apache-2.0',
author='Marcelo Duarte Trevisani',
author_email='marceloduartetrevisani@gmail.com',
description='Bot example',
python_requires=">=3.7",
install_requires=["gidgethub", "cachetools", "aiohttp", "gitpython", "pre-commit", "gcg"],
setup_requires=["setuptools_scm"],
extras_require={"testing": ["pytest", "mock", "pytest-cov"]},
entry_points={
"console_scripts": [
"painterbot = painterbot.__main__:init_webapp",
]
},
)
| [
6738,
900,
37623,
10141,
1330,
1064,
62,
43789,
11,
9058,
198,
198,
40406,
7,
198,
220,
220,
220,
1438,
11639,
35436,
353,
13645,
3256,
198,
220,
220,
220,
10392,
28,
19796,
62,
43789,
7,
3003,
2625,
10677,
14,
35436,
353,
13645,
1234... | 2.323077 | 325 |
from typing import Any, List
from torch import nn
from torchmetrics import MetricCollection, Accuracy, F1, Precision, Recall, CohenKappa, MatthewsCorrcoef, AUROC
from torchmetrics import CosineSimilarity, MeanAbsoluteError, MeanAbsolutePercentageError, MeanSquaredError, PearsonCorrcoef, R2Score, SpearmanCorrcoef
import wandb
from typing import Dict
import pytorch_lightning as pl
import torch
from pytorch_tabnet.tab_network import TabNet
from pytorch_tabnet.utils import create_explain_matrix
| [
6738,
19720,
1330,
4377,
11,
7343,
198,
6738,
28034,
1330,
299,
77,
198,
6738,
28034,
4164,
10466,
1330,
3395,
1173,
36307,
11,
33222,
11,
376,
16,
11,
39281,
11,
44536,
11,
17154,
42,
20975,
11,
22233,
10606,
81,
1073,
891,
11,
317,
... | 3.427586 | 145 |
import os
import logging
from .consts import *
# Language - settings
lang = "eng"
# Logger - settings
available_languages = ["pl", "eng"]
LOGGING_FORMAT = "\r%(asctime)s, %(levelname)-8s [`%(funcName)s` %(filename)s:%(lineno)d] %(message)s"
# Api dates format
DATE_FORMAT = "%Y-%m-%d"
| [
11748,
28686,
198,
11748,
18931,
198,
198,
6738,
764,
1102,
6448,
1330,
1635,
198,
198,
2,
15417,
532,
6460,
198,
17204,
796,
366,
1516,
1,
198,
198,
2,
5972,
1362,
532,
6460,
198,
15182,
62,
75,
33213,
796,
14631,
489,
1600,
366,
1... | 2.377049 | 122 |
import json
from django.views.generic import TemplateView
from django.conf import settings
from django.http import Http404
from base import mods
| [
11748,
33918,
198,
6738,
42625,
14208,
13,
33571,
13,
41357,
1330,
37350,
7680,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
6738,
42625,
14208,
13,
4023,
1330,
367,
29281,
26429,
198,
198,
6738,
2779,
1330,
13743,
628
] | 3.868421 | 38 |
from netqasm.sdk.epr_socket import EprMeasBasis, EPRSocket
from netqasm.sdk.external import NetQASMConnection
if __name__ == "__main__":
main()
| [
6738,
2010,
80,
8597,
13,
21282,
74,
13,
538,
81,
62,
44971,
1330,
412,
1050,
5308,
292,
15522,
271,
11,
412,
4805,
39105,
198,
6738,
2010,
80,
8597,
13,
21282,
74,
13,
22615,
1330,
3433,
48,
1921,
44,
32048,
628,
198,
198,
361,
1... | 2.559322 | 59 |
# Copyright (c) AIRBUS and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
import pickle
from skdecide.discrete_optimization.generic_tools.do_problem import Solution, Problem, \
build_evaluate_function_aggregated, ModeOptim, ParamsObjectiveFunction
from skdecide.discrete_optimization.generic_tools.ls.local_search import RestartHandler, \
ModeMutation, ResultLS
from skdecide.discrete_optimization.generic_tools.do_mutation import Mutation
from skdecide.discrete_optimization.generic_tools.result_storage.result_storage import ResultStorage, ParetoFront
| [
2,
15069,
357,
66,
8,
31600,
45346,
290,
663,
29116,
13,
198,
2,
770,
2723,
2438,
318,
11971,
739,
262,
17168,
5964,
1043,
287,
262,
198,
2,
38559,
24290,
2393,
287,
262,
6808,
8619,
286,
428,
2723,
5509,
13,
198,
198,
6738,
11593,
... | 3 | 251 |
# coding: utf-8
import sys
import numpy as np
from Node import Node
# set the maximal recursion limits here.
sys.setrecursionlimit(10000)
class baseClassDecisionTree(object):
'''
The main class of decision tree.
'''
def __init__(self, feature_discrete=[], treeType='C4.5'):
'''
feature_discrete: a dict with its each key-value pair being (feature_name: True/False),
where True means the feature is discrete and False means the feature is
continuous.
type: ID3/C4.5/CART
pruning: pre/post
'''
self.feature_discrete=feature_discrete
self.treeType=treeType
self.leaf_count=0
self.tmp_classification=''
self.tree=None
def Entropy(self, list_of_class):
'''
Compute the entropy for the given list of class.
list_of_class: an array of classification labels, e.g. ['duck', 'duck', 'dolphin']
'duck': 2/3, 'dolphin': 1/3, so the entropy for this array is 0.918
'''
count={}
for key in list_of_class:
count[key]=count.get(key, 0)+1
frequency=np.array(tuple(count.values()))/len(list_of_class)
return -1*np.vdot(frequency, np.log2(frequency))
def Information_Gain(self, list_of_class, grouped_list_of_class):
'''
Compute the Information Gain.
list_of_class: an array of classification labels, e.g. ['duck', 'duck', 'dolphin']
grouped_list_of_class: the list of class grouped by the values of
a certain attribute, e.g. [('duck'), ('duck', 'dolphin')].
The Information_Gain for this example is 0.2516.
'''
sec2=np.sum([len(item)*self.Entropy(item) for item in grouped_list_of_class])/len(list_of_class)
return self.Entropy(list_of_class)-sec2
def Information_Ratio(self, list_of_class, grouped_list_of_class):
'''
Compute the Information Ratio.
list_of_class: an array of classification labels, e.g. ['duck', 'duck', 'dolphin']
grouped_list_of_class: the list of class grouped by the values of
a certain attribute, e.g. [('duck'), ('duck', 'dolphin')].
The Information_Ratio for this example is 0.2740.
'''
tmp=np.array([len(item)/len(list_of_class) for item in grouped_list_of_class])
intrinsic_value=-1*np.vdot(tmp, np.log2(tmp))
return self.Information_Gain(list_of_class, grouped_list_of_class)/intrinsic_value
def Gini(self, list_of_class):
'''
Compute the Gini value.
list_of_class: an array of classification labels, e.g. ['duck', 'duck', 'dolphin']
The Gini value for this example is 0.4444.
'''
count={}
for key in list_of_class:
count[key]=count.get(key, 0)+1
prob=np.array(tuple(count.values()))/len(list_of_class)
return 1-np.vdot(prob, prob)
def Gini_Index(self, list_of_class, grouped_list_of_class):
'''
Compute the Gini Index.
list_of_class: an array of classification labels, e.g. ['duck', 'duck', 'dolphin']
grouped_list_of_class: the list of class grouped by the values of
a certain attribute, e.g. [('duck'), ('duck', 'dolphin')].
The Gini Index for this example is 0.3333.
'''
return np.sum([len(item)*self.Gini(item) \
for item in grouped_list_of_class])/len(list_of_class)
def orderByGainOrRatio(self, D, A, by='Gain'):
'''
Return the order by Information Gain or Information Ratio.
by: 'Gain', 'Ratio'.
For the definition of D and A, see the remark in method 'fit'.
'''
tmp_value_dict=dict()
target_function=self.Information_Gain if by=='Gain' else self.Information_Ratio
for attr, info in A.items():
possibleVal=np.unique(D[:, info[0]])
# if the continuous attribute have only one possible value, then
# choosing it won't improve the model, so we abandon it.
if len(possibleVal)==1:
continue
if self.feature_discrete[attr] is True:
# discrete
if len(info)<2:
A[attr].append(possibleVal)
# retrieve the grouped list of class
grouped_list_of_class=[]
for val in possibleVal:
indexes=np.argwhere(D[:, info[0]]==val)
grouped_list_of_class.append(D[indexes, -1].flatten())
IC_value=target_function(D[:, -1], grouped_list_of_class)
tmp_value_dict[attr]=IC_value
else:
# continuous
cut_points=(possibleVal[: -1].astype(np.float32)+possibleVal[1:].astype(np.float32))/2
maxMetric=-1
for point in cut_points:
smaller_set=D[np.argwhere(D[:, info[0]]<=str(point)), -1].flatten()
bigger_set=D[np.argwhere(D[:, info[0]]>str(point)), -1].flatten()
# compute the metric
IC_tmp=target_function(D[:, -1], (smaller_set, bigger_set))
if IC_tmp>maxMetric:
maxMetric=IC_tmp
threshold=point
# set the threshold
if len(info)<2:
A[attr].append(threshold)
else:
A[attr][1]=threshold
tmp_value_dict[attr]=maxMetric
# find the attribute with the max tmp_value_dict value
attr_list=list(tmp_value_dict.keys())
attr_list.sort(key=lambda x: tmp_value_dict[x])
return attr_list
def orderByGiniIndex(self, D, A):
'''
Return the order by Gini Index.
For the definition of D and A, see the remark in method 'fit'.
'''
tmp_value_dict=dict()
for attr, info in A.items():
possibleVal=np.unique(D[:, info[0]])
# if the continuous attribute have only one possible value, then
# choosing it won't improve the model, so we abandon it.
if len(possibleVal)==1:
continue
if self.feature_discrete[attr] is True:
# discrete
if len(info)<2:
A[attr].append(possibleVal)
# retrieve the grouped list of class
grouped_list_of_class=[]
for val in possibleVal:
indexes=np.argwhere(D[:, info[0]]==val)
grouped_list_of_class.append(D[indexes, -1].flatten())
GI_value=self.Gini_Index(D[:, -1], grouped_list_of_class)
tmp_value_dict[attr]=GI_value
else:
# continuous
cut_points=(possibleVal[: -1].astype(np.float32)+possibleVal[1:].astype(np.float32))/2
minMetric=9999999999
for point in cut_points:
smaller_set=D[np.argwhere(D[:, info[0]]<=str(point)), -1].flatten()
bigger_set=D[np.argwhere(D[:, info[0]]>str(point)), -1].flatten()
# compute the metric
GI_tmp=self.Gini_Index(D[:, -1], (smaller_set, bigger_set))
if GI_tmp<minMetric:
minMetric=GI_tmp
threshold=point
# set the threshold
if len(info)<2:
A[attr].append(threshold)
else:
A[attr][1]=threshold
tmp_value_dict[attr]=minMetric
# return the attribute list sorted by tmp value
attr_list=list(tmp_value_dict.keys())
attr_list.sort(key=lambda x: tmp_value_dict[x])
return attr_list
def chooseAttribute(self, D, A):
'''
Choose an attribute from A according to the metrics above.
For the definition of D and A, see method 'fit'.
Different principal for different tree types:
ID3: choose the attribute that maximizes the Information Gain.
C4.5:
1, choose those attributes whose Information Gain are above average.
2, choose the one that maximizes the Gain Ratio from these attributes.
CART: choose the attribute that minimizes the Gini Index.
'''
if self.treeType=='ID3':
attr_list=self.orderByGainOrRatio(D, A, by='Gain')
return attr_list[-1]
if self.treeType=='C4.5':
attr_list=self.orderByGainOrRatio(D, A, by='Gain')
# for C4.5, we choose the attributes whose Gain are above average
# and then order them by Ratio.
sub_A={key: A[key] for key in attr_list}
attr_list=self.orderByGainOrRatio(D, sub_A, by='Ratio')
return attr_list[-1]
if self.treeType=='CART':
attr_list=self.orderByGiniIndex(D, A)
return attr_list[0]
def fit(self, D, A):
'''
Train the tree.
To save the training result:
>> self.tree=self.fit(D, A)
D: the training set, a size [m, n+1] numpy array (with str type elements),
where m is the number of training data and n is the number of attributes.
The last column of D is the classifications (or labels).
A: the attributes set. It is a dict with its structure being like
{attr_name: [index_in_D_columns, possibleVal_or_threshold], ...}
attr_name: name of the attribute
index_in_D_columns: the corresponding index of the attribute in ndarray D (starting from 0)
possibleVal_or_threshold:
###################################################
## This value may not always be available in A ##
## it is added after 'chooseAttribute' is called ##
## And it will be updated after each call ##
###################################################
1, if the attribute is discrete, then it is a ndarray containing all possible values
of this attribute.
2, if the attribute is continuous, then possibleVal_or_threshold is the most recent
threshold.
'''
if len(D)==0:
node=Node(feature_name='leaf-'+str(self.leaf_count), isLeaf=True, \
classification=self.tmp_classification)
self.leaf_count+=1
return node
if len(np.unique(D[:, -1]))<=1:
node=Node(feature_name='leaf-'+str(self.leaf_count), isLeaf=True, \
classification=D[0, -1])
self.leaf_count+=1
return node
if len(A)==0 or len(np.unique(D[:, :-1], axis=0))<=1:
count_dict={}
for key in D[:, -1]:
count_dict[key]=count_dict.get(key, 0)+1
most_frequent=sorted(D[:, -1], key=lambda x: count_dict[x])[-1]
node=Node(feature_name='leaf-'+str(self.leaf_count), isLeaf=True, \
classification=most_frequent)
self.leaf_count+=1
return node
count_dict={}
for key in D[:, -1]:
count_dict[key]=count_dict.get(key, 0)+1
most_frequent=sorted(D[:, -1], key=lambda x: count_dict[x])[-1]
self.tmp_classification=most_frequent
# choose target attribute
target_attr=self.chooseAttribute(D, A)
# print(target_attr)
# generate nodes for each possible values of the target attribute if it's discrete
# generate two nodes for the two classification if it's continuous
# related information is stored in A[target_attr][1] now,
# since we have called chooseAttribute at least once.
info=A[target_attr]
if self.feature_discrete[target_attr]:
node=Node(feature_name=target_attr, discrete=True, isLeaf=False)
# generate nodes for each possible values
for possibleVal in info[1]:
keys=set(A.keys()).difference({target_attr})
# connect node to its child
tmp_D=D[np.argwhere(D[:, info[0]]==possibleVal), :]
tmp_A={key: A[key] for key in keys}
node[possibleVal]=self.fit(tmp_D.reshape((tmp_D.shape[0], tmp_D.shape[2])), tmp_A)
else:
# continuous
threshold=info[1]
node=Node(feature_name=target_attr, discrete=False, threshold=threshold,isLeaf=False)
tmp_D=D[np.argwhere(D[:, info[0]]<=str(threshold)), :]
node['<=']=self.fit(tmp_D.reshape((tmp_D.shape[0], tmp_D.shape[2])), A)
tmp_D=D[np.argwhere(D[:, info[0]]>str(threshold)), :]
node['>']=self.fit(tmp_D.reshape((tmp_D.shape[0], tmp_D.shape[2])), A)
return node
def post_prune(self, training_D, testing_D, A, current=None, parent=None):
'''
self.tree is required.
This method conducts the post-pruning to enhance the model performance.
To make sure this method will work, set
>> current=self.tree
when you call it.
'''
self.current_accuracy=self.evaluate(testing_D, A)
count_dict={}
if len(training_D)==0:
return
# print(training_D)
for key in training_D[:, -1]:
count_dict[key]=count_dict.get(key, 0)+1
most_frequent=sorted(training_D[:, -1], key=lambda x: count_dict[x])[-1]
leaf_parent=True
for key, node in current.map.items():
if not node.isLeaf:
leaf_parent=False
# Recursion, DFS
if node.discrete:
tmp_D=training_D[np.argwhere(training_D[:, A[current.feature_name][0]]==key), :]
else:
if key=='<=':
tmp_D=training_D[np.argwhere(training_D[:, A[current.feature_name][0]]<=str(node.threshold)), :]
else:
tmp_D=training_D[np.argwhere(training_D[:, A[current.feature_name][0]]>str(node.threshold)), :]
self.post_prune(tmp_D.reshape((tmp_D.shape[0], tmp_D.shape[2])), testing_D, A, parent=current, current=node)
tmp_node=Node(feature_name='leaf-'+str(self.leaf_count), isLeaf=True, classification=most_frequent)
if parent:
# when current node is not the root
for key, node in parent.map.items():
if node==current:
parent.map[key]=tmp_node
saved_key=key
break
# compare the evaluation, if it is enhanced then prune the tree.
tmp_accuracy=self.evaluate(testing_D, A)
if tmp_accuracy<self.current_accuracy:
parent.map[saved_key]=current
else:
self.current_accuracy=tmp_accuracy
self.leaf_count+=1
return
else:
# when current node is the root
saved_tree=self.tree
self.tree=tmp_node
tmp_accuracy=self.evaluate(testing_D, A)
if tmp_accuracy<self.current_accuracy:
self.tree=saved_tree
else:
self.current_accuracy=tmp_accuracy
self.leaf_count+=1
return
def predict(self, D, A):
'''
Predict the classification for the data in D.
For the definition of A, see method 'fit'.
There is one critical difference between D and that defined in 'fit':
the last column may or may not be the labels.
This method works as long as the feature index in A matches the corresponding
column in D.
'''
row, _=D.shape
pred=np.empty((row, 1), dtype=str)
tmp_data={key: None for key in A.keys()}
for i in range(len(D)):
for key, info in A.items():
tmp_data[key]=D[i, info[0]]
pred[i]=self.tree(tmp_data)
return pred
def evaluate(self, testing_D, A):
'''
Evaluate the performance of decision tree. (Accuracy)
For definition of testing_D and A, see 'predict'.
However, here the testing_D is required to be labelled, that is, its last column
should be labels of the data.
'''
true_label=testing_D[:, -1]
pred_label=self.predict(testing_D, A)
success_count=0
for i in range(len(true_label)):
if true_label[i]==pred_label[i]:
success_count+=1
return success_count/len(true_label)
# try rename 'train' as 'fit' and run again, what is wrong?
if __name__=='__main__':
# training_D=np.genfromtxt('../../dataset/watermelon/watermelon2.0_train.txt', skip_header=1, dtype=str)
# testing_D=np.genfromtxt('../../dataset/watermelon/watermelon2.0_test.txt', skip_header=1, dtype=str)
# # print(training_D, testing_D, sep='\n')
# A={'色泽':[0], '根蒂':[1], '敲击':[2], '纹理':[3], '脐部':[4], '触感':[5]}
# feature_discrete={'色泽':True, '根蒂':True, '敲击':True, '纹理':True, '脐部':True, '触感':True}
# dectree=baseClassDecisionTree(feature_discrete=feature_discrete, treeType='ID3')
# dectree.tree=dectree.fit(training_D, A)
# print(dectree.tree)
# print(dectree.evaluate(testing_D, A))
# dectree.prune(training_D, testing_D, A, current=dectree.tree)
# print(dectree.evaluate(testing_D, A))
# print(dectree.tree)
# training_D=np.genfromtxt('../../dataset/watermelon/watermelon3.0_train.txt', skip_header=1, dtype=str)
# testing_D=np.genfromtxt('../../dataset/watermelon/watermelon3.0_test.txt', skip_header=1, dtype=str)
# # print(D)
# A={'色泽':[0], '根蒂':[1], '敲声':[2], '纹理':[3], '脐部':[4], '触感':[5], '密度':[6], '含糖率':[7]}
# feature_discrete={'色泽':True, '根蒂':True, '敲声':True, '纹理': True, '脐部': True, '触感':True, '密度':False, '含糖率':False}
# dectree=baseClassDecisionTree(feature_discrete=feature_discrete, treeType='ID3')
# dectree.tree=dectree.fit(training_D, A)
# print(dectree.tree)
# print(dectree.evaluate(testing_D, A))
# dectree.prune(training_D, testing_D, A, current=dectree.tree)
# print(dectree.evaluate(testing_D, A))
# print(dectree.tree)
D=np.genfromtxt('dataset/iris_train.txt', dtype=str)
np.random.shuffle(D)
k=len(D)
attribute_name=['sepal length', 'sepal width', 'petal length', 'petal width']
A={name: [i] for i, name in enumerate(attribute_name)}
feature_discrete={name: False for name in attribute_name}
conf={'A': A, 'feature_discrete': feature_discrete, 'treeType':'CART'}
dt=decisionTree(conf)
dt.train(D[:len(D)//2])
print(dt.tree)
print(dt.eval(D[len(D)//2:]))
dt.prune(D[:len(D)//2], D[len(D)//2:])
print(dt.tree)
print(dt.eval(D[len(D)//2:]))
| [
2,
19617,
25,
3384,
69,
12,
23,
198,
198,
11748,
25064,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
19081,
1330,
19081,
198,
198,
2,
900,
262,
40708,
664,
24197,
7095,
994,
13,
198,
17597,
13,
2617,
8344,
24197,
32374,
7,
49388,
... | 2.028666 | 9,349 |
# Copyright (c) 2010 Doug Hellmann. All rights reserved.
#
"""
"""
# end_pymotw_header
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("-i", metavar="in-file", type=argparse.FileType("rt"))
parser.add_argument("-o", metavar="out-file", type=argparse.FileType("wt"))
try:
results = parser.parse_args()
print("Input file:", results.i)
print("Output file:", results.o)
except IOError as msg:
parser.error(str(msg))
| [
2,
15069,
357,
66,
8,
3050,
15115,
5783,
9038,
13,
220,
1439,
2489,
10395,
13,
198,
2,
198,
37811,
198,
37811,
198,
198,
2,
886,
62,
79,
4948,
313,
86,
62,
25677,
198,
11748,
1822,
29572,
198,
198,
48610,
796,
1822,
29572,
13,
281... | 2.714286 | 168 |
import pandas as pd
from enum import Enum
import random
from game.division import Division
from game.team import Team
| [
11748,
19798,
292,
355,
279,
67,
198,
6738,
33829,
1330,
2039,
388,
198,
11748,
4738,
198,
6738,
983,
13,
21426,
1330,
7458,
198,
6738,
983,
13,
15097,
1330,
4816,
628,
198
] | 3.870968 | 31 |
__all__ = ["models", "async_beatsaver", "beatsaver"] | [
834,
439,
834,
796,
14631,
27530,
1600,
366,
292,
13361,
62,
1350,
1381,
8770,
1600,
366,
1350,
1381,
8770,
8973
] | 2.6 | 20 |
""" pre = state in previous time step (2+N vector)
curr = current state (2+N vector)
N = number of phases """
if __name__ == "__main__":
pre = [0, 0, 1, 2, 3]
curr = [0, 0, 5, 6, 7]
N = 3
#print (delayReward(pre,curr,N))
| [
37811,
662,
796,
1181,
287,
2180,
640,
2239,
357,
17,
10,
45,
15879,
8,
198,
220,
220,
220,
1090,
81,
796,
1459,
1181,
357,
17,
10,
45,
15879,
8,
198,
220,
220,
220,
399,
796,
1271,
286,
21164,
37227,
628,
628,
198,
361,
11593,
... | 2.223214 | 112 |
import numpy as np
import matplotlib.pyplot as plt
import time
import os
import scipy.sparse as sps
if __name__ == "__main__":
main()
| [
11748,
299,
32152,
355,
45941,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
640,
198,
11748,
28686,
198,
11748,
629,
541,
88,
13,
82,
29572,
355,
599,
82,
198,
198,
361,
11593,
3672,
834,
6624,
366,
834,
... | 2.74 | 50 |
from django.db import models
# Create your models here.
class Comment(models.Model):
"""
评论
"""
obj_type_id = models.IntegerField('对象类别') # 1.博客 2.评论
obj_id = models.IntegerField('对象id')
username = models.CharField('用户名', max_length=50, null=True, blank=True)
name = models.CharField('姓名', max_length=50, null=True, blank=True)
email = models.EmailField('邮箱', null=True, blank=True)
content = models.TextField('内容')
source_ip = models.CharField('来源ip', max_length=20)
creator = models.CharField('创建人', max_length=50)
gmt_created = models.DateTimeField('创建时间', auto_now_add=True, help_text="")
gmt_modified = models.DateTimeField('修改时间', auto_now=True, help_text="")
is_deleted = models.BooleanField('已删除', default=False)
def get_replys(self):
"""
评论的回复
:return:
"""
return Comment.objects.filter(obj_type_id=2, obj_id=self.id, is_deleted=False)
| [
6738,
42625,
14208,
13,
9945,
1330,
4981,
198,
198,
2,
13610,
534,
4981,
994,
13,
628,
198,
4871,
18957,
7,
27530,
13,
17633,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
5525,
107,
226,
164,
106,
118,
198,
220,
220,
220,
... | 2.071895 | 459 |
#!/usr/bin/env python
import json
import sys
import os
from web3 import Web3
from eth_account import Account
def getenv_or_exit(name):
""" Gets the required variable from the environment. Closes the application
with error if it's not set.
Args:
name (string) - The name of required environment variable.
Return:
var (respective type) - The value of the variable.
"""
var = os.getenv(name)
if var is None:
sys.exit("Please set the environment variable {}".format(name))
else:
return var
# Establishes the web3 provider. Also gets the average gas price.
web3 = Web3(Web3.HTTPProvider(getenv_or_exit("RPC")))
if web3.isConnected():
print("Connected to the network!")
else:
sys.exit("Could not connect to network. Check your RPC settings.")
CONFIRMATIONS = int(getenv_or_exit("CONFIRMATIONS"))
TARGET = int(getenv_or_exit("TARGET"))
TARGET_TIME = int(getenv_or_exit("TARGET_TIME"))
ADDRESS = getenv_or_exit("ADDRESS")
if not web3.isAddress(ADDRESS):
if not web3.isChecksumAddress(ADDRESS):
sys.exit("Invalid ADDRESS granted")
else:
ADDRESS = web3.toChecksumAddress(ADDRESS)
PRIV_KEY = getenv_or_exit("PRIV_KEY")
ACCOUNT = Account.privateKeyToAccount(PRIV_KEY)
# Configuration warnings.
if TARGET * ((CONFIRMATIONS + 1) * 16.5) > TARGET_TIME:
print(
"Strongly advising you to reconsider the configuration!"
"\nAccording to average mining and confirmation speed,"
"this is nearly impossible. Performance is not guaranteed."
"\nAlso it can lead to excessive expenditures."
)
elif TARGET_TIME / (TARGET * 60) <= 1:
print(
"Current configuration targets are hard to reach"
"due to possible network fluctuations."
)
# May vary from the current situation on Blockchain.
BASE_PRICE = int(web3.eth.gasPrice / 1)
# Creates contract instance.
if os.path.exists("abi.json") and os.path.isfile("abi.json"):
with open("abi.json") as file:
abi = json.load(file)
INSTANCE = web3.eth.contract(address=ADDRESS, abi=abi)
else:
sys.exit("ABI should be present in file 'abi.json'")
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
11748,
33918,
198,
11748,
25064,
198,
11748,
28686,
198,
6738,
3992,
18,
1330,
5313,
18,
198,
6738,
4555,
62,
23317,
1330,
10781,
628,
198,
4299,
651,
24330,
62,
273,
62,
37023,
7,
367... | 2.741688 | 782 |
#!/usr/bin/env python3
# Hugo Hromic <hugo.hromic@insight-centre.org>
"""Generate timeline and events in JSON representation for visualisation."""
import json
from argparse import ArgumentParser
from helpers.utils import get_reader, get_writer, read_step_communities
from helpers.timeline import Timeline
def main():
"""Module entry-point."""
epilog = "You can omit filenames to use standard input/output."
parser = ArgumentParser(description=__doc__, epilog=epilog)
parser.add_argument(
"--timeline", metavar="FILENAME",
help="filename for the input timeline (in dynamic tracker text format)")
parser.add_argument(
"--steps-dir", metavar="DIRECTORY", default="./",
help="directory with community step files (in dynamic tracker text format)")
parser.add_argument(
"--expansion-threshold", metavar="FLOAT", type=float, default=0.10,
help="growth threshold for detecting expansions (percentual)")
parser.add_argument(
"--contraction-threshold", metavar="FLOAT", type=float, default=0.10,
help="reduction threshold for detecting contractions (percentual)")
parser.add_argument(
"--output", metavar="FILENAME",
help="filename for the output timeline (in JSON format)")
parser.add_argument(
"--events", metavar="FILENAME", required=True,
help="filename for the output timeline events (in JSON format)")
args = parser.parse_args()
with get_reader(args.timeline) as reader:
timeline = Timeline(reader)
splits = timeline.find_splits()
merges = timeline.find_merges()
splits.remove_orphans(timeline)
births = timeline.find_births()
deaths = timeline.find_deaths()
intermittents = timeline.find_intermittents()
step_communities = read_step_communities(args.steps_dir)
expansions = timeline.find_expansions(step_communities, args.expansion_threshold)
contractions = timeline.find_contractions(step_communities, args.contraction_threshold)
with get_writer(args.output) as writer:
timeline.to_vis_json(writer)
with get_writer(args.events) as writer:
writer.write("%s\n" % json.dumps({
"splits": splits,
"births": births,
"merges": merges,
"deaths": deaths,
"intermittents": intermittents,
"expansions": expansions,
"contractions": contractions,
}, separators=(",", ":")))
if __name__ == "__main__":
main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
25930,
367,
398,
291,
1279,
71,
1018,
78,
13,
71,
398,
291,
31,
1040,
432,
12,
1087,
260,
13,
2398,
29,
198,
198,
37811,
8645,
378,
15264,
290,
2995,
287,
19449,
10552,
329,... | 2.665957 | 940 |
from botocore.exceptions import ClientError
from progress.bar import ChargingBar
import boto3
BUCKET_NAME = 'your_bucket_name'
PROFILE = 'stage' # Select required profile from ~/.aws/credentials
session = boto3.Session(profile_name=PROFILE)
stage_s3_client = session.client('s3')
s3 = session.resource('s3')
bucket = s3.Bucket(BUCKET_NAME)
all_obj = list(bucket.objects.all())
all_obj_len = len(all_obj)
error_obj = []
print(f'Total to be processed: {all_obj_len}\n')
with CustomBar('Processing', max=all_obj_len) as bar:
for idx, obj in enumerate(all_obj, 1):
try:
acl = obj.Acl()
_is_acl_read = is_acl_read(acl.grants)
if not _is_acl_read:
error_obj.append(obj.key)
obj = s3.Object(BUCKET_NAME, obj.key)
obj.Acl().put(ACL='public-read')
except ClientError as e:
print(f'Exception for file {obj.key}: {e.response}')
bar.next()
print(f'\n{len(error_obj)} images fixed')
| [
6738,
10214,
420,
382,
13,
1069,
11755,
1330,
20985,
12331,
198,
6738,
4371,
13,
5657,
1330,
14797,
278,
10374,
198,
11748,
275,
2069,
18,
198,
198,
33,
16696,
2767,
62,
20608,
796,
705,
14108,
62,
27041,
316,
62,
3672,
6,
198,
31190,... | 2.198257 | 459 |
from .nms_nd import nms_nd
from .bbox_overlaps_nd import bbox_overlaps_nd
from .roi_align_nd import RoIAlign
from .roi_align_rotated_nd import RoIAlignRotated
from .deform_pool_2d import DeformRoIPooling2dPack, ModulatedDeformRoIPooling2dPack
from .deform_conv_2d import DeformConv2dPack
from .modulated_deform_conv_2d import ModulatedDeformConv2dPack
from .deform_conv_3d import DeformConv3dPack
from .modulated_deform_conv_3d import ModulatedDeformConv3dPack
__all__ = [
'nms_nd',
'bbox_overlaps_nd',
'RoIAlign',
'RoIAlignRotated',
'DeformRoIPooling2dPack',
'ModulatedDeformRoIPooling2dPack',
'DeformConv2dPack',
'ModulatedDeformConv2dPack',
'DeformConv3dPack',
'ModulatedDeformConv3dPack'
] | [
6738,
764,
77,
907,
62,
358,
1330,
299,
907,
62,
358,
198,
6738,
764,
65,
3524,
62,
2502,
75,
1686,
62,
358,
1330,
275,
3524,
62,
2502,
75,
1686,
62,
358,
198,
6738,
764,
305,
72,
62,
31494,
62,
358,
1330,
5564,
40,
2348,
570,
... | 2.274691 | 324 |
import mock
from xml2kinto.kinto import get_kinto_records
| [
11748,
15290,
198,
6738,
35555,
17,
74,
20424,
13,
74,
20424,
1330,
651,
62,
74,
20424,
62,
8344,
3669,
628,
628,
628,
628
] | 2.826087 | 23 |
"""
Author: Alexandre Lepage
Date: May 2019
"""
class TheStack:
"""
Custom stack class
"""
__keys:list # The array holding the keys
__intitial_size:int = 8 # The size of the array holding the stack
def __init__(self):
"""
Contructor of the Class
Initialized the list and top,
initinial size of the list is 8.
-----PSEUDO CODE-----
Stack()
Let S[0..7] be a new array
S.top = -1
-----PSEUDO CODE-----
"""
self.__keys = [0] * self.__intitial_size
self.__top = -1
def push(self, k):
"""
Add an element to the top of the stack
-----PSEUDO CODE-----
(S is the Stack, k is the element to be added to the stack)
Push(S,k)
S.top = S.top + 1
if S.top == S.size
IncreaseSize(S)
S[S.top] = k
-----PSEUDO CODE-----
Keyword argument:
k: the element to be added to the stack
"""
self.__top += 1
if self.__top == len(self.__keys):
self.__increase_size()
self.__keys[self.__top] = k
def pop(self):
"""
Remove an element from the top of the stack
-----PSEUDO CODE-----
(S is the Stack)
Pop(S)
if S.top < 0
Throw Error
else
S.top = S.top - 1
return S[S.top + 1]
-----PSEUDO CODE-----
Return:
var: the element at the top of the stack
"""
if self.__top < 0:
raise Exception("Cannot pop: Stack is empty!")
else:
self.__top -= 1
return self.__keys[self.__top + 1]
def count(self):
"""
Return the amount of elements in the stack
-----PSEUDO CODE-----
(S is the Stack)
Count(S)
return S.top + 1
-----PSEUDO CODE-----
Return:
int: the amount of the element in the stack
"""
return self.__top + 1
def empty(self):
"""
Return true if the stack has 0 element
-----PSEUDO CODE-----
(S is the Stack)
Empty(S)
if S.top - 1 < 0
return True
else
return False
-----PSEUDO CODE-----
Return:
bool: True if stack is empty
"""
return self.__top < 0
def __increase_size(self):
"""
Increase the size of the list holding the elements
-----PSEUDO CODE-----
(S is the Stack)
IncreaseSize(S)
let T[0..(length of S * 2)] be a new array
Copy all S's element over to T
S = T
-----PSEUDO CODE-----
"""
T = [0] * (len(self.__keys) * 2)
for i in range(0,len(self.__keys)):
T[i] = self.__keys[i]
self.__keys = T
def to_list(self):
"""
Return a list of all the element in the stack,
in the same order pop would have returned it.
-----PSEUDO CODE-----
(S is the Stack)
ToList(S)
let T[0..(S.top + 1)] be a new array
for i = S.top down to 0
T[i] = S[S.top - i]
return T
-----PSEUDO CODE-----
Return:
list: the stack in a list form
"""
T = [0] * (self.__top + 1)
for i in range(self.__top, -1, -1):
T[i] = self.__keys[self.__top - i]
return T
def peek(self):
"""
Return the element on top of the stack,
without removing it.
-----PSEUDO CODE-----
(S is the Stack)
Peek(S)
if S.top < 0
Throw Error
return S[S.top]
-----PSEUDO CODE-----
Return:
var: the element on top of the stack
"""
if self.__top < 0:
raise Exception("Cannot peek: Stack is empty!")
return self.__keys[self.__top]
def clear(self):
"""
Remove all elements from the stack,
and initialized the list and top,
initinial size of the list is 8.
-----PSEUDO CODE-----
(S is the Stack)
Clear(S)
let S[0..7]
S.top = -1
-----PSEUDO CODE-----
"""
self.__keys = [0] * self.__intitial_size
self.__top = -1
| [
37811,
198,
13838,
25,
21000,
260,
42957,
496,
198,
10430,
25,
1737,
13130,
198,
37811,
198,
198,
4871,
383,
25896,
25,
198,
220,
220,
220,
37227,
220,
198,
220,
220,
220,
8562,
8931,
1398,
198,
220,
220,
220,
37227,
198,
220,
220,
... | 1.948005 | 2,231 |
import requests
import json
from requests.auth import HTTPDigestAuth
from utils.errors import check_error
| [
11748,
7007,
198,
11748,
33918,
198,
6738,
7007,
13,
18439,
1330,
7154,
51,
5760,
328,
395,
30515,
198,
6738,
3384,
4487,
13,
48277,
1330,
2198,
62,
18224,
628
] | 3.821429 | 28 |
from files.ListObject import variables, variableExist, callFunction
from files.Operations import add, subtract, multiply, divide
from files.FunctionsForSython import error | [
6738,
3696,
13,
8053,
10267,
1330,
9633,
11,
7885,
3109,
396,
11,
869,
22203,
198,
6738,
3696,
13,
18843,
602,
1330,
751,
11,
34128,
11,
29162,
11,
14083,
198,
6738,
3696,
13,
24629,
2733,
1890,
50,
7535,
1330,
4049
] | 4.384615 | 39 |