content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""External filestore interface. Cannot be depended on by filestore code."""
import filestore
import filestore.filesystem
import filestore.git
import filestore.github_actions
import filestore.gsutil
import filestore.no_filestore
import filestore.gitlab
FILESTORE_MAPPING = {
'filesystem': filestore.filesystem.FilesystemFilestore,
'gsutil': filestore.gsutil.GSUtilFilestore,
'github-actions': filestore.github_actions.GithubActionsFilestore,
'git': filestore.git.GitFilestore,
# TODO(metzman): Change to "no-filestore"
'no_filestore': filestore.no_filestore.NoFilestore,
'gitlab': filestore.gitlab.GitlabFilestore,
}
def get_filestore(config):
"""Returns the correct filestore object based on the platform in |config|.
Raises an exception if there is no correct filestore for the platform."""
if config.platform == config.Platform.EXTERNAL_GITHUB:
ci_filestore = filestore.github_actions.GithubActionsFilestore(config)
if not config.git_store_repo:
return ci_filestore
return filestore.git.GitFilestore(config, ci_filestore)
filestore_cls = FILESTORE_MAPPING.get(config.filestore)
if filestore_cls is None:
raise filestore.FilestoreError(
f'Filestore: {config.filestore} doesn\'t exist.')
return filestore_cls(config)
| [
2,
15069,
33448,
3012,
11419,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198,
2,
921,
743,
733... | 2.951027 | 633 |
# -*- coding: utf-8 -*-
"""
pypeline.core
~~~~~~~~~~~~~
this module contains all the processing method, and advanced algorithms for OCT signal processing
calibrate:
disp_comp:
sp2struct:
@phase, @intensity, @complex
despeckle: (2D, 3D, etc.)
angiograph: (2D, 3D, etc.)
@speckle_var, @
"""
# register/load all the functions in the settings folder, so can be found and called to form pipeline
# use watchdog to monitor the content change in the settings folder
__all__ = ['pipeline', 'funcwrap']
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
37811,
198,
79,
4464,
4470,
13,
7295,
198,
15116,
8728,
93,
198,
198,
5661,
8265,
4909,
477,
262,
7587,
2446,
11,
290,
6190,
16113,
329,
42256,
6737,
7587,
198,
1... | 2.994152 | 171 |
from homura import download
| [
6738,
3488,
5330,
1330,
4321,
198
] | 4.666667 | 6 |
import base64
import six
import uuid
from mimetypes import guess_extension, guess_type
import io
from api import app
import boto3
import logging
import botocore.exceptions
BUCKET = app.config["S3_BUCKET_NAME"]
def parse_graduation_date(date):
""" Parses graduation date string generated by frontend into suitable format
Parameters
----------
date : str
string of user inputted graduation date.
Returns
-------
string
string of graduation date only with Month and Year.
"""
date_splitted = date.split("-")
year = date_splitted[0]
month = date_splitted[1]
months = {
"01": "January",
"02": "February",
"03": "March",
"04": "April",
"05": "May",
"06": "June",
"07": "July",
"08": "August",
"09": "September",
"10": "October",
"11": "November",
"12": "December"
}
return months[month] + " " + year
def get_file_extension(data):
""" Helper function to get file extension of base64 file
Parameters
----------
date : str
base64 representation of a file/data.
Returns
-------
string
extension of the base64 file/data.
"""
extension = (data.split(";")[0]).split(":")[-1]
return extension
def decode_and_upload_base64_file(data, file_name):
""" Function to decode base64 files
Parameters
----------
date : str
string of user inputted graduation date.
Returns
-------
string
string of graduation date only with Month and Year.
"""
url_data = upload_base64_file(data, BUCKET, file_name)
return url_data
def upload_base64_file(data, bucket_name, file_name):
""" Helper function to decode and upload base64 files
Parameters
----------
data : str
base64 data of file
bucket_name : str
name of S3 bucket.
file_name : str
name of file + path to be saved within s3.
Returns
-------
string
URL of file within s3.
"""
logging.info("Retrieving base64 data of file.")
file_extension = get_file_extension(data)
base64_data = data.split(",")[-1]
try:
logging.info("Decoding base64 data into binary data")
decoded_file = base64.b64decode(base64_data)
except Exception as e:
logging.error(e)
try:
logging.info("Uploading file into s3 bucket.")
client = boto3.client('s3')
client.upload_fileobj(
io.BytesIO(decoded_file),
bucket_name,
file_name,
ExtraArgs={
'ACL': 'public-read',
"ContentType": file_extension
}
)
except Exception as e:
raise e
return f"https://{bucket_name}.s3.amazonaws.com/{file_name}"
| [
11748,
2779,
2414,
198,
11748,
2237,
198,
11748,
334,
27112,
198,
6738,
17007,
2963,
12272,
1330,
4724,
62,
2302,
3004,
11,
4724,
62,
4906,
198,
11748,
33245,
198,
6738,
40391,
1330,
598,
198,
11748,
275,
2069,
18,
198,
11748,
18931,
19... | 2.227442 | 1,341 |
STRUCTURED_TYPE = 'structured'
ML_MODEL_FILE_NAME_KEY = "ML_MODEL_FILE_NAME"
ML_MODEL_FILE_NAME_VALUE = "model"
SYS_DEPLOYMENT_FILE_NAME_KEY = "SYS_DEPLOYMENT_FILE_NAME"
SYS_DEPLOYMENT_FILE_NAME_VALUE = "deployment"
STATUS_UNAVAILABLE = 'unavailable'
SUCCESS_STATUSES = ['completed', 'available', 'success']
WARNING_STATUSES = ['queued', 'pending', 'processing', 'building', 'validating', 'deploying', 'running',
'confirmation', 'confirmation_pending']
ERROR_STATUSES = ['failed', 'cancelled_pending', 'cancelled']
DEFAULT_IGNORE_FILE = '.ubiops-ignore'
UPDATE_TIME = 30 # seconds to wait between update and new zip upload
| [
46126,
4261,
1961,
62,
25216,
796,
705,
7249,
1522,
6,
198,
5805,
62,
33365,
3698,
62,
25664,
62,
20608,
62,
20373,
796,
366,
5805,
62,
33365,
3698,
62,
25664,
62,
20608,
1,
198,
5805,
62,
33365,
3698,
62,
25664,
62,
20608,
62,
3948... | 2.584 | 250 |
# Generated by Django 2.0.3 on 2018-06-14 17:03
from django.db import migrations, models
import django.db.models.deletion
| [
2,
2980,
515,
416,
37770,
362,
13,
15,
13,
18,
319,
2864,
12,
3312,
12,
1415,
1596,
25,
3070,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
198,
11748,
42625,
14208,
13,
9945,
13,
27530,
13,
2934,
1616,
295,
... | 2.818182 | 44 |
"""
Sync authorized users on Google Sites to Google Drive
"""
import pyiem.cscap_utils as util
import gdata.gauth
import gdata.sites.client as sclient
config = util.get_config()
def get_sites_client(config, site="sustainablecorn"):
"""Return an authorized sites client"""
token = gdata.gauth.OAuth2Token(
client_id=config["appauth"]["client_id"],
client_secret=config["appauth"]["app_secret"],
user_agent="daryl.testing",
scope=config["googleauth"]["scopes"],
refresh_token=config["googleauth"]["refresh_token"],
)
sites_client = sclient.SitesClient(site=site)
token.authorize(sites_client)
return sites_client
spr_client = get_sites_client(config)
service = util.get_driveclient(config)
site_users = []
for acl in spr_client.get_acl_feed().entry:
userid = acl.scope.value
if userid not in site_users:
site_users.append(acl.scope.value)
# Get a listing of current permissions
perms = (
service.permissions().list(fileId=config["cscap"]["folderkey"]).execute()
)
for item in perms.get("items", []):
email = item["emailAddress"]
if email in site_users:
site_users.remove(email)
continue
print("Email: %s can access Drive, not sites" % (email,))
for loser in site_users:
print(loser)
# continue
id_resp = service.permissions().getIdForEmail(email=loser).execute()
id2 = id_resp["id"]
print(
("Adding %s[%s] as writer to CSCAP Internal Documents Collection")
% (loser, id2)
)
newperm = dict(
id=id2, type="user", role="writer", sendNotificationEmails=False
)
res = (
service.permissions()
.insert(fileId=config["cscap"]["folderkey"], body=newperm)
.execute()
)
print(res)
| [
37811,
198,
28985,
10435,
2985,
319,
3012,
37034,
284,
3012,
9974,
198,
37811,
198,
11748,
12972,
26597,
13,
66,
1416,
499,
62,
26791,
355,
7736,
198,
11748,
308,
7890,
13,
4908,
1071,
198,
11748,
308,
7890,
13,
49315,
13,
16366,
355,
... | 2.52766 | 705 |
from string import ascii_uppercase as au
if __name__ == '__main__':
print(solution("KAKAO")) | [
6738,
4731,
1330,
355,
979,
72,
62,
7211,
2798,
589,
355,
35851,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
3601,
7,
82,
2122,
7203,
42,
10206,
32,
46,
48774
] | 2.552632 | 38 |
from django.db import models
from django.core.mail import send_mail
from django.contrib.auth import models as auth_models
from django.contrib.auth.base_user import AbstractBaseUser
from django.utils.translation import ugettext_lazy as _
from django.conf import settings
from .managers import UserManager | [
6738,
42625,
14208,
13,
9945,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
1330,
4981,
198,
6738,
42625,
14208,
13,
7295,
13,
4529,
220,
220,
220,
220,
220,
220,
220,
220,
220... | 2.450331 | 151 |
import torch
import lpips
# from IPython import embed
import cv2
import numpy as np
import os
from os import listdir
from os.path import isfile, join
import re
use_gpu = False # Whether to use GPU
spatial = True # Return a spatial map of perceptual distance.
# Linearly calibrated models (LPIPS)
loss_fn = lpips.LPIPS(net='alex', spatial=spatial) # Can also set net = 'squeeze' or 'vgg'
# loss_fn = lpips.LPIPS(net='alex', spatial=spatial, lpips=False) # Can also set net = 'squeeze' or 'vgg'
if (use_gpu):
loss_fn.cuda()
## Example usage with dummy tensors
dummy_im0 = torch.zeros(1, 3, 64, 64) # image should be RGB, normalized to [-1,1]
dummy_im1 = torch.zeros(1, 3, 64, 64)
if (use_gpu):
dummy_im0 = dummy_im0.cuda()
dummy_im1 = dummy_im1.cuda()
dist = loss_fn.forward(dummy_im0, dummy_im1)
## Example usage with images
# replace with ours
# read image and load them
scenes=["lobby", "stones", "barbershop", "classroom"]
scenes=["stones"]
# scenes=["mc", "gallery"]
# scenes=["lobby"]
imgCount = 25
step = 5
fovCount = int((110-5)/step)
result = np.empty(shape=(fovCount * len(scenes) + 2, 2 + imgCount * 5))
result[0, 0] = 0 #scene id
result[0, 1] = 368 #fov
# result[0, 2] = 687 #our
# result[0, 3] = 6373 #nerf
# result[0, 4] = 36832 #fovea
result4curve = np.empty(shape = (imgCount*fovCount*len(scenes), 6))
# f=open('lpips_result_fova_anova.csv','a')
anova = np.empty(shape=(len(scenes)*imgCount+1, 2 + 3 * fovCount)) # 4*fovCount previously
# anova[0,0] = "scene"
# anova[0,1] = "imgid"
# for i in range(5,110,5):
# anova[0, 2 + 3 * (int(i / 5) - 1)] = "our-" + str(i)
# anova[0, 2 + 3 * (int(i / 5) - 1) + 1] = "nerf-" + str(i)
# anova[0, 2 + 3 * (int(i / 5) - 1) + 2] = "fovea-" + str(i)
for sceneID, scene in enumerate(scenes):
# if scene == "lobby":
# continue
# # if scene == "mc":
# # continue
# if sceneID < 4:
# continue
folder = './figs/' + scene
imgs_gt = [os.path.join(folder + '_gt', f) for f in listdir(folder + '_gt')
if re.match(r'view_[0-9]+.png', f)]
imgs_our = [os.path.join(folder + '_our', f) for f in listdir(folder + '_our') if isfile(join(folder + '_our', f))]
imgs_nerf = [os.path.join(folder + '_nerf', f) for f in listdir(folder + '_nerf') if isfile(join(folder + '_nerf', f))]
imgs_fgt = [os.path.join(folder + '_gt', f) for f in listdir(folder + '_gt') if re.match(r'view_[0-9]+_RT_k3.0.png', f)]
print(len(imgs_our),len(imgs_nerf))
for imgID in range(imgCount):
anova[sceneID * imgCount + imgID + 1, 0] = sceneID
anova[sceneID * imgCount + imgID + 1, 1] = imgID
# img_gt = cv2.imread(folder + '_gt' + '/view_' + f'{imgID:04d}' + '.png')
# G:\My Drive\eval_mono\mono\ref_as_left_eye\
img_gt = cv2.imread(imgs_gt[imgID])
img_our = cv2.imread(imgs_our[imgID])
img_nerf = cv2.imread(imgs_nerf[imgID])
# fgtpath = folder + '_gt' + '/view_' + f'{imgID:04d}' + '_RT_k3.0.png'
# img_fgt = cv2.imread(folder + '_gt' + '/view_' + f'{imgID:04d}' + '_RT_k3.0.png')
img_fgt = cv2.imread(imgs_fgt[imgID])
print(imgs_fgt[imgID],imgs_our[imgID],imgs_nerf[imgID])
# img_our_left = cv2.imread('./imgs/eval_mono/ref_as_left_eye/'+scene+'/view' + f'{imgID:04d}' + '_blended.png')
# img_our_right = cv2.imread(
# './imgs/eval_mono/ref_as_right_eye/' + scene + '/view' + f'{imgID:04d}' + '_blended.png')
# img_nerf = cv2.imread('./imgs/NeRF_'+scene+'/' + f'{imgID:03d}' + '.png')
# img_gtfova = cv2.imread('./imgs/gt_'+scene+'/view_' + f'{imgID:04d}' + '_RT_k3.0.png')
# print('./imgs/gt_' + scene + '/view_' + f'{imgID:04d}' + '.png')
print(img_gt.shape)
# print(img_gtfova.shape)
height, width = img_gt.shape[:2]
for fov in range(5,110,step):
rect_top = height / 2 - float(fov) / 110.0 * height / 2
rect_top = int(rect_top)
rect_btm = height / 2 + float(fov) / 110.0 * height / 2
rect_btm = int(rect_btm)
rect_left = width / 2 - float(fov) / 110.0 * width / 2
rect_left = int(rect_left)
rect_right = width / 2 + float(fov) / 110.0 * width / 2
rect_right = int(rect_right)
# print(rect_top,rect_btm,rect_left,rect_right)
crop_img_gt = img_gt[rect_top:rect_btm, rect_left:rect_right]
ex_ref = lpips.im2tensor(crop_img_gt[:,:,::-1])
crop_img_our = img_our[rect_top:rect_btm, rect_left:rect_right]
ex_p0 = lpips.im2tensor(crop_img_our[:, :, ::-1])
# crop_img_our_left = img_our_left[rect_top:rect_btm, rect_left:rect_right]
# ex_p0l = lpips.im2tensor(crop_img_our_left[:,:,::-1])
# crop_img_our_right = img_our_right[rect_top:rect_btm, rect_left:rect_right]
# ex_p0r = lpips.im2tensor(crop_img_our_right[:, :, ::-1])
crop_img_nerf = img_nerf[rect_top:rect_btm, rect_left:rect_right]
ex_p1 = lpips.im2tensor(crop_img_nerf[:,:,::-1])
# crop_img_gt_fova = img_gtfova[rect_top:rect_btm, rect_left:rect_right]
crop_img_fgt = img_fgt[rect_top:rect_btm, rect_left:rect_right]
ex_p2 = lpips.im2tensor(crop_img_fgt[:, :, ::-1])
if (use_gpu):
ex_ref = ex_ref.cuda()
ex_p0 = ex_p0.cuda()
# ex_p0l = ex_p0l.cuda()
# ex_p0r = ex_p0r.cuda()
ex_p1 = ex_p1.cuda()
ex_p2 = ex_p2.cuda()
ex_d0 = loss_fn.forward(ex_ref, ex_p0)
# ex_d0l = loss_fn.forward(ex_ref, ex_p0l)
# ex_d0r = loss_fn.forward(ex_ref, ex_p0r)
ex_d1 = loss_fn.forward(ex_ref, ex_p1)
ex_d2 = loss_fn.forward(ex_ref, ex_p2)
if not spatial:
# print('SPATIAL fov %d Distances: OUR %.3f, NERF %.3f' % (fov, ex_d0, ex_d1))
print('fov %d Distances: OUR %.3f, NeRF %.3f, FOVA %.3f' % (fov, ex_d0, ex_d1, ex_d2))
result[sceneID*fovCount + int(fov / 5 - 1) + 1, 0] = sceneID # scene id
result[sceneID*fovCount + int(fov / 5 - 1) + 1, 1] = fov
result[sceneID*fovCount + int(fov / 5 - 1) + 1, 0 * imgCount + 2+imgID] = ex_d0
result[sceneID*fovCount + imgID * 21 + int(fov / 5 - 1) + 1, 1 * imgCount + 2+imgID] = ex_d1
result[sceneID * fovCount + int(fov / 5 - 1) + 1, 2 * imgCount + 2+imgID] = ex_d2
anova[sceneID * imgCount + imgID + 1, 2 + 3 * int(fov / 5 - 1)] = ex_d0
anova[sceneID * imgCount + imgID + 1, 2 + 3 * int(fov / 5 - 1)+1] = ex_d1
anova[sceneID * imgCount + imgID + 1, 2 + 3 * int(fov / 5 - 1)+2] = ex_d2
result4curve[sceneID * fovCount * imgCount + imgID * fovCount + int(fov / step - 1)] = [sceneID, fov, ex_d0, ex_d1, ex_d2, imgID]
else:
print('fov %d Distances: OUR %.3f, NeRF %.3f, FOVA %.3f' % (
fov, ex_d0.mean(), ex_d1.mean(), ex_d2.mean())) # The mean distance is approximately the same as the non-spatial distance
exd0mean = ex_d0.mean()
exd1mean = ex_d1.mean()
exd2mean = ex_d2.mean()
# print('fov %d Distances: OUR %.3f,, NeRF %.3f' % (
# fov, ex_d0.mean(), ex_d1.mean(),
# )) # The mean distance is approximately the same as the non-spatial distance
result[sceneID * fovCount + int((fov-5) / step) + 1, 0] = sceneID # scene id
result[sceneID * fovCount + int((fov-5) / step) + 1, 1] = fov
result[sceneID * fovCount + int((fov-5) / step) + 1, 0 * imgCount + 2 + imgID] = ex_d0.mean()
# result[sceneID * fovCount + int((fov - 5) / step) + 1, 1 * imgCount + 2 + imgID] = ex_d0r.mean()
result[sceneID * fovCount + int((fov-5) / step) + 1, 1 * imgCount + 2 + imgID] = ex_d1.mean()
result[sceneID * fovCount + int((fov-5) / step) + 1, 2 * imgCount + 2+imgID] = ex_d2.mean()
fovidx = int((fov-5) / step)
anova[sceneID * imgCount + imgID + 1, 2 +fovidx ] = ex_d0.mean()
# anova[sceneID * imgCount + imgID + 1, 2 + fovCount + fovidx] = ex_d0r.mean()
anova[sceneID * imgCount + imgID + 1, 2 + fovCount * 1 + fovidx] = ex_d1.mean()
anova[sceneID * imgCount + imgID + 1, 2 + fovCount * 2 + fovidx] = ex_d2.mean()
# DEBUG
idx = sceneID * fovCount * imgCount + imgID * fovCount + int((fov-5) / step)
# print("idx",idx,"result4curve.shape",result4curve.shape)
# print(sceneID, fov, exd0mean, exd0mean.detach().numpy(), imgID)
result4curve[idx] = [sceneID, fov, exd0mean.detach().numpy(), exd1mean.detach().numpy(), exd2mean.detach().numpy(), imgID]
# Visualize a spatially-varying distance map between ex_p0 and ex_ref
# import pylab
# pylab.imshow(ex_d0[0, 0, ...].data.cpu().numpy())
# pylab.show()
# np.savetxt(f, anova[(sceneID) * 8+1:sceneID * 8+9], delimiter=',')
np.savetxt('lpips_curve_125_' + scene + '.csv', result4curve, delimiter=',')
np.savetxt('lpips_fov_125_' + scene+'.csv', result, delimiter=',')
np.savetxt('lpips_anova_125_' + scene+'.csv', anova, delimiter=',')
# crop_img = img[y:y+h, x:x+w]
# ex_ref = lpips.im2tensor(lpips.load_image('./imgs/ex_ref.png'))
# ex_p0 = lpips.im2tensor(lpips.load_image('./imgs/ex_p0.png'))
# ex_p1 = lpips.im2tensor(lpips.load_image('./imgs/ex_p1.png'))
| [
11748,
28034,
198,
11748,
300,
79,
2419,
198,
2,
422,
6101,
7535,
1330,
11525,
198,
11748,
269,
85,
17,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
28686,
198,
6738,
28686,
1330,
1351,
15908,
198,
6738,
28686,
13,
6978,
1330,
318,
... | 1.910831 | 5,069 |
from __future__ import print_function
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
import json
# from google.auth.transport.requests import Request
# Better to use read only scope for not modifying the contents accidentally
SCOPES = ['https://www.googleapis.com/auth/spreadsheets.readonly']
# The ID and range of a sample spreadsheet.
RANGE_NAME = "A:Z"
# If run this function directly then it will generate two filders in the PWD
# one containing the data (modified, see below) from sheets API and the other
# containing the required dict
def convert_form_to_dict(SPREADSHEET_ID):
"""Uses sheets API to obtain result
Returns the required formatted list containing
nested dicts of responses obtained from the google sheet
"""
creds = None
flow = InstalledAppFlow.from_client_secrets_file(
'credentials.json', SCOPES)
creds = flow.run_local_server()
service = build('sheets', 'v4', credentials=creds)
# Call the Sheets API
sheet = service.spreadsheets()
result = sheet.values().get(spreadsheetId=SPREADSHEET_ID,
range=RANGE_NAME,
majorDimension='ROWS').execute()
values = result.get('values', [])
# Sheets api removing trailing empty spaces in the result
# If somebody has not filled some columns at the end
# then it will not be there in the json data
# example only till column[2] is filled then the rest will not
# be there in the json data of the API
# We will add a "" instead
# This is not the case when some data between is missing inbetween
# for example column[2] is missing but onwards are filled
# sheet automatically adds "" in this case
for item in result['values']:
length = len(item)
if (length < 7):
while(length != 7):
item.append("")
length = length + 1
print('{} values received'.format(len(values)))
all_responses = []
# Obtaining all course codes and making primary keys in Dict, appending
# this into the list
# Also renaming headings as they are in the wiki
values[0][2] = 'Concepts taught in class'
values[0][3] = 'Student Opinion'
values[0][4] = 'How to Crack the Paper'
values[0][5] = 'Classroom resources'
for item in values[1:]:
dict_format = {}
dict_format['Course Code'] = item[1]
dict_format['Timestamp'] = []
for element in values[0][2:]:
dict_format[element] = []
all_responses.append(dict_format)
# filling all the data into the required course code
for item in values[1:]:
for course_data in all_responses:
if(course_data['Course Code'] == item[1]):
course_data['Timestamp'].append(item[0])
index = 2
# ignoring the empty entries
for element in values[0][2:]:
if(item[index] != ""):
course_data[element].append(item[index])
index = index + 1
break
total = [all_responses, result]
return total[0]
if __name__ == '__main__':
answer = convert_form_to_dict()
with open('result.json', 'w') as f:
json.dump(answer[1], f, indent=2)
with open('required_dict.json', 'w') as f:
json.dump(answer[0], f, indent=2)
| [
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
6738,
23645,
499,
291,
75,
1153,
13,
67,
40821,
1330,
1382,
198,
6738,
23645,
62,
18439,
62,
12162,
1071,
8019,
13,
11125,
1330,
2262,
4262,
4677,
37535,
198,
11748,
33918,
198,
2,
4... | 2.539941 | 1,352 |
"""
Name: Functions for customer data
Developer: Matt Clarke
Date: Jan 1, 2020
Description: Specific functions for generating customer data including RFM scores.
"""
from lifetimes.utils import summary_data_from_transaction_data
def rfm_model(df, customer_column, date_column, monetary_column):
"""Return an RFM score for each customer using the Lifetimes RFM model.
This score is calculated across the whole DataFrame, so if you have a
customer with numerous orders, it will calculate one value and apply
it across all orders and won't calculate the figure historically.
Args:
:param df: Pandas DataFrame
:param monetary_column: Column containing monetary value of order
:param date_column: Column containing date
:param customer_column: Column containing customer
Returns:
New DataFrame containing RFM data by customer.
T is equal to days since first order and end of period.
Customers with 1 order will be assigned 0 for RFM scores.
"""
# Ensure that inf and NaN values are filled
rfm_df = summary_data_from_transaction_data(df,
customer_column,
date_column,
monetary_value_col=monetary_column)
return rfm_df
| [
37811,
198,
5376,
25,
40480,
329,
6491,
1366,
198,
45351,
25,
4705,
19635,
198,
10430,
25,
2365,
352,
11,
12131,
198,
11828,
25,
17377,
5499,
329,
15453,
6491,
1366,
1390,
20445,
44,
8198,
13,
198,
37811,
198,
198,
6738,
3868,
46874,
... | 2.691089 | 505 |
from . import api
from flask import jsonify
| [
6738,
764,
1330,
40391,
198,
6738,
42903,
1330,
33918,
1958,
628
] | 4.090909 | 11 |
sum = float(input())
counter_of_coins = 0
sum = int(sum*100)
counter_of_coins += sum // 200
sum = sum % 200
counter_of_coins += sum // 100
sum = sum % 100
counter_of_coins += sum // 50
sum = sum % 50
counter_of_coins += sum // 20
sum = sum % 20
counter_of_coins += sum // 10
sum = sum % 10
counter_of_coins += sum // 5
sum = sum % 5
counter_of_coins += sum // 2
sum = sum % 2
if sum == 1:
counter_of_coins += 1
print(int(counter_of_coins)) | [
16345,
796,
12178,
7,
15414,
28955,
198,
198,
24588,
62,
1659,
62,
14624,
796,
657,
198,
16345,
796,
493,
7,
16345,
9,
3064,
8,
198,
24588,
62,
1659,
62,
14624,
15853,
2160,
3373,
939,
198,
16345,
796,
2160,
4064,
939,
198,
24588,
6... | 2.686747 | 166 |
from __future__ import print_function
import torch
from torch.autograd import Variable
import cv2
import time
from imutils.video import FPS, WebcamVideoStream
import argparse
parser = argparse.ArgumentParser(description='Single Shot MultiBox Detection')
parser.add_argument('--weights', default='weights/ssd_300_VOC0712.pth',
type=str, help='Trained state_dict file path')
parser.add_argument('--cuda', default=False, type=bool,
help='Use cuda in live demo')
args = parser.parse_args()
COLORS = [(255, 0, 0), (0, 255, 0), (0, 0, 255)]
FONT = cv2.FONT_HERSHEY_SIMPLEX
if __name__ == '__main__':
import sys
from os import path
sys.path.append(path.dirname(path.dirname(path.abspath(__file__))))
from data import BaseTransform, VOC_CLASSES as labelmap
from ssd import build_ssd
if torch.cuda.is_available():
if args.cuda:
torch.set_default_tensor_type('torch.cuda.FloatTensor')
if not args.cuda:
print("WARNING: It looks like you have a CUDA device, but aren't using \ CUDA. Run with --cuda for optimal eval speed.")
torch.set_default_tensor_type('torch.FloatTensor')
else:
torch.set_default_tensor_type('torch.FloatTensor')
net = build_ssd('test', 300, 21) # initialize SSD
net.load_state_dict(torch.load(args.weights))
transform = BaseTransform(net.size, (104/256.0, 117/256.0, 123/256.0))
fps = FPS().start()
cv2_demo(net.eval(), transform)
# stop the timer and display FPS information
fps.stop()
print("[INFO] elasped time: {:.2f}".format(fps.elapsed()))
print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))
# cleanup
cv2.destroyAllWindows()
stream.stop()
| [
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
11748,
28034,
198,
6738,
28034,
13,
2306,
519,
6335,
1330,
35748,
198,
11748,
269,
85,
17,
198,
11748,
640,
198,
6738,
545,
26791,
13,
15588,
1330,
22082,
11,
5313,
20991,
10798,
12124... | 2.497143 | 700 |
INSTRS = []
with open("day5.input") as f:
INSTRS = [int(l.strip()) for l in f.readlines()]
# INSTRS = [0, 3, 0, 1, -3]
pc = 0
steps = 0
while pc >= 0 and pc < len(INSTRS):
offset = INSTRS[pc]
if offset >= 3:
INSTRS[pc] -= 1
else:
INSTRS[pc] += 1
pc += offset
steps += 1
print steps
| [
38604,
6998,
796,
17635,
198,
198,
4480,
1280,
7203,
820,
20,
13,
15414,
4943,
355,
277,
25,
198,
220,
220,
220,
40589,
6998,
796,
685,
600,
7,
75,
13,
36311,
28955,
329,
300,
287,
277,
13,
961,
6615,
3419,
60,
198,
198,
2,
40589,... | 2.075472 | 159 |
import datetime
import time
import warnings
import stripe
from stripe.test.helper import (StripeResourceTest, DUMMY_PLAN)
| [
11748,
4818,
8079,
198,
11748,
640,
198,
11748,
14601,
198,
198,
11748,
39858,
198,
6738,
39858,
13,
9288,
13,
2978,
525,
1330,
357,
1273,
380,
431,
26198,
14402,
11,
360,
5883,
26708,
62,
6489,
1565,
8,
628,
628
] | 3.315789 | 38 |
import argparse, os
from xml.dom.minidom import parse
parser = argparse.ArgumentParser()
parser.add_argument('--pom_file_path', type=str, default=None)
args = parser.parse_args()
pom_file_path = args.pom_file_path
"""
此脚本应用于修改maven pom.xml文件,可以将jsii生成的pom.xml文件转化成可进行maven发布的pom.xml文件
输入: pom_file_path
"""
if __name__ == '__main__':
write_xml()
| [
11748,
1822,
29572,
11,
28686,
198,
6738,
35555,
13,
3438,
13,
1084,
312,
296,
1330,
21136,
198,
198,
48610,
796,
1822,
29572,
13,
28100,
1713,
46677,
3419,
198,
48610,
13,
2860,
62,
49140,
10786,
438,
79,
296,
62,
7753,
62,
6978,
325... | 1.816327 | 196 |
from typing import Dict, List, Union
from numpy import float64
from gdsfactory.port import Port
def flip(port: Port) -> Port:
"""Returns port copy with Flip Port orientation."""
return port.flip()
def direction_ports_from_list_ports(optical_ports: List[Port]) -> Dict[str, List[Port]]:
"""Returns a dict of WENS ports."""
direction_ports = {x: [] for x in ["E", "N", "W", "S"]}
for p in optical_ports:
p.orientation = (p.orientation + 360.0) % 360
if p.orientation <= 45.0 or p.orientation >= 315:
direction_ports["E"].append(p)
elif p.orientation <= 135.0 and p.orientation >= 45.0:
direction_ports["N"].append(p)
elif p.orientation <= 225.0 and p.orientation >= 135.0:
direction_ports["W"].append(p)
else:
direction_ports["S"].append(p)
for direction, list_ports in list(direction_ports.items()):
if direction in ["E", "W"]:
list_ports.sort(key=lambda p: p.y)
if direction in ["S", "N"]:
list_ports.sort(key=lambda p: p.x)
return direction_ports
def check_ports_have_equal_spacing(list_ports: List[Port]) -> float64:
"""Returns port separation. Raises error if not constant."""
if not isinstance(list_ports, list):
raise ValueError(f"list_ports should be a list of ports, got {list_ports}")
if not list_ports:
raise ValueError("list_ports should not be empty")
orientation = get_list_ports_angle(list_ports)
if orientation in [0, 180]:
xys = [p.y for p in list_ports]
else:
xys = [p.x for p in list_ports]
seps = [round(abs(c2 - c1), 5) for c1, c2 in zip(xys[1:], xys[:-1])]
different_seps = set(seps)
if len(different_seps) > 1:
raise ValueError("Ports should have the same separation. Got {different_seps}")
return different_seps.pop()
def get_list_ports_angle(list_ports: List[Port]) -> Union[float64, int]:
"""Returns the orientation/angle (in degrees) of a list of ports."""
if not list_ports:
return None
if len({p.orientation for p in list_ports}) > 1:
raise ValueError(f"All port angles should be the same. Got {list_ports}")
return list_ports[0].orientation
if __name__ == "__main__":
import gdsfactory as gf
c = gf.components.mmi1x2()
d = direction_ports_from_list_ports(c.get_ports_list())
c.show()
| [
6738,
19720,
1330,
360,
713,
11,
7343,
11,
4479,
198,
198,
6738,
299,
32152,
1330,
12178,
2414,
198,
198,
6738,
308,
9310,
69,
9548,
13,
634,
1330,
4347,
628,
198,
4299,
14283,
7,
634,
25,
4347,
8,
4613,
4347,
25,
198,
220,
220,
2... | 2.434476 | 992 |
import tweepy
from config import create_api
# auth = tweepy.OAuthHandler("CONSUMER_KEY", "CONSUMER_SECRET")
# auth.set_access_token("ACCESS_TOKEN", "ACCESS_TOKEN_SECRET")
| [
11748,
4184,
538,
88,
201,
198,
201,
198,
6738,
4566,
1330,
2251,
62,
15042,
201,
198,
201,
198,
201,
198,
2,
6284,
796,
4184,
538,
88,
13,
23621,
1071,
25060,
7203,
10943,
50,
5883,
1137,
62,
20373,
1600,
366,
10943,
50,
5883,
1137... | 2.381579 | 76 |
from contextlib import ExitStack
from trace_cockpit.models import TraceConfig
| [
6738,
4732,
8019,
1330,
29739,
25896,
198,
198,
6738,
12854,
62,
21517,
15544,
13,
27530,
1330,
34912,
16934,
628,
628
] | 4.1 | 20 |
# -*- coding: utf-8 -*-
# Copyright 2019 ICON Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
from collections import namedtuple
from typing import TYPE_CHECKING, Optional, Tuple, List, Set
from iconcommons import Logger
from ..reward_calc.msg_data import Header, TxData, PRepsData, TxType, make_block_produce_info_key
from ...base.exception import DatabaseException, InternalServiceErrorException
from ...database.db import KeyValueDatabase
from ...icon_constant import (
DATA_BYTE_ORDER, Revision, RC_DATA_VERSION_TABLE, RC_DB_VERSION_0,
IISS_LOG_TAG, ROLLBACK_LOG_TAG
)
from ...iiss.reward_calc.data_creator import DataCreator
from ...utils import bytes_to_hex
from ...utils.msgpack_for_db import MsgPackForDB
if TYPE_CHECKING:
from ...base.address import Address
from ...database.wal import IissWAL
from ..reward_calc.msg_data import Data, DelegationInfo
from ...iconscore.icon_score_context import IconScoreContext
RewardCalcDBInfo = namedtuple('RewardCalcDBInfo', ['path', 'block_height'])
class Storage(object):
"""Manages RC DB which Reward Calculator will use to calculate a reward for each address
"""
CURRENT_IISS_DB_NAME = "current_db"
STANDBY_IISS_DB_NAME_PREFIX = "standby_rc_db"
IISS_RC_DB_NAME_PREFIX = "iiss_rc_db"
KEY_FOR_GETTING_LAST_TRANSACTION_INDEX = b'last_transaction_index'
KEY_FOR_CALC_RESPONSE_FROM_RC = b'calc_response_from_rc'
KEY_FOR_VERSION_AND_REVISION = b'version_and_revision'
@property
@classmethod
@classmethod
@classmethod
def close(self):
"""Close the embedded database.
"""
if self._db:
self._db.close()
self._db = None
@staticmethod
# todo: naming
@staticmethod
def replace_db(self, block_height: int) -> 'RewardCalcDBInfo':
"""
1. Rename current_db to standby_db_{block_height}
2. Create a new current_db for the next calculation period
:param block_height: End block height of the current calc period
:return:
"""
# rename current db -> standby db
assert block_height > 0
self._db.close()
standby_db_path: str = self.rename_current_db_to_standby_db(self._path, block_height)
self._db = self.create_current_db(self._path)
return RewardCalcDBInfo(standby_db_path, block_height)
@classmethod
def finalize_iiss_db(cls,
prev_end_bh: int,
current_db: 'KeyValueDatabase',
prev_db_path: str):
"""
Finalize iiss db before sending to reward calculator (i.e. RC). Process is below
1. Move last Block produce data to previous iiss_db which is to be sent to RC
2. db compaction
:param prev_end_bh: end block height of previous term
:param current_db: newly created db
:param prev_db_path: iiss_db path which is to be finalized and sent to RC (must has been closed)
:return:
"""
bp_key: bytes = make_block_produce_info_key(prev_end_bh)
prev_db: 'KeyValueDatabase' = KeyValueDatabase.from_path(prev_db_path)
cls._move_data_from_current_db_to_prev_db(bp_key,
current_db,
prev_db)
prev_db.close()
cls._process_db_compaction(prev_db_path)
@classmethod
@classmethod
def _process_db_compaction(cls, path: str):
"""
There is compatibility issue between C++ levelDB and go levelDB.
To solve it, should make DB being compacted before reading (from RC).
:param path: DB path to compact
:return:
"""
db = KeyValueDatabase.from_path(path)
db.close()
@classmethod
@classmethod
@classmethod
def get_total_elected_prep_delegated_snapshot(self) -> int:
"""
total_elected_prep_delegated_snapshot =
the delegated amount which the elected P-Reps received at the beginning of this term
- the delegated amount which unregistered P-Reps received in this term
This function is only intended for state backward compatibility
and not used any more after revision is set to 7.
"""
unreg_preps: Set['Address'] = set()
db = self._db.get_sub_db(TxData.PREFIX)
for k, v in db.iterator():
data: 'TxData' = TxData.from_bytes(v)
if data.type == TxType.PREP_UNREGISTER:
unreg_preps.add(data.address)
db = self._db.get_sub_db(PRepsData.PREFIX)
preps: Optional[List['DelegationInfo']] = None
for k, v in db.iterator():
data: 'PRepsData' = PRepsData.from_bytes(k, v)
preps = data.prep_list
break
ret = 0
if preps:
for info in preps:
if info.address not in unreg_preps:
ret += info.value
Logger.info(tag=IISS_LOG_TAG,
msg=f"get_total_elected_prep_delegated_snapshot load: {ret}")
return ret
class IissDBNameRefactor(object):
"""Change iiss_db name: remove revision from iiss_db name
"""
_DB_NAME_PREFIX = Storage.IISS_RC_DB_NAME_PREFIX
@classmethod
@classmethod
@classmethod
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
2,
15069,
13130,
314,
10943,
5693,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
7... | 2.388957 | 2,463 |
import django_version
import authorized
import handlers
| [
11748,
42625,
14208,
62,
9641,
198,
11748,
10435,
198,
11748,
32847,
628,
198
] | 4.461538 | 13 |
#
# Copyright (C) 2014-2021 S[&]T, The Netherlands.
#
from __future__ import absolute_import, division, print_function
__version__ = "5.1"
__copyright__ = "Copyright (C) 2014-2021 S[&]T, The Netherlands."
__all__ = ["Error", "InternalError", "Struct", "Archive", "open", "config_path"]
import os as _os
from muninn.archive import Archive, create as _create_archive
from muninn.exceptions import *
from muninn.struct import Struct
| [
2,
198,
2,
15069,
357,
34,
8,
1946,
12,
1238,
2481,
311,
58,
5,
60,
51,
11,
383,
12671,
13,
198,
2,
198,
198,
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
11,
7297,
11,
3601,
62,
8818,
198,
198,
834,
9641,
834,
796,
366,
20... | 3.055556 | 144 |
from typing import Any, Iterable, Optional
from explainaboard import feature
from explainaboard.tasks import TaskType
from explainaboard.info import SysOutputInfo
class Processor:
"""Base case for task-based processor"""
_features: feature.Features
_task_type: TaskType
| [
6738,
19720,
1330,
4377,
11,
40806,
540,
11,
32233,
198,
6738,
4727,
397,
11953,
1330,
3895,
198,
6738,
4727,
397,
11953,
13,
83,
6791,
1330,
15941,
6030,
198,
6738,
4727,
397,
11953,
13,
10951,
1330,
311,
893,
26410,
12360,
628,
198,
... | 3.594937 | 79 |
# Generated by Django 3.0.4 on 2020-03-23 02:58
from decimal import Decimal
from django.conf import settings
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
import jsonfield.fields
import multiselectfield.db.fields
| [
2,
2980,
515,
416,
37770,
513,
13,
15,
13,
19,
319,
12131,
12,
3070,
12,
1954,
7816,
25,
3365,
198,
198,
6738,
32465,
1330,
4280,
4402,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
11748,
42625,
14208,
13,
7295,
13,
12102,
2... | 3.282609 | 92 |
"""State Generator"""
from lesson17_projects.pen.auto_gen.data.const import A, INIT, IS, PEN, THIS
# States
from lesson18_projects.pen.auto_gen.code.states1.init_this_is_a import InitThisIsAState
from lesson18_projects.pen.auto_gen.code.states1.init_this_is import InitThisIsState
from lesson18_projects.pen.auto_gen.code.states1.init_this import InitThisState
from lesson18_projects.pen.auto_gen.code.states1.init import InitState
from lesson18_projects.pen.auto_gen.code.states1.pen import PenState
# State wrapper
from lesson18_projects.pen.code.states.init import create_init
from lesson18_projects.pen.code.states.init_this import create_init_this
from lesson18_projects.pen.code.states.init_this_is import create_init_this_is
from lesson18_projects.pen.code.states.init_this_is_a import create_init_this_is_a
from lesson18_projects.pen.code.states.pen import create_pen
# ステートを使い回すのではなく、アクセスするたびに ステートの生成を実行しなおせるよう、ラムダ関数を返します
state_gen_doc = {
INIT: {
"": lambda: create_init(InitState()),
THIS: {
"": lambda: create_init_this(InitThisState()),
IS: {
"": lambda: create_init_this_is(InitThisIsState()),
A: {
"": lambda: create_init_this_is_a(InitThisIsAState()),
},
},
},
},
PEN: lambda: create_pen(PenState()),
}
| [
37811,
9012,
35986,
37811,
198,
6738,
11483,
1558,
62,
42068,
13,
3617,
13,
23736,
62,
5235,
13,
7890,
13,
9979,
1330,
317,
11,
3268,
2043,
11,
3180,
11,
350,
1677,
11,
12680,
198,
198,
2,
1829,
198,
6738,
11483,
1507,
62,
42068,
13... | 2.277129 | 599 |
# hotkey_utils.py - bNull
#
# Some useful shortcuts for binding to hotkeys. Current output/hotkeys:
#
# [+] Bound make_dwords to Ctrl-Alt-D
# [+] Bound make_cstrings to Ctrl-Alt-A
# [+] Bound make_offset to Ctrl-Alt-O
import idaapi
import idc
import inspect
def selection_is_valid(selection, ea):
"""If the cursor is not at the beginning or the end of our selection, assume that
something bad has gone wrong and bail out instead of turning a lot of important
things into dwords.
"""
if not (ea == selection[1] or ea == selection[2]-1):
return False
else:
return True
def cool_to_clobber(ea):
"""Verify whether or not the byte is somethng that we'll regret clobbering at
some later point
"""
# Currently, just check to see if there's an instruction defined there.
# TODO: Check for additional things would not be cool-to-clobber.
if idc.GetMnem(ea):
return False
else:
return True
def get_selected_bytes():
"""Highlight a range and turn it into dwords
NOTE: read_selection appears to be a fickle bitch. You absolutely have to
select more than one line at a time in order for it to work as expected.
"""
selected = idaapi.read_selection()
curr_ea = idc.ScreenEA()
print "[+] Processing range: %x - %x" % (selected[1],selected[2])
# refer to selection_is_valid comments regarding the need for this check
if (selection_is_valid(selected, curr_ea)):
return selected
else:
return None
def make_cstrings():
"""Highlight a range and turn it into c-style strings
NOTE: read_selection appears to be a fickle bitch. You absolutely have to
select more than one line at a time in order for it to work as expected.
"""
# TODO check to verify that each byte is valid ascii
selected = get_selected_bytes()
if selected:
curr_start = selected[1]
curr_length = 0
for ea in range(selected[1], selected[2]):
if not cool_to_clobber(ea):
print "[-] Error: Something that we shouldn't clobber at 0x%x" % ea
break
curr_byte = idaapi.get_byte(ea)
curr_length += 1
if curr_byte == 0:
if curr_length > 1:
idaapi.doASCI(curr_start,curr_length)
curr_length = 0
curr_start = ea + 1
else:
curr_length = 0
curr_start = ea + 1
else:
print "[-] Error: EA is not currently a selection endpoint %x" % idc.ScreenEA()
def make_offset():
"""Resolve an offset to a pointer
For some reason, it seems as though IDA will not auto-define a pointer DWORD. Ex:
.rodata:08E30000 dd 8271234h
In the case that 0x8271234 is actually a function, resolving the offset will
result in:
.rodata:08E30000 dd offset _ZN29ClassAD1Ev ; ClassA::~ClassA()
"""
idc.OpOffset(idc.ScreenEA(),0)
load_hotkeys() | [
2,
3024,
2539,
62,
26791,
13,
9078,
532,
275,
35067,
198,
2,
220,
198,
2,
2773,
4465,
32953,
329,
12765,
284,
3024,
13083,
13,
9236,
5072,
14,
8940,
13083,
25,
198,
2,
198,
2,
47175,
30149,
787,
62,
67,
10879,
284,
19212,
12,
2916... | 2.368381 | 1,303 |
import math
import unittest
import numpy
import six
import chainer
from chainer import cuda
from chainer import functions
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
from chainer.testing import condition
testing.run_module(__name__, __file__)
| [
11748,
10688,
198,
11748,
555,
715,
395,
198,
198,
11748,
299,
32152,
198,
11748,
2237,
198,
198,
11748,
6333,
263,
198,
6738,
6333,
263,
1330,
269,
15339,
198,
6738,
6333,
263,
1330,
5499,
198,
6738,
6333,
263,
1330,
31312,
62,
9122,
... | 3.581395 | 86 |
#!/usr/bin/python
import tarfile
unsafe_filename_tar = sys.argv[1]
safe_filename_tar = "safe_path.tar"
tar = tarfile.open(safe_filename_tar)
for entry in tar:
tar.extract(entry)
tar = tarfile.open(unsafe_filename_tar)
tar.extractall()
tar.close()
tar = tarfile.open(unsafe_filename_tar)
for entry in tar:
tar.extract(entry)
tar = tarfile.open(safe_filename_tar)
tar.extractall()
tar.close()
#Sanitized
tar = tarfile.open(unsafe_filename_tar)
for entry in tar:
if os.path.isabs(entry.name) or ".." in entry.name:
raise ValueError("Illegal tar archive entry")
tar.extract(entry, "/tmp/unpack/")
#Part Sanitized
tar = tarfile.open(unsafe_filename_tar)
for entry in tar:
if ".." in entry.name:
raise ValueError("Illegal tar archive entry")
tar.extract(entry, "/tmp/unpack/")
#Unsanitized members
tar = tarfile.open(unsafe_filename_tar)
tar.extractall(members=tar)
#Sanitize members
tar = tarfile.open(unsafe_filename_tar)
tar.extractall(members=safemembers(tar))
# Wrong sanitizer (is missing not)
tar = tarfile.open(unsafe_filename_tar)
for entry in tar:
if os.path.isabs(entry.name) or ".." in entry.name:
tar.extract(entry, "/tmp/unpack/")
# OK Sanitized using not
tar = tarfile.open(unsafe_filename_tar)
for entry in tar:
if not (os.path.isabs(entry.name) or ".." in entry.name):
tar.extract(entry, "/tmp/unpack/")
# The following two variants are included by purpose, since by default there is a
# difference in handling `not x` and `not (x or False)` when overriding
# Sanitizer.sanitizingEdge. We want to ensure we handle both consistently.
# Not reported, although vulnerable to '..'
tar = tarfile.open(unsafe_filename_tar)
for entry in tar:
if not (os.path.isabs(entry.name) or False):
tar.extract(entry, "/tmp/unpack/")
# Not reported, although vulnerable to '..'
tar = tarfile.open(unsafe_filename_tar)
for entry in tar:
if not os.path.isabs(entry.name):
tar.extract(entry, "/tmp/unpack/")
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
11748,
13422,
7753,
198,
198,
13271,
8635,
62,
34345,
62,
18870,
796,
25064,
13,
853,
85,
58,
16,
60,
198,
21230,
62,
34345,
62,
18870,
796,
366,
21230,
62,
6978,
13,
18870,
1,
628,
198,
... | 2.634691 | 761 |
import os
import io
import sys
import setuptools
from setuptools import setup
version = "1.0.0"
with io.open('README.md', 'r', encoding='utf-8') as readme_file:
readme = readme_file.read()
if sys.argv[-1] == 'readme':
print(readme)
sys.exit()
def parse_requirements(filename):
""" load requirements from a pip requirements file """
lineiter = (line.strip() for line in open(filename))
return [line for line in lineiter if line and not line.startswith("#")]
# parse_requirements() returns generator of pip.req.InstallRequirement objects
install_reqs = parse_requirements('./requirements.txt')
# e.g. ['django==1.5.1', 'mezzanine==1.4.6']
reqs = [str(ir) for ir in install_reqs if not str(ir).startswith("-") ]
setup(
name='insta_crawler',
version=version,
description=('Scraping Instagram profils'),
long_description=readme,
long_description_content_type='text/markdown',
author='Ghassen Chaabouni',
author_email='ghassen1302@live.com',
packages=[
'insta_crawler',
],
# packages= setuptools.find_packages(),
package_dir={'insta_crawler': 'insta_crawler'},
include_package_data=True,
install_requires=reqs,
license='MIT',
entry_points={
'console_scripts': [
'insta=insta_crawler.__main__:main',
]
}
)
| [
11748,
28686,
198,
11748,
33245,
198,
11748,
25064,
198,
11748,
900,
37623,
10141,
198,
198,
6738,
900,
37623,
10141,
1330,
9058,
198,
198,
9641,
796,
366,
16,
13,
15,
13,
15,
1,
198,
198,
4480,
33245,
13,
9654,
10786,
15675,
11682,
1... | 2.522727 | 528 |
import sys
from pathlib import Path
import environ
env = environ.Env()
env_file = ".env"
# This works good enough for the console, pycharm and travis ci
TESTING = sys.argv[1:2] == ["test"] or "pytest" in sys.modules
if env.str("ENV_PATH", None):
env_file = env.str("ENV_PATH")
assert Path(env_file).is_file()
elif TESTING:
# This anchoring allows to run tests below the project root
env_file = Path(__file__).parent.parent.parent.joinpath("etc/test.env")
assert env_file.is_file(), "The test env is missing"
env.read_env(str(env_file))
| [
11748,
25064,
198,
6738,
3108,
8019,
1330,
10644,
198,
198,
11748,
551,
2268,
198,
198,
24330,
796,
551,
2268,
13,
4834,
85,
3419,
198,
24330,
62,
7753,
796,
27071,
24330,
1,
198,
198,
2,
770,
2499,
922,
1576,
329,
262,
8624,
11,
12... | 2.692308 | 208 |
from .connection import LDAPConnection, Server
__all__ = ['LDAPConnection', 'Server']
__version__ = '0.4.2'
| [
6738,
764,
38659,
1330,
27178,
2969,
32048,
11,
9652,
198,
198,
834,
439,
834,
796,
37250,
11163,
2969,
32048,
3256,
705,
10697,
20520,
198,
198,
834,
9641,
834,
796,
705,
15,
13,
19,
13,
17,
6,
198
] | 2.972973 | 37 |
"""
Example EDMC plugin.
It adds a single button to the EDMC interface that displays the number of times it has been clicked.
"""
# Copyright (c) 2020 Club Raiders Project
# https://github.com/HausReport/ClubRaiders
#
# SPDX-License-Identifier: BSD-3-Clause
import logging
import os
import tkinter as tk
from typing import Optional
try:
import myNotebook as nb
from config import appname, config
except ImportError:
pass
import GlobalDictionaries
from helpers.DiscordReporter import DiscordReporter
GlobalDictionaries.init_logger()
GlobalDictionaries.load_addresses()
from helpers.DailyPlan import DailyPlan
from helpers.DailyPlans import DailyPlans
from helpers.LogReporter import LogReporter
logger = GlobalDictionaries.logger
logReporter: LogReporter = LogReporter(logger)
logger.info("Test log msg")
logging.info("This is a second log msg")
class BgsBuddy:
"""
ClickCounter implements the EDMC plugin interface.
It adds a button to the EDMC UI that displays the number of times it has been clicked, and a preference to set
the number directly.
"""
def on_load(self) -> str:
"""
on_load is called by plugin_start3 below.
It is the first point EDMC interacts with our code after loading our module.
:return: The name of the plugin, which will be used by EDMC for logging and for the settings window
"""
return GlobalDictionaries.plugin_name
def on_unload(self) -> None:
"""
on_unload is called by plugin_stop below.
It is the last thing called before EDMC shuts down. :1
Note that blocking code here will hold the shutdown process.
"""
self.on_preferences_closed("", False) # Save our prefs
def setup_preferences(self, parent: nb.Notebook, cmdr: str, is_beta: bool) -> Optional[tk.Frame]:
"""
setup_preferences is called by plugin_prefs below.
It is where we can setup our own settings page in EDMC's settings window. Our tab is defined for us.
:param parent: the tkinter parent that our returned Frame will want to inherit from
:param cmdr: The current ED Commander
:param is_beta: Whether or not EDMC is currently marked as in beta mode
:return: The frame to add to the settings window
"""
current_row = 0
frame = nb.Frame(parent)
# setup our config in a "Click Count: number"
nb.Label(frame, text='Click Count').grid(row=current_row)
nb.Entry(frame, textvariable=self.click_count).grid(row=current_row, column=1)
current_row += 1 # Always increment our row counter, makes for far easier tkinter design.
return frame
def on_preferences_closed(self, cmdr: str, is_beta: bool) -> None:
"""
on_preferences_closed is called by prefs_changed below.
It is called when the preferences dialog is dismissed by the user.
:param cmdr: The current ED Commander
:param is_beta: Whether or not EDMC is currently marked as in beta mode
"""
config.set('click_counter_count', self.click_count.get())
def setup_main_ui(self, parent: tk.Frame) -> tk.Frame:
"""
Create our entry on the main EDMC UI.
This is called by plugin_app below.
:param parent: EDMC main window Tk
:return: Our frame
"""
current_row = 0
frame = tk.Frame(parent)
button = tk.Button(
frame,
text="Count me",
command=lambda: self.click_count.set(str(int(self.click_count.get()) + 1))
)
button.grid(row=current_row)
current_row += 1
nb.Label(frame, text="Count:").grid(row=current_row, sticky=tk.W)
nb.Label(frame, textvariable=self.click_count).grid(row=current_row, column=1)
return frame
cmdrNameSet = False
cc = BgsBuddy()
samplePlan: DailyPlan = DailyPlan("LHS 2477", "Federal Reclamation Co", "Hodack Prison Colony")
samplePlan.addMissionInfluenceGoal(60)
samplePlan.addBountyGoal(16000000)
samplePlan.addCartographyGoal(8000000)
samplePlan.addTradeProfitGoal(16000000)
samplePlan2: DailyPlan = DailyPlan("HR 5975", "Beyond Infinity Corporation", "Wreaken Construction")
samplePlan2.addMissionInfluenceGoal(60)
samplePlan2.addBountyGoal(16000000)
samplePlan2.addCartographyGoal(8000000)
samplePlan2.addTradeProfitGoal(16000000)
samplePlan3: DailyPlan = DailyPlan("LAWD 26", "Minutemen", "Sirius Corporation")
samplePlan3.addMissionInfluenceGoal(90)
samplePlan3.addBountyGoal(16000000)
samplePlan3.addCartographyGoal(8000000)
samplePlan3.addTradeProfitGoal(0)
samplePlan3.addTradeLossGoal(16000000)
samplePlan3.addMurderGoal(32)
samplePlan3.setHookUrl("https://discordapp.com/api/webhooks/785228043128012820/uFmUix9PqWhh1cAoYYx1Hsh43VVmGPwCnNQlq5is1vBhqKUTeC2h0-VgDXfmQttuq9UX")
dailyPlans: DailyPlans = DailyPlans(logReporter)
dailyPlans.addPlan(samplePlan)
dailyPlans.addPlan(samplePlan2)
dailyPlans.addPlan(samplePlan3)
disco = DiscordReporter(logger)
dailyPlans.addReporter(disco)
#
# Direct EDMC callbacks to class
#
# Note that all of these could be simply replaced with something like:
# plugin_start3 = cc.on_load
| [
37811,
198,
16281,
8392,
9655,
13877,
13,
198,
1026,
6673,
257,
2060,
4936,
284,
262,
8392,
9655,
7071,
326,
11298,
262,
1271,
286,
1661,
340,
468,
587,
28384,
13,
198,
37811,
198,
198,
2,
220,
220,
15069,
357,
66,
8,
12131,
6289,
1... | 2.700104 | 1,924 |
'''
Created on Jan 16, 2014
@author: sean
'''
from __future__ import absolute_import, division, print_function
from functools import partial
import json
import logging
import os
import sys
import jinja2
from .conda_interface import PY3
from .environ import get_dict as get_environ
from .metadata import select_lines, ns_cfg
log = logging.getLogger(__file__)
class UndefinedNeverFail(jinja2.Undefined):
"""
A class for Undefined jinja variables.
This is even less strict than the default jinja2.Undefined class,
because it permits things like {{ MY_UNDEFINED_VAR[:2] }} and
{{ MY_UNDEFINED_VAR|int }}. This can mask lots of errors in jinja templates, so it
should only be used for a first-pass parse, when you plan on running a 'strict'
second pass later.
"""
all_undefined_names = []
__add__ = __radd__ = __mul__ = __rmul__ = __div__ = __rdiv__ = \
__truediv__ = __rtruediv__ = __floordiv__ = __rfloordiv__ = \
__mod__ = __rmod__ = __pos__ = __neg__ = __call__ = \
__getitem__ = __lt__ = __le__ = __gt__ = __ge__ = \
__complex__ = __pow__ = __rpow__ = \
lambda self, *args, **kwargs: UndefinedNeverFail(hint=self._undefined_hint,
obj=self._undefined_obj,
name=self._undefined_name,
exc=self._undefined_exception)
__str__ = __repr__ = \
lambda *args, **kwargs: u''
__int__ = lambda _: 0
__float__ = lambda _: 0.0
class FilteredLoader(jinja2.BaseLoader):
"""
A pass-through for the given loader, except that the loaded source is
filtered according to any metadata selectors in the source text.
"""
def context_processor(initial_metadata, recipe_dir, config, permit_undefined_jinja):
"""
Return a dictionary to use as context for jinja templates.
initial_metadata: Augment the context with values from this MetaData object.
Used to bootstrap metadata contents via multiple parsing passes.
"""
ctx = get_environ(config=config, m=initial_metadata)
environ = dict(os.environ)
environ.update(get_environ(config=config, m=initial_metadata))
ctx.update(
load_setup_py_data=partial(load_setup_py_data, config=config, recipe_dir=recipe_dir,
permit_undefined_jinja=permit_undefined_jinja),
# maintain old alias for backwards compatibility:
load_setuptools=partial(load_setuptools, config=config, recipe_dir=recipe_dir,
permit_undefined_jinja=permit_undefined_jinja),
load_npm=load_npm,
environ=environ)
return ctx
| [
7061,
6,
198,
41972,
319,
2365,
1467,
11,
1946,
198,
198,
31,
9800,
25,
384,
272,
198,
7061,
6,
198,
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
11,
7297,
11,
3601,
62,
8818,
198,
198,
6738,
1257,
310,
10141,
1330,
13027,
198,
... | 2.362468 | 1,167 |
import argparse
from experiments.income_census.train_config import fine_tune_hyperparameters, data_hyperparameters
from experiments.income_census.train_census_no_fg_adapt_pretrain import create_no_fg_census_global_model
from experiments.income_census.train_census_utils import finetune_census
if __name__ == "__main__":
parser = argparse.ArgumentParser("census_no-fg_target_fine_tune")
parser.add_argument('--pretrain_task_id', type=str)
args = parser.parse_args()
pretrain_task_id = args.pretrain_task_id
print(f"[INFO] fine-tune pre-trained model with pretrain task id : {pretrain_task_id}")
census_pretain_model_root_dir = data_hyperparameters['census_no-fg_pretrained_model_dir']
init_model, census_finetune_target_model_root_dir = get_finetune_model_meta()
task_id = finetune_census(pretrain_task_id,
census_pretain_model_root_dir,
census_finetune_target_model_root_dir,
fine_tune_hyperparameters,
data_hyperparameters,
init_model)
print(f"[INFO] finetune task id:{task_id}")
| [
11748,
1822,
29572,
198,
198,
6738,
10256,
13,
12519,
62,
66,
7314,
13,
27432,
62,
11250,
1330,
3734,
62,
83,
1726,
62,
49229,
17143,
7307,
11,
1366,
62,
49229,
17143,
7307,
198,
6738,
10256,
13,
12519,
62,
66,
7314,
13,
27432,
62,
... | 2.260116 | 519 |
PRJ_PATH = "/home/junkyul/conda/gmid2"
import sys
sys.path.append(PRJ_PATH)
import os
import time
from pprint import PrettyPrinter
pp = PrettyPrinter(indent=4)
from gmid2.global_constants import *
from gmid2.basics.uai_files import read_limid, read_svo
from gmid2.basics.directed_network import DecisionNetwork
from gmid2.basics.graphical_model import GraphicalModel
from gmid2.inference.submodel import submodel_tree_decomposition
from gmid2.inference.st_wmbmm_bw import StWMBMMBw
if __name__ == "__main__":
if len(sys.argv) > 1:
file_path = sys.argv[1]
ibound = int(sys.argv[2])
else:
TEST_PATH = os.path.join(BENCHMARK_DIR, "synthetic")
f = "mdp1-4_2_2_5.uai"
file_path = os.path.join(TEST_PATH, f)
ibound = 1
run(file_path, ibound)
| [
4805,
41,
62,
34219,
796,
12813,
11195,
14,
73,
28898,
377,
14,
66,
13533,
14,
70,
13602,
17,
1,
198,
11748,
25064,
198,
17597,
13,
6978,
13,
33295,
7,
4805,
41,
62,
34219,
8,
198,
198,
11748,
28686,
198,
11748,
640,
198,
6738,
27... | 2.265537 | 354 |
from django.shortcuts import render, get_object_or_404
from django.contrib.auth.mixins import LoginRequiredMixin, UserPassesTestMixin
from django.contrib.auth.models import User
from django.views.generic import (
ListView,
DetailView,
CreateView,
UpdateView,
DeleteView
)
from .models import Post
from gensim.summarization import keywords
from sumy.parsers.plaintext import PlaintextParser
from sumy.nlp.tokenizers import Tokenizer
from sumy.summarizers.lex_rank import LexRankSummarizer
| [
6738,
42625,
14208,
13,
19509,
23779,
1330,
8543,
11,
651,
62,
15252,
62,
273,
62,
26429,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
19816,
1040,
1330,
23093,
37374,
35608,
259,
11,
11787,
47,
13978,
14402,
35608,
259,
198,
... | 3.083333 | 168 |
import mysql.connector
from geopy.geocoders import Nominatim
from instagram.client import InstagramAPI
import json
import urllib2
import flickr
INSTAGRAM_CLIENT_ID = '5d56eb1e594c420997c394d1dca7fcea'
INSTAGRAM_CLIENT_SECRET = 'd0d78baa1e4e4f4b8af9fd9588379968'
api = InstagramAPI(client_id=INSTAGRAM_CLIENT_ID,client_secret=INSTAGRAM_CLIENT_SECRET)
cnx = mysql.connector.connect(user='galleryhop', password='galleryhop', host='galleryhop2.crflf9mu2uwj.us-east-1.rds.amazonaws.com',database='galleryhop2')
cursor = cnx.cursor()
cursor.execute("""select * from galleries""")
geolocator = Nominatim()
coords = []
for row in cursor:
try:
location = geolocator.geocode(row[5]+' NYC')
coords.append((location.latitude,location.longitude))
except:
print 'error'
print coords
for i in coords:
photos = flickr.photos_search(lat=i[0],lon=i[1],per_page=5,radius=0.25)
for p in photos:
str = 'https://farm'+p.farm+'.staticflickr.com/'+p.server+'/'+p.id+'_'+p.secret+'.jpg'
print str
| [
11748,
48761,
13,
8443,
273,
198,
6738,
4903,
11081,
13,
469,
420,
375,
364,
1330,
399,
6351,
265,
320,
198,
6738,
916,
6713,
13,
16366,
1330,
10767,
17614,
198,
11748,
33918,
198,
11748,
2956,
297,
571,
17,
198,
11748,
781,
18994,
19... | 2.405797 | 414 |
from calamari_ocr.ocr.backends.ctc_decoder.ctc_decoder import CTCDecoder
import numpy as np
if __name__ == "__main__":
d = FuzzyCTCDecoder()
r = d.decode(np.array(np.transpose([[0.8, 0, 0.7, 0.2, 0.1], [0.1, 0.4, 0.2, 0.7, 0.8], [0.1, 0.6, 0.1, 0.1, 0.1]])))
print(r)
| [
6738,
35765,
2743,
62,
1696,
13,
1696,
13,
1891,
2412,
13,
310,
66,
62,
12501,
12342,
13,
310,
66,
62,
12501,
12342,
1330,
327,
4825,
10707,
12342,
198,
198,
11748,
299,
32152,
355,
45941,
628,
198,
198,
361,
11593,
3672,
834,
6624,
... | 1.868421 | 152 |
from django.forms import ModelForm
from itembase.core.models import UnitOfMeasure, VendorItem
| [
6738,
42625,
14208,
13,
23914,
1330,
9104,
8479,
198,
198,
6738,
2378,
8692,
13,
7295,
13,
27530,
1330,
11801,
5189,
47384,
11,
39896,
7449,
628,
198
] | 3.730769 | 26 |
import io
import json
import tempfile
import pytest
import yaml
from yaml.parser import ParserError as YamlParserError
from pji.utils import auto_load_json, JsonLoadError
@pytest.mark.unittest
| [
11748,
33245,
198,
11748,
33918,
198,
11748,
20218,
7753,
198,
198,
11748,
12972,
9288,
198,
11748,
331,
43695,
198,
6738,
331,
43695,
13,
48610,
1330,
23042,
263,
12331,
355,
14063,
75,
46677,
12331,
198,
198,
6738,
279,
7285,
13,
26791,... | 3.126984 | 63 |
from jsonauthenticator.jsonauthenticator import JsonAuthenticator
__all__ = [JsonAuthenticator]
| [
6738,
33918,
41299,
26407,
13,
17752,
41299,
26407,
1330,
449,
1559,
47649,
26407,
198,
834,
439,
834,
796,
685,
41,
1559,
47649,
26407,
60,
198
] | 3.84 | 25 |
#!/usr/bin/env python
# _*_ coding:utf-8 _*_
import app_config
import datetime
import logging
import requests
import shortcodes
from render_utils import make_context
from PIL import Image
from StringIO import StringIO
from functools import partial
from jinja2 import Environment, FileSystemLoader
from pymongo import MongoClient
IMAGE_URL_TEMPLATE = '%s/%s'
IMAGE_TYPES = ['image', 'asset-image']
COLLAGE_TYPES = ['collage2']
SHORTCODE_DICT = {
'image': {
'caption': '',
'width': '100%',
'format': 'centered'
},
'collage2': {
'caption': '',
'width': '100%',
'format': 'centered'
},
'asset-image': {
'caption': '',
'width': '100%',
'format': 'centered'
},
'idpgraphic': {},
'video': {},
}
env = Environment(loader=FileSystemLoader('templates/shortcodes'))
logging.basicConfig(format=app_config.LOG_FORMAT)
logger = logging.getLogger(__name__)
logger.setLevel(app_config.LOG_LEVEL)
def _process_id(url, tag):
"""
Extract an ID from a url (or just return the URL).
"""
if tag == 'tweet':
parts = url.split('/')
return parts[5]
else:
return url
def _get_extra_context(id, tag):
"""
Do some processing
"""
extra = dict()
if tag in IMAGE_TYPES:
extra.update(_get_image_context(id, tag))
return extra
def _get_collage_extra_context(pargs, tag):
"""
Do some processing
"""
extra = dict()
if tag in COLLAGE_TYPES:
extra.update(_get_collage_context(pargs))
return extra
def _handler(context, content, pargs, kwargs, tag, defaults):
"""
Default handler all other handlers inherit from.
"""
if pargs:
if tag in COLLAGE_TYPES:
template_context = dict()
extra_context = _get_collage_extra_context(pargs, tag)
template_context.update(extra_context)
else:
id = _process_id(pargs[0], tag)
template_context = dict(url=pargs[0],
id=id)
extra_context = _get_extra_context(id, tag)
template_context.update(extra_context)
else:
template_context = dict()
if tag == 'idpgraphic':
template_context.update(make_context())
template_context.update(defaults)
template_context.update(kwargs)
template = env.get_template('%s.html' % tag)
output = template.render(**template_context)
return output
"""
Register handlers
"""
parser = shortcodes.Parser()
for tag, defaults in SHORTCODE_DICT.items():
tag_handler = partial(_handler, tag=tag, defaults=defaults)
parser.register(tag_handler, tag)
def process_shortcode(tag):
"""
Generates html from shortcode
"""
# Replace unicode <br>
# Replace rquote to normal quotation marks
text = tag.get_text()
text = text.replace(u'\xa0', u' ')
text = text.replace(u'\u201D', u'"')
text = text.replace(u'\u201C', u'"')
try:
return parser.parse(text)
except shortcodes.RenderingError as e:
logger.error('Could not render short code in: "%s"' % text)
logger.error('cause: %s' % e.__cause__)
return ''
def _get_image_context(id, tag):
"""
Download image and get/cache aspect ratio.
"""
if (tag == 'asset-image'):
image = Image.open('www/%s' % id)
ratio = float(image.height) / float(image.width)
ratio = round(ratio * 100, 2)
return dict(ratio=ratio)
url = IMAGE_URL_TEMPLATE % (app_config.IMAGE_URL, id)
client = MongoClient(app_config.MONGODB_URL)
database = client['idp-georgia']
collection = database.images
result = collection.find_one({'_id': id})
if not result:
logger.info('image %s: uncached, downloading %s' % (id, url))
response = requests.get(url)
image = Image.open(StringIO(response.content))
ratio = float(image.height) / float(image.width)
collection.insert({
'_id': id,
'date': datetime.datetime.utcnow(),
'ratio': ratio,
})
else:
logger.info('image %s: retrieved from cache' % id)
ratio = result['ratio']
ratio = round(ratio * 100, 2)
return dict(ratio=ratio, url=url)
def _get_collage_context(pargs):
"""
Download image and get/cache aspect ratio.
"""
ratios = {}
for ix, id in enumerate(pargs):
url = IMAGE_URL_TEMPLATE % (app_config.IMAGE_URL, id)
ratios['url%s' % ix] = url
client = MongoClient(app_config.MONGODB_URL)
database = client['idp-georgia']
collection = database.images
result = collection.find_one({'_id': id})
if not result:
logger.info('image %s: uncached, downloading %s' % (id, url))
response = requests.get(url)
image = Image.open(StringIO(response.content))
ratio = float(image.height) / float(image.width)
collection.insert({
'_id': id,
'date': datetime.datetime.utcnow(),
'ratio': ratio,
})
else:
logger.info('image %s: retrieved from cache' % id)
ratio = result['ratio']
ratio = round(ratio * 100, 2)
ratios['ratio%s' % ix] = ratio
return ratios
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
4808,
9,
62,
19617,
25,
40477,
12,
23,
4808,
9,
62,
198,
11748,
598,
62,
11250,
198,
11748,
4818,
8079,
198,
11748,
18931,
198,
11748,
7007,
198,
11748,
1790,
40148,
198,
6738,
85... | 2.258024 | 2,368 |
# Generated by Django 2.0.7 on 2018-07-27 12:02
from django.db import migrations, models
import django.db.models.deletion
| [
2,
2980,
515,
416,
37770,
362,
13,
15,
13,
22,
319,
2864,
12,
2998,
12,
1983,
1105,
25,
2999,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
198,
11748,
42625,
14208,
13,
9945,
13,
27530,
13,
2934,
1616,
295,
... | 2.818182 | 44 |
# -*- coding: utf-8 -*-
# Copyright: (c) 2019, Dag Wieers (@dagwieers) <dag@wieers.com>
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# pylint: disable=invalid-name,missing-docstring
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import unittest
import default
xbmc = __import__('xbmc')
xbmcaddon = __import__('xbmcaddon')
xbmcgui = __import__('xbmcgui')
xbmcvfs = __import__('xbmcvfs')
if __name__ == '__main__':
unittest.main()
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
15069,
25,
357,
66,
8,
13130,
11,
32167,
370,
494,
364,
4275,
67,
363,
86,
494,
364,
8,
1279,
67,
363,
31,
86,
494,
364,
13,
785,
29,
198,
2,
22961,
3611,
509... | 2.509434 | 212 |
# -*- coding: utf-8 -*-
"""Revision cleaning utilties.
Copyright 2017 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License"); you may not
use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-------------------------------------------------------------------------------
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import re
import bs4
months = [
'January',
'February',
'March',
'April',
'May',
'June',
'July',
'August',
'September',
'October',
'November',
'December',
'Jan',
'Feb',
'Mar',
'Apr',
'May',
'Jun',
'Jul',
'Aug',
'SJep',
'Oct',
'Nov',
'Dec',
]
month_or = '|'.join(months)
date_p = re.compile(r'\d\d:\d\d,( \d?\d)? (%s)( \d?\d)?,? \d\d\d\d (\(UTC\))?' %
month_or)
pre_sub_patterns = [(r'\[\[Image:.*?\]\]', ''), (r'\[\[File:.*?\]\]', ''),
(r'\[\[User:.*?\]\]', ''), (r'\[\[user:.*?\]\]', ''),
(r'\(?\[\[User talk:.*?\]\]\)?', ''),
(r'\(?\[\[user talk:.*?\]\]\)?', ''),
(r'\(?\[\[User Talk:.*?\]\]\)?', ''),
(r'\(?\[\[User_talk:.*?\]\]\)?', ''),
(r'\(?\[\[user_talk:.*?\]\]\)?', ''),
(r'\(?\[\[User_Talk:.*?\]\]\)?', ''),
(r'\(?\[\[Special:Contributions.*?\]\]\)?', '')]
post_sub_patterns = [('--', ''), (' :', ' '),
('—Preceding .* comment added by •', '')]
def clean_html(rev):
"""Clean revision HTML."""
# Remove timestmp.
ret = re.sub(date_p, lambda x: '', rev)
# Strip HTML format.
try:
ret = bs4.BeautifulSoup(ret, 'html.parser').get_text()
except: # pylint: disable=bare-except
pass
# Change format for better diff
ret = re.sub('[\n]+', '\n', str(ret))
ret = '\n'.join(
[x.strip() for x in ret.splitlines() if x.strip()]) + '\n'
if ret == '\n':
return ''
return ret
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
18009,
1166,
12724,
3384,
2326,
444,
13,
198,
198,
15269,
2177,
3012,
3457,
13,
198,
198,
26656,
15385,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,... | 2.152498 | 1,141 |
#!/usr/bin/env python3
#
# MIS Timetable Splitter
#
# Split a combined HTML exported Student Timetable from a common Management
# Information System (MIS) product, that shall remain nameless, into individual
# per-student files.
#
#
# Copyright 2019 Test Valley School.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from html.parser import HTMLParser
import argparse
import re
import os
# each timetable begins with a <td class="TitleBold">, with "Timetable" written in it, which is convenient enough
# argument parsing
argparser = argparse.ArgumentParser(description='Split a combined HTML exported Student Timetable from a common Management Information System (MIS) product, that shall remain nameless, into individual per-student files.')
argparser.add_argument('-i', '--input', dest='inputfile', help='The input HTML file.', required=True, type=argparse.FileType('r'))
argparser.add_argument('-o', '--output',dest='outputpath', help='The directory for the output files', required=True)
argparser.add_argument('--force', dest='force', help='Allow this script to overwrite files in the output folder.', action='store_true')
# main execution
args = argparser.parse_args()
tt_parser = TimetableParser()
# check output path
if not os.path.exists(args.outputpath):
raise ValueError("The output path specified does not exist.")
if not os.path.isdir(args.outputpath):
raise ValueError("The output path specified is not a directory.")
if not args.force and len(os.listdir(args.outputpath)) > 0:
raise ValueError("The output path is not empty. To allow overwriting of files with the same name, re-run with --force.")
# have the parser identify points at which we will split the HTML file
tt_parser.feed(args.inputfile.read())
# with identified split points, split file into individual items??
args.inputfile.seek(0)
lines = args.inputfile.readlines()
for i in range(0, len(tt_parser.splitpoints)):
currentsplit = tt_parser.splitpoints[i]
currentline = lines[currentsplit[0]-1]
try:
nextsplit = tt_parser.splitpoints[i+1]
except IndexError:
# at the end of the loop, simply split from the current split point to the end of the line
nextsplit = (currentsplit[0]-1, len(currentline))
individual_tt_filename = os.path.join(args.outputpath, tt_parser.titles[i] + '.html')
with open(individual_tt_filename, 'w') as outputfile:
print("Writing " + individual_tt_filename)
# write header
outputfile.write('<html><head><title>' + tt_parser.titles[i] + '</title>')
# write the style tags -- disabled at the moment because WP strips inline style in post body
#outputfile.write('<style type="text/css">')
#outputfile.write(tt_parser.style_data)
#outputfile.write('</style>')
outputfile.write('</head><body>')
# this is hacky to a significant degree, but we split the original file part way through a tag, so we'll re-create
# the table and title class
outputfile.write('<table><tr><td class="TitleBold">')
outputfile.write(currentline[currentsplit[1]:nextsplit[1]])
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
201,
198,
2,
201,
198,
2,
50029,
5045,
316,
540,
13341,
1967,
201,
198,
2,
201,
198,
2,
27758,
257,
5929,
11532,
29050,
13613,
5045,
316,
540,
422,
257,
2219,
8549,
201,
198,
2,
618... | 3.000803 | 1,245 |
import unittest
from covid19_icta import scrape
if __name__ == '__main__':
unittest.main()
| [
11748,
555,
715,
395,
198,
198,
6738,
39849,
312,
1129,
62,
713,
64,
1330,
42778,
628,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
555,
715,
395,
13,
12417,
3419,
198
] | 2.538462 | 39 |
from .union import UnionType, UnionInfo
__all__ = ['UnionType', 'UnionInfo']
| [
6738,
764,
24592,
1330,
4479,
6030,
11,
4479,
12360,
198,
198,
834,
439,
834,
796,
37250,
38176,
6030,
3256,
705,
38176,
12360,
20520,
198
] | 3.25 | 24 |
"""
List items of
```
{
"AccountId": string,
"ClusterArn": string,
"HighSeverityCount": sum-of-critical-and-high-severity,
"Image": string,
"LastStatus": string,
"LaunchType": string,
"Region": string,
"TaskArn": string
}
```
"""
from boto3.session import Session
from botocore.exceptions import ClientError
import boto3
import click
from collections import defaultdict
import logging
import re
from helper.aws import AwsApiHelper
from helper.ser import dump_json
logging.getLogger().setLevel(logging.INFO)
@click.command()
@click.option("--x-role-name", "-x", help="Name of a cross account role for accessing cross account images")
@click.option("--profile", "-p", help="AWS profile name. Use profiles in ~/.aws if not specified.")
@click.option("--region", "-r", default="ap-southeast-2", show_default=True, help="AWS Region. Use 'all' for all regions.")
if __name__ == "__main__":
main()
| [
37811,
198,
8053,
3709,
286,
198,
15506,
63,
198,
90,
198,
220,
366,
30116,
7390,
1298,
4731,
11,
198,
220,
366,
2601,
5819,
3163,
77,
1298,
4731,
11,
198,
220,
366,
11922,
50,
964,
414,
12332,
1298,
2160,
12,
1659,
12,
34666,
12,
... | 3.066667 | 300 |
import itertools
from typing import Any, Optional
import matplotlib.patches as patches
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.axes_grid1 import make_axes_locatable
from receptivefield.common import estimate_rf_from_gradient
from receptivefield.image import get_default_image
from receptivefield.types import (
ImageShape,
GridPoint,
GridShape,
ReceptiveFieldDescription,
ReceptiveFieldRect,
to_rf_rect,
)
def _plot_rect(
ax,
rect: ReceptiveFieldRect,
color: Any,
alpha: float = 0.9,
linewidth: float = 5,
size: float = 90,
) -> None:
"""
Plot rectangle and center point.
:param ax: matplotlib axis
:param rect: definition of rectangle
:param color:
:param alpha:
:param linewidth:
:param size: point size
"""
ax.add_patch(
patches.Rectangle(
(rect.y - rect.h / 2, rect.x - rect.w / 2),
rect.h,
rect.w,
alpha=alpha,
fill=False,
facecolor="white",
edgecolor=color,
linewidth=linewidth,
)
)
plt.scatter([rect.y], [rect.x], s=size, c=color)
def plot_gradient_field(
receptive_field_grad: np.ndarray,
image: np.ndarray = None,
axis: Optional[Any] = None,
**plot_params
) -> None:
"""
Plot gradient map from gradient tensor.
:param receptive_field_grad: numpy tensor of shape [N, W, H, C]
:param image: optional image of shape [W, H, 3]
:param axis: a matplotlib axis object as returned by the e.g. plt.subplot
function. If not None then axis is used for visualizations otherwise
default figure is created.
:param plot_params: additional plot params: figsize=(5, 5)
"""
receptive_field = estimate_rf_from_gradient(receptive_field_grad)
receptive_field_grad = np.array(receptive_field_grad).mean(0).mean(-1)
receptive_field_grad /= receptive_field_grad.max()
receptive_field_grad += (np.abs(receptive_field_grad) > 0) * 0.2
if image is not None:
receptive_field_grad = np.expand_dims(receptive_field_grad, -1)
receptive_field_grad = 255 / 2 * (receptive_field_grad + 1) + image * 0.5
receptive_field_grad = receptive_field_grad.astype("uint8")
if axis is None:
figsize = plot_params.get("figsize", (5, 5))
plt.figure(figsize=figsize)
axis = plt.subplot(111)
plt.title("Normalized gradient map")
im = plt.imshow(receptive_field_grad, cmap="coolwarm")
plt.xlabel("x")
plt.ylabel("y")
divider = make_axes_locatable(axis)
cax = divider.append_axes("right", size="5%", pad=0.05)
plt.colorbar(im, cax=cax)
axis.add_patch(
patches.Rectangle(
(
receptive_field.y - receptive_field.h / 2,
receptive_field.x - receptive_field.w / 2,
), # (x,y)
receptive_field.h,
receptive_field.w,
fill=False,
alpha=0.9,
linewidth=4,
edgecolor=(0.2, 0.2, 0.2),
)
)
axis.set_aspect("equal")
plt.tight_layout()
def plot_receptive_grid(
input_shape: GridShape,
output_shape: GridShape,
rf_params: ReceptiveFieldDescription,
custom_image: Optional[np.ndarray] = None,
plot_naive_rf: bool = False,
axis: Optional[Any] = None,
**plot_params
) -> None:
"""
Visualize receptive field grid.
:param input_shape: an input image shape as an instance of GridShape
:param output_shape: an output feature map shape
:param rf_params: an instance of ReceptiveFieldDescription computed for
this feature map.
:param custom_image: optional image [height, width, 3] to be plotted as
a background.
:param plot_naive_rf: plot naive version of the receptive field. Naive
version of RF does not take strides, and offsets into considerations,
it is a simple linear mapping from N points in feature map to pixels
in the image.
:param axis: a matplotlib axis object as returned by the e.g. plt.subplot
function. If not None then axis is used for visualizations otherwise
default figure is created.
:param plot_params: additional plot params: figsize=(5, 5)
"""
if custom_image is None:
img = get_default_image(shape=ImageShape(input_shape.h, input_shape.w))
else:
img = custom_image
figsize = plot_params.get("figsize", (10, 10))
# plot image
if axis is None:
plt.figure(figsize=figsize)
axis = plt.subplot(111)
axis.imshow(img)
# plot naive receptive field grid
if plot_naive_rf:
dw = input_shape.w / output_shape.w
dh = input_shape.h / output_shape.h
for i, j in itertools.product(range(output_shape.w), range(output_shape.h)):
x0, x1 = i * dw, (i + 1) * dw
y0, y1 = j * dh, (j + 1) * dh
axis.add_patch(
patches.Rectangle(
(y0, x0),
dh,
dw,
alpha=0.9,
fill=False,
edgecolor="gray",
linewidth=1,
)
)
rf_offset = rf_params.offset
rf_size = rf_params.size
rf_stride = rf_params.stride
# map from output grid space to input image
# plot RF grid based on rf params
points = [
map_point(i, j)
for i, j in itertools.product(range(output_shape.w), range(output_shape.h))
]
points = np.array(points)
axis.scatter(points[:, 1], points[:, 0], marker="o", c=(0.2, 0.9, 0.1, 0.9), s=10)
# plot receptive field from corner point
_plot_rect(
axis,
rect=to_rf_rect(rf_offset, rf_size),
color=(0.9, 0.3, 0.2),
linewidth=5,
size=90,
)
center_point = map_point(output_shape.w // 2, output_shape.h // 2)
_plot_rect(
axis,
rect=to_rf_rect(GridPoint(center_point[0], center_point[1]), rf_size),
color=(0.1, 0.3, 0.9),
linewidth=5,
size=90,
)
last_point = map_point(output_shape.w - 1, output_shape.h - 1)
_plot_rect(
axis,
rect=to_rf_rect(GridPoint(last_point[0], last_point[1]), rf_size),
color=(0.1, 0.9, 0.3),
linewidth=5,
size=90,
)
axis.set_aspect("equal")
| [
11748,
340,
861,
10141,
198,
6738,
19720,
1330,
4377,
11,
32233,
198,
198,
11748,
2603,
29487,
8019,
13,
8071,
2052,
355,
16082,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
299,
32152,
355,
45941,
198,
6738... | 2.199725 | 2,914 |
"""Implementation of the Bukin N. 6 function as in the link below. The number of
problem dimensions is arbitrary, as well as the bounds.
https://www.sfu.ca/~ssurjano/bukin6.html
"""
from .function import Function
import torch
| [
37811,
3546,
32851,
286,
262,
36810,
259,
399,
13,
718,
2163,
355,
287,
262,
2792,
2174,
13,
383,
1271,
286,
198,
45573,
15225,
318,
14977,
11,
355,
880,
355,
262,
22303,
13,
198,
5450,
1378,
2503,
13,
82,
20942,
13,
6888,
14,
93,
... | 3.424242 | 66 |
from django.urls import path
from d2e_share_splitter.sharecontributions import views
app_name = "sharecontributions"
urlpatterns = [
# Django Ajax CRUD Operations
path("contribs/", views.ContribsView.as_view(), name="list_contribs"),
# path('contribs/log', views.ContribsLog.as_view(), name='log_contribs'),
path("contribs/create/", views.CreateContrib.as_view(), name="contrib_create"),
path(
"contrib/<int:pk>/delete/",
views.DeleteContrib.as_view(),
name="contrib_delete",
),
path(
"contrib/update-form/",
views.UpdateContribFormView.as_view(),
name="contrib_form_update",
),
]
| [
6738,
42625,
14208,
13,
6371,
82,
1330,
3108,
198,
198,
6738,
288,
17,
68,
62,
20077,
62,
22018,
1967,
13,
20077,
3642,
2455,
507,
1330,
5009,
198,
198,
1324,
62,
3672,
796,
366,
20077,
3642,
2455,
507,
1,
198,
198,
6371,
33279,
82,... | 2.362989 | 281 |
# encoding: utf-8
from Storm.Localized import Strings as LocalizedStrings
from Storm.GameData import Catalog
from sys import argv, stderr
from os.path import exists
if __name__ != '__main__':
print_utf8_e('herotrivia.py is a CLI file, not a module')
exit(-1)
if len(argv) < 2:
print_utf8_e('Usage: python %s path_to_mods_dir [locale]' % (argv[0]))
exit(1)
RootDir = argv[1]
RootDirLength = len(RootDir)
RootLocale = 'enus'
if len(argv) > 2:
RootLocale = argv[2]
GameDataList = ['%s/heroesdata.stormmod' % RootDir]
GameDataList += list(map(lambda x: '%s/%s/' % (RootDir, x.get('value').lower()[5:]), Catalog('%s/heroesdata.stormmod/base.stormdata/Includes.xml' % RootDir)))
print('Name, Radius, Inner Radius, Flags, Search')
for gameDataDir in GameDataList:
gameDataPath = '%s/base.stormdata/GameData.xml' % gameDataDir
if not exists(gameDataPath):
print_utf8_e('Catalog stormmod %s does not exist!' % gameDataPath)
continue
CLocale = LocalizedStrings({}).Load('%s/%s.stormdata/LocalizedData/GameStrings.txt' % (gameDataDir, RootLocale))
GameDataCatalog = set(map(lambda x: x.get('path'), Catalog(gameDataPath).findall("Catalog")))
for CatalogEntry in GameDataCatalog:
catalogPath = '%s/base.stormdata/%s' % (gameDataDir, CatalogEntry)
if not exists(catalogPath):
print_utf8_e('Catalog file %s does not exist!' % catalogPath)
continue
CatalogFile = Catalog(catalogPath)
for CUnit in CatalogFile.findall('CUnit'):
CUnitId = CUnit.get('id')
CUnitParent = CUnit.get('parent') or CUnitId
if CUnitParent.startswith('StormHero') is not True and CUnitId != 'RexxarMisha': continue
CUnitName = CLocale.get("Unit/Name/%s" % CUnitId)
CUnitRadius = 'Inherited'
if CUnit.find('Radius') is not None: CUnitRadius = CUnit.find('Radius').get('value')
CUnitInnerRadius = 'Inherited'
if CUnit.find('InnerRadius') is not None: CUnitInnerRadius = CUnit.find('InnerRadius').get('value')
CUnitFlags = list(map(lambda x: x.get('index'), filter(lambda x: x.get('value') == '1', CUnit.findall('HeroPlaystyleFlags'))))
CUnitFlags += list(filter(lambda x: x is not None and (x.startswith('HeroGeneric') or x == 'UltimateEvolutionInvalidTarget'), map(lambda x: x.get('Link'), CUnit.findall('BehaviorArray'))))
if len(CUnitFlags) == 0: CUnitFlags = ['Inherited']
CSearchText = CLocale.get("Hero/AdditionalSearchText/%s" % CUnitId[4:])
print('%s, %s, %s, %s, %s' % (CUnitName, CUnitRadius, CUnitInnerRadius, ', '.join(CUnitFlags), CSearchText))
| [
2,
21004,
25,
3384,
69,
12,
23,
198,
6738,
8865,
13,
14565,
1143,
1330,
4285,
654,
355,
10714,
1143,
13290,
654,
198,
6738,
8865,
13,
8777,
6601,
1330,
44515,
198,
6738,
25064,
1330,
1822,
85,
11,
336,
1082,
81,
198,
6738,
28686,
13... | 2.383054 | 1,133 |
import glob
from os import system
files = glob.glob("*.py") + glob.glob("cfdl/*.py")
system("python3 -m black . --line-length 79")
for file in files:
# system(f"python3 -m pybetter {file}")
system(f"python3 -m isort {file}")
__all__ = ["files"]
| [
11748,
15095,
198,
6738,
28686,
1330,
1080,
198,
198,
16624,
796,
15095,
13,
4743,
672,
7203,
24620,
9078,
4943,
1343,
15095,
13,
4743,
672,
7203,
12993,
25404,
15211,
13,
9078,
4943,
198,
10057,
7203,
29412,
18,
532,
76,
2042,
764,
137... | 2.575758 | 99 |
import json
import sys
import pandas as pd
# noinspection SpellCheckingInspection
EXAMPLE_CTRL_JSON = """######### EXAMPLE JSON: #########
{
"save_path": "./basic_stat.csv",
"data": [
{
"name": "Baseline",
"csv_path": "./similarity_score.csv",
"col_name": "baseline",
"invalid_cell_as": 0,
},
{
"name": "Tor",
"csv_path": "./similarity_score.csv",
"col_name": "torsocks",
"invalid_cell_as": 0,
}
]
}
"""
def data_prepare(df_path, col_name, invalid_cell_as=None):
"""
sample_range = [start, end, interval], start can be left, end can be right if all data are included.
"""
df = pd.read_csv(df_path)
df = df[col_name]
if invalid_cell_as is not None:
df = df.fillna(float(invalid_cell_as))
return df
if __name__ == '__main__':
main()
| [
11748,
33918,
198,
11748,
25064,
198,
11748,
19798,
292,
355,
279,
67,
198,
198,
2,
645,
1040,
14978,
11988,
9787,
278,
818,
31308,
198,
6369,
2390,
16437,
62,
4177,
7836,
62,
40386,
796,
37227,
7804,
2,
7788,
2390,
16437,
19449,
25,
... | 2.035165 | 455 |
import aiohttp
import datetime
import json
import logging
from .error import NeoAsyncHTTPyException
from .utils import Timer
logger = logging.getLogger(__name__)
| [
11748,
257,
952,
4023,
198,
11748,
4818,
8079,
198,
11748,
33918,
198,
11748,
18931,
198,
198,
6738,
764,
18224,
1330,
21227,
42367,
40717,
88,
16922,
198,
6738,
764,
26791,
1330,
5045,
263,
628,
198,
6404,
1362,
796,
18931,
13,
1136,
1... | 3.428571 | 49 |
import os
import sys
from distutils.sysconfig import get_python_lib
from setuptools import find_packages, setup
setup(
name='arrested',
version='0.1.3',
author='Mikey Waites',
author_email='mike@oldstlabs.com',
url='https://github.com/mikeywaites/flask-arrested',
description=('A framework for rapidly building REST APIs in Flask.'),
license='MIT',
packages=find_packages(exclude=['tests']),
include_package_data=True,
install_requires=['flask'],
zip_safe=False,
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Framework :: Flask',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Internet :: WWW/HTTP :: WSGI',
'Topic :: Software Development :: Libraries :: Application Frameworks',
'Topic :: Software Development :: Libraries :: Python Modules',
]
)
| [
11748,
28686,
198,
11748,
25064,
198,
6738,
1233,
26791,
13,
17597,
11250,
1330,
651,
62,
29412,
62,
8019,
198,
198,
6738,
900,
37623,
10141,
1330,
1064,
62,
43789,
11,
9058,
198,
198,
40406,
7,
198,
220,
220,
220,
1438,
11639,
283,
2... | 2.726916 | 509 |
import inspect
import celery
from kombu import Queue, Exchange
from sqlalchemy.engine.url import URL
| [
11748,
10104,
198,
198,
11748,
18725,
1924,
198,
6738,
479,
2381,
84,
1330,
4670,
518,
11,
12516,
198,
6738,
44161,
282,
26599,
13,
18392,
13,
6371,
1330,
10289,
628
] | 3.551724 | 29 |
# Make a particles.in file for a streak line
# Continuous release from single source
from numpy import linspace
start_time = "1989-05-24T12"
num_particles = 1001
# Release point in grid coordinates
x, y = 115, 100
zmax = 200
Z = linspace(zmax, 0, num_particles)
with open("station.rls", mode="w") as f:
for z in Z:
f.write("{:s} {:7.3f} {:7.3f} {:6.2f}\n".format(start_time, x, y, z))
| [
2,
6889,
257,
13166,
13,
259,
2393,
329,
257,
15113,
1627,
198,
2,
45012,
2650,
422,
2060,
2723,
198,
198,
6738,
299,
32152,
1330,
300,
1040,
10223,
198,
198,
9688,
62,
2435,
796,
366,
25475,
12,
2713,
12,
1731,
51,
1065,
1,
198,
... | 2.45122 | 164 |
#!/usr/bin/env python
from __future__ import absolute_import, division
from math import sqrt
from scipy.optimize import brentq
from common import func1, MIN_MACH, MAX_MACH
from constants import GAMMA
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
201,
198,
201,
198,
201,
198,
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
11,
7297,
201,
198,
201,
198,
6738,
10688,
1330,
19862,
17034,
201,
198,
201,
198,
6738,
629,
541,
88,
13,
40... | 2.489362 | 94 |
import json
import sys
import random
from argparse import ArgumentParser
import torch
import torch.nn.functional as F
from torch.autograd import Variable
from torch.optim import RMSprop
from torch.optim.lr_scheduler import StepLR
from loader import load_data, load_ontology, load_embed, load_sys_vocab, load_kb
from kb import load_kb
from model import load_tracker_model
from decoder import load_generator_model
from codec import Codec
from sentence_generator import SentenceGenerator
CONFIG_FN = 'config.json'
sent_groups = {}
if __name__ == '__main__':
main() | [
11748,
33918,
198,
11748,
25064,
198,
11748,
4738,
198,
6738,
1822,
29572,
1330,
45751,
46677,
198,
198,
11748,
28034,
198,
11748,
28034,
13,
20471,
13,
45124,
355,
376,
198,
6738,
28034,
13,
2306,
519,
6335,
1330,
35748,
198,
6738,
28034... | 3.331395 | 172 |
from pybox.math import vec2d
from pybox.math import matrix
from pybox.math import transform | [
6738,
12972,
3524,
13,
11018,
1330,
43030,
17,
67,
198,
6738,
12972,
3524,
13,
11018,
1330,
17593,
198,
6738,
12972,
3524,
13,
11018,
1330,
6121
] | 3.64 | 25 |
#
# Memento Pi
# Utilities for dsiplaying notification on the raspberry pi
#
import time
import threading
from gpiozero import TonalBuzzer
buzzer = TonalBuzzer(23)
# play a tone on the buzzer
| [
2,
198,
2,
337,
972,
78,
13993,
198,
2,
41086,
329,
288,
13396,
17916,
14483,
319,
262,
38973,
31028,
198,
2,
628,
198,
11748,
640,
198,
11748,
4704,
278,
198,
6738,
27809,
952,
22570,
1330,
16859,
282,
48230,
263,
198,
198,
65,
471... | 3.145161 | 62 |
import torch
import torch.nn as nn
import torch.nn.functional as F
#from lib.config import cfg
#import lib.utils as utils
# h -- batch_size * cfg.MODEL.RNN_SIZE
# att_feats -- batch_size * att_num * att_feats_dim
# p_att_feats -- batch_size * att_num * cfg.ATT_HIDDEN_SIZE | [
11748,
28034,
198,
11748,
28034,
13,
20471,
355,
299,
77,
198,
11748,
28034,
13,
20471,
13,
45124,
355,
376,
198,
2,
6738,
9195,
13,
11250,
1330,
30218,
70,
198,
2,
11748,
9195,
13,
26791,
355,
3384,
4487,
628,
220,
220,
220,
1303,
... | 2.544643 | 112 |
# /etc/passwd fields.
_PASSWD_FIELDS = ('login', 'password', 'uid', 'gid', 'name', 'home', 'shell')#
| [
2,
1220,
14784,
14,
6603,
16993,
7032,
13,
198,
62,
47924,
22332,
62,
11674,
3698,
5258,
796,
19203,
38235,
3256,
705,
28712,
3256,
705,
27112,
3256,
705,
70,
312,
3256,
705,
3672,
3256,
705,
11195,
3256,
705,
29149,
11537,
2,
198
] | 2.463415 | 41 |
import os
import pathlib
import tkinter as tk
class HelpWindow(tk.Toplevel):
'''
Toplevel window for displaying the help pannel
'''
| [
11748,
28686,
198,
11748,
3108,
8019,
198,
11748,
256,
74,
3849,
355,
256,
74,
198,
198,
4871,
10478,
27703,
7,
30488,
13,
51,
643,
626,
2599,
198,
220,
220,
220,
705,
7061,
198,
220,
220,
220,
309,
643,
626,
4324,
329,
19407,
262,
... | 2.314286 | 70 |
import logging
from search.models import Task, TaskSimilarityScore, TaskServiceSimilarityScore
from human_services.services.models import Service
LOGGER = logging.getLogger(__name__)
| [
11748,
18931,
198,
6738,
2989,
13,
27530,
1330,
15941,
11,
15941,
18925,
414,
26595,
11,
15941,
16177,
18925,
414,
26595,
198,
6738,
1692,
62,
30416,
13,
30416,
13,
27530,
1330,
4809,
198,
198,
25294,
30373,
796,
18931,
13,
1136,
11187,
... | 3.8 | 50 |
# coding: utf-8
import argparse
import os
import starwar
SUPPORTED_BACKEND = ('json', 'mongodb', 'mysql', 'sqlite')
def expand_path(path):
"""Return the absolute path for a given path.
Expand `~` and `.` characters, transform relative path to absolute one.
"""
if path is None:
path = 'data'
path = os.path.abspath(os.path.expanduser(path))
try:
if not os.path.isdir(path):
os.mkdir(path)
except OSError as err:
print("Can not create directory: %s" % err.message)
return None
return path
if __name__ == '__main__':
main() | [
2,
19617,
25,
3384,
69,
12,
23,
198,
198,
11748,
1822,
29572,
198,
11748,
28686,
198,
198,
11748,
3491,
5767,
198,
198,
40331,
15490,
1961,
62,
31098,
10619,
796,
19203,
17752,
3256,
705,
31059,
375,
65,
3256,
705,
28744,
13976,
3256,
... | 2.405512 | 254 |
# Generated by Django 3.1.5 on 2021-01-29 10:34
from django.db import migrations
| [
2,
2980,
515,
416,
37770,
513,
13,
16,
13,
20,
319,
33448,
12,
486,
12,
1959,
838,
25,
2682,
201,
198,
201,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
201,
198,
201,
198
] | 2.485714 | 35 |
"""Definition of the WordHippoEngine."""
import argparse
from collections.abc import Callable, Iterable
from dataclasses import dataclass
from enum import IntEnum
from typing import IO, Optional
from urllib.parse import quote
import lxml.etree
import requests
from dict.colors import COLOR_HIGHLIGHT, COLOR_RESET
from dict.engines.base import BaseEngine
from dict.pager import print_in_columns
MEANINGS_URL = (
"https://www.wordhippo.com/what-is/the-meaning-of-the-word/{}.html"
)
SYNONYMS_URL = "https://www.wordhippo.com/what-is/another-word-for/{}.html"
USER_AGENT = (
"Mozilla/5.0 (X11; Linux x86_64; rv:10.0) Gecko/20100101 Firefox/10.0"
)
class WordHippoLookupMode(IntEnum):
"""WordHippo engine lookup target."""
SYNONYMS = 1
MEANINGS = 2
@dataclass
class BaseWordHippoResult:
"""Base WordHippo engine result."""
word_type: str
@property
def column_size(self) -> int:
"""Maximum item length to align the results in a table.
:return: max item length
"""
raise NotImplementedError("not implemented") # pragma: no cover
def print_to_stream(self, file: IO[str], column_size: int) -> None:
"""Print self to the given stream.
:param file: output stream
:param column_size: column size for aligning the results in a table
"""
raise NotImplementedError("not implemented") # pragma: no cover
TLookupFunc = Callable[[str], Iterable[BaseWordHippoResult]]
@dataclass
class WordHippoMeaningResult(BaseWordHippoResult):
"""WordHippo engine meaning result."""
meanings: list[str]
@property
@dataclass
class WordHippoSynonymResult(BaseWordHippoResult):
"""WordHippo engine synonym result."""
word_desc: str
synonyms: list[str]
@property
class WordHippoEngine(BaseEngine[BaseWordHippoResult]):
"""WordHippo engine."""
names = ["wordhippo"]
@staticmethod
@staticmethod
def get_synonyms(phrase: str) -> Iterable[WordHippoSynonymResult]:
"""Get synonyms for the given phrase.
:param phrase: phrase to look up
:return: a generator of synonyms
"""
url = SYNONYMS_URL.format(quote(phrase))
response = requests.get(url, headers={"User-Agent": USER_AGENT})
response.raise_for_status()
doc = lxml.etree.HTML(response.text)
for word_desc_node in doc.cssselect("div.tabdesc"):
word_type_node = word_desc_node.getprevious()
related_word_nodes = word_desc_node.getnext().cssselect("div.wb a")
yield WordHippoSynonymResult(
word_type=(word_type_node.text or "").strip(),
word_desc=_get_text_from_node(word_desc_node),
synonyms=list(map(_get_text_from_node, related_word_nodes)),
)
@staticmethod
def get_meanings(phrase: str) -> Iterable[WordHippoMeaningResult]:
"""Get meanings for the given phrase.
:param phrase: phrase to look up
:return: a generator of meanings
"""
url = MEANINGS_URL.format(quote(phrase))
response = requests.get(url, headers={"User-Agent": USER_AGENT})
response.raise_for_status()
doc = lxml.etree.HTML(response.text)
for word_type_node in doc.cssselect("div.defv2wordtype"):
meaning_word_nodes = word_type_node.getnext().cssselect(
".topleveldefinition li"
)
yield WordHippoMeaningResult(
word_type=_get_text_from_node(word_type_node),
meanings=list(map(_get_text_from_node, meaning_word_nodes)),
)
| [
37811,
36621,
286,
262,
9678,
39,
3974,
78,
13798,
526,
15931,
198,
11748,
1822,
29572,
198,
6738,
17268,
13,
39305,
1330,
4889,
540,
11,
40806,
540,
198,
6738,
4818,
330,
28958,
1330,
4818,
330,
31172,
198,
6738,
33829,
1330,
2558,
483... | 2.395257 | 1,518 |
"""Test the Arnoldi implementation."""
from dla.linalg import arnoldi
import numpy as np
import numpy.testing as npt
from scipy.linalg import subspace_angles
# Symmetric, diagonally dominant 6x6 tridiagonal test matrix (safe to
# solve against repeatedly in the tests below).
a = np.array([
    [9.0, 1.0, 0.0, 0.0, 0.0, 0.0],
    [1.0, 8.0, 1.0, 0.0, 0.0, 0.0],
    [0.0, 1.0, 5.0, 1.0, 0.0, 0.0],
    [0.0, 0.0, 1.0, 6.0, 1.0, 0.0],
    [0.0, 0.0, 0.0, 1.0, 8.0, 1.0],
    [0.0, 0.0, 0.0, 0.0, 1.0, 5.0],
])
# Starting vector, stored as a 6x1 column (the tests squeeze it as needed).
b = np.array([
    [1.0],
    [0.0],
    [3.0],
    [1.0],
    [0.0],
    [0.0],
])
def test_arnoldi_simple():
    """Test the Arnoldi algorithm for a simple system."""
    num_directions = 3
    # Build the reference basis by repeated normalised solves against `a`,
    # starting from the normalised right-hand side.
    reference = np.empty((a.shape[0], num_directions))
    vec = b.squeeze()
    reference[:, 0] = vec / np.linalg.norm(vec)
    for col in range(1, num_directions):
        step = np.linalg.solve(a, reference[:, col - 1])
        reference[:, col] = step / np.linalg.norm(step)
    krylov = arnoldi(a, b, num_directions)
    # The two spans must coincide: every principal angle should vanish.
    npt.assert_almost_equal(np.abs(subspace_angles(reference, krylov)).max(), 0.0)
def test_arnoldi_xxl():
    """Test the Arnoldi algorithm for a larger system."""
    np.random.seed(777)
    matrix = np.random.rand(100, 100)
    rhs = np.random.rand(100)
    num_directions = 10
    # Reference basis: normalised right-hand side followed by repeated
    # normalised solves against the random matrix.
    reference = np.empty((matrix.shape[0], num_directions))
    reference[:, 0] = rhs / np.linalg.norm(rhs)
    for col in range(1, num_directions):
        step = np.linalg.solve(matrix, reference[:, col - 1])
        reference[:, col] = step / np.linalg.norm(step)
    krylov = arnoldi(matrix, rhs, num_directions)
    # Spans must agree up to numerical precision.
    npt.assert_almost_equal(np.abs(subspace_angles(reference, krylov)).max(), 0.0)
def test_arnoldi_orthogonality():
    """Test if the Arnoldi implementation produces an orthogonal basis."""
    num_directions = 4
    basis = arnoldi(a, b, num_directions)
    # Every pair of distinct basis vectors must be mutually orthogonal.
    for right in range(num_directions):
        for left in range(right):
            npt.assert_almost_equal(np.dot(basis[:, right], basis[:, left]), 0.0)
def test_arnoldi_normalisation():
    """Test if the Arnoldi implementation produces a normalised basis."""
    num_directions = 4
    basis = arnoldi(a, b, num_directions)
    # Each column of the returned basis must have unit Euclidean length.
    column_norms = np.linalg.norm(basis, axis=0)
    npt.assert_almost_equal(column_norms, np.ones(num_directions))
| [
37811,
14402,
262,
21418,
72,
7822,
526,
15931,
198,
6738,
288,
5031,
13,
75,
1292,
70,
1330,
610,
77,
727,
72,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
299,
32152,
13,
33407,
355,
299,
457,
198,
6738,
629,
541,
88,
13,
75,
... | 1.96286 | 1,077 |
import urllib.parse, urllib.request, json, ssl

# Authentication and API Requests
# LEARNING LAB 2 Cisco Kinetic for Cities

# The initial login steps are the same as Learning Lab 1.
# You can skip ahead to 'LEARNING LAB 2 CODE BEGINS HERE'

# Ignore invalid certificates (the CKC sandbox serves a self-signed cert).
ssl._create_default_https_context = ssl._create_unverified_context

############################### LEARNING LAB 2 CODE BEGINS HERE ############################
#
# In this example, we will exercise the CKC API: {{Platform Instance URL}}/cdp/v1/locations/user/{userId}/info
# In the case of the Sandbox lab, this resolves to https://ckcsandbox.cisco.com/t/devnet.com/cdp/v1/locations/user/{userId}/info
# The access_token and user_id from Learning Lab 1 will be used to obtain the current Users Location Information

print('Learning Lab 2 Starts Here:')

user_id = '86847897-ab35-489c-af17-6fbf301a6016'
access_token = '0f493c98-9689-37c4-ad76-b957020d0d6c'

# BUG FIX: the original called response.read().decode(encoding) without ever
# defining `encoding`, which raised NameError at runtime.  The CKC API
# returns UTF-8 encoded JSON.
encoding = 'utf-8'

# Define the required GET headers needed by the CKC API.
headers = {
    'authorization': "Bearer " + access_token,
    'Content-Type': "application/json"
}

# The URL (with path parameters filled in) to request user details.
requestUrl = 'https://ckcsandbox.cisco.com/t/devnet.com/cdp/v1/locations/user/' + user_id + '/info'
print('\nGetting User Location Info: (' + requestUrl + ')\n')

# create the request
request = urllib.request.Request(requestUrl, headers = headers)
# perform the request
response = urllib.request.urlopen(request)
results = response.read().decode(encoding)
responseDictionary = json.loads(results)
print('User Location Info:', results, '\n')

############################### LEARNING LAB 2 PART-2 ############################
#
# In this example, we will exercise the CKC API: {{Platform Instance URL}}/cdp/v1/capabilities/customer
# In the case of the Sandbox lab, this resolves to https://ckcsandbox.cisco.com/t/devnet.com/cdp/v1/capabilities/customer
# The access_token obtained as explained in Learning Lab 1 is used for authorization

# Define the required GET headers needed by the CKC API.
headers = {'authorization': "Bearer " + access_token }

# The URL to request the customer capabilities.
requestUrl = 'https://ckcsandbox.cisco.com/t/devnet.com/cdp/v1/capabilities/customer'
print('\nGetting User capabilities: (' + requestUrl + ')\n')

# create the request
request = urllib.request.Request(requestUrl, headers = headers)
# perform the request
response = urllib.request.urlopen(request)
results = response.read().decode(encoding)
responseDictionary = json.loads(results)
print('User Capabilities:', results, '\n')
| [
11748,
2956,
297,
571,
13,
29572,
11,
2956,
297,
571,
13,
25927,
11,
33918,
11,
264,
6649,
198,
198,
2,
48191,
290,
7824,
9394,
3558,
198,
198,
2,
12509,
1503,
15871,
406,
6242,
362,
220,
28289,
16645,
5139,
329,
20830,
198,
2,
383,... | 3.107488 | 828 |
import pkg_resources
import requests
import pip
from gitcd.exceptions import GitcdPyPiApiException
| [
11748,
279,
10025,
62,
37540,
198,
11748,
7007,
198,
11748,
7347,
198,
6738,
17606,
10210,
13,
1069,
11755,
1330,
15151,
10210,
20519,
38729,
32,
14415,
16922,
628
] | 3.703704 | 27 |
from charlatan.helper import fetch
from random import choice, uniform
from charlatan.misc.business import CURRENCIES, CURRENCY_SYMBOLS
| [
6738,
1149,
15460,
272,
13,
2978,
525,
1330,
21207,
201,
198,
6738,
4738,
1330,
3572,
11,
8187,
201,
198,
6738,
1149,
15460,
272,
13,
44374,
13,
22680,
1330,
327,
31302,
24181,
11015,
11,
327,
31302,
45155,
62,
23060,
10744,
3535,
50,
... | 3.159091 | 44 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from sentry.testutils import TestCase, fixture
from sentry.interfaces import Query
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
198,
198,
6738,
1908,
563,
13,
9288,
26791,
1330,
6208,
20448,
11,
29220,
198,
6738,
1908,
563,
13,
3849,
32186,
133... | 3.23913 | 46 |
# Exercise 3 -- print the next term of several numeric sequences.
# A - 1,3,5,7,          (odd numbers)
# B 2,4,8,16,32,64      (powers of two)
# C 0,1,4,9,16,32,49
# D 4,16,36,64          (squares of even numbers)
# E 1,1,2,3,5,8         (Fibonacci)
# F 2,10,12,16,17,18,19
# NOTE(review): exA..exE are not visible in this excerpt -- presumably
# defined earlier in the original file; confirm before running.
print(f'O proximo termo: 200. Nao achei uma forma logica de calcular o valor, apenas supondo que todos comecem com a leta D')
exA(5)
print('\n')
exB(7)
print('\n')
exC(8)
print('\n')
exD(64)
print('\n')
print(f'O proximo termo: {exE(8)}')
2,
3109,
2798,
8836,
66,
952,
513,
198,
198,
2,
32,
532,
352,
11,
18,
11,
20,
11,
22,
11,
198,
220,
220,
220,
220,
198,
2,
33,
362,
11,
19,
11,
23,
11,
1433,
11,
2624,
11,
2414,
198,
220,
220,
220,
220,
220,
220,
220,
220,... | 1.672489 | 229 |
# Create nodes associated with Master Controller Board of Shoelace Antenna matching network.
# 23 Jan 2012, Ted Golfinopoulos

from MDSplus import *

tree=Tree("MAGNETICS", -1) #Open model tree

#For now, do work on a test shot until design is in steady state.
s=1120125998
tree.createPulse(s) #Create a test shot
tree = Tree("MAGNETICS", s, "EDIT")

# Set the default directory to SHOELACE location.
tree.setDefault(tree.getNode("SHOELACE"))

# Add a child node (subtree, "STRUCTURE") for the Master Controller Board (MCB) -
# this will store the associated nodes for the board outputs.
tree.addNode("MCB_OUT", "STRUCTURE")
tree.setDefault(tree.getNode("MCB_OUT")) #Make this child node default for further relative path references
tree.addNode("COMMENT", "TEXT")
tree.getNode("COMMENT").putData("Nodes pertaining to processing of outputs from MASTER CONTROLLER BOARD (MCB). ENCODER_CLK=clock frequency used by number encoder - divided down from MCB clock (F_CLK).")
tree.addNode("ENCODER_CLK", "NUMERIC")
# Encoder clock = 4 MHz MCB clock divided down by 16 (see COMMENT text above).
tree.getNode("ENCODER_CLK").putData(4.0E6/16.0)

#Add nodes for logic states (codes) for series and parallel capacitor banks

### CONVENIENCE FUNCTIONS
#Function to print string for building a raw voltage signal given path references.
#Function setting up nodes for series and parallel codes (stores time-encoded signal). Identical topology for series and parallel caps - only names change.
# NOTE(review): the convenience-function definitions described above --
# including buildCodeNode, which is called below -- are not visible in this
# excerpt; they are presumably defined here in the original file. Confirm
# before running.
#	tree.setDefault(tree.getNode(".-.")) #Reset default to parent level.
###

#Write in changes
#tree.write()

#Stamp in serial code nodes
nodeName="SER_CODE"
datPath="\\MAGNETICS::TOP.ACTIVE_MHD.DATA_ACQ.CPCI:ACQ_216_3:INPUT_09"
comment=nodeName+"=number encoding which boards should turn on which capacitors in associated series bank. (Number which specifies tuning configuration for caps.); NBITS=number of bits in binary number encoded in signal"
buildCodeNode(tree,nodeName,datPath, comment)

#Stamp in parallel code nodes
nodeName="PAR_CODE"
datPath="\\MAGNETICS::TOP.ACTIVE_MHD.DATA_ACQ.CPCI:ACQ_216_3:INPUT_10"
comment=nodeName+"=number encoding which boards should turn on which capacitors in associated parallel bank. (Number which specifies tuning configuration for caps.); NBITS=number of bits in binary number encoded in signal"
buildCodeNode(tree,nodeName,datPath, comment)

#Add node for interpreting frequency determined from period counter on MCB.
nodeName="N_PER"
datPath="\\MAGNETICS::TOP.ACTIVE_MHD.DATA_ACQ.CPCI:ACQ_216_3:INPUT_11"
comment="N_PER=number of clock cycles counted in M periods of sync signal; M=Number of signal (sync) cycles over which sync period is counted; F_CLK=clock frequency of Master Controller Board (MCB); FREQ_OUT = F_CLK*M/N_PER [Hz]; NBITS=number of bits in binary number encoded in signals"
#Set up basic node template.
buildCodeNode(tree, nodeName, datPath, comment)

#Add additional nodes associated with converting clock counts per period into frequency.
tree.setDefault(tree.getNode("N_PER"))
tree.addNode("M", "NUMERIC")
tree.getNode("M").putData(50.) #Number of sync counts in accumulation period
tree.addNode("F_CLK", "NUMERIC")
tree.getNode("F_CLK").putData(4000000.) # Clock frequency on MCB [Hz]
tree.addNode("FREQ_OUT", "SIGNAL") #This node will hold the calculated frequency from the MCB.
tree.getNode("NBITS").putData(14) #Overwrite previous nbits number with correct amount for period counter.
datPath="\\MAGNETICS::TOP.SHOELACE.MCB_OUT:N_PER"
# TDI expression: if the N_PER node is on, build FREQ_OUT = F_CLK*M/N_PER
# on N_PER's timebase; otherwise abort.
freqCalcTdi="GETNCI("+datPath+", \"ON\") ? Build_Signal(Build_With_Units(F_CLK*M/("+datPath+"), \"Hz\"), *, DIM_OF("+datPath+") ) : ABORT()" #Ternary operator determining whether parent node is on; calculate frequency using clock counts.
print(freqCalcTdi)
tree.getNode("FREQ_OUT").putData(Data.compile(freqCalcTdi))

#Write changes to tree.
tree.write()

#GETNCI(\MAGNETICS::TOP.SHOELACE.MCB_OUT:SER_CODE, "ON") ? Build_Signal(Build_With_Units(\MAGNETICS::TOP.ACTIVE_MHD.DATA_ACQ.CPCI:ACQ_216_3:INPUT_09, "V"), *, DIM_OF(\MAGNETICS::TOP.ACTIVE_MHD.DATA_ACQ.CPCI:ACQ_216_3:INPUT_09) : ABORT()
#GETNCI(BP1T_GHK, "ON") ? Build_Signal(Build_With_Units(.-.DATA_ACQ.CPCI:ACQ_216_1:INPUT_07 * 1 / (\MAG_RF_COILS:CALIB[59] * 1), "Tesla/s"), *, DIM_OF(.-.DATA_ACQ.CPCI:ACQ_216_1:INPUT_07)) : ABORT()
#\MAGNETICS::TOP.ACTIVE_MHD.DATA_ACQ.CPCI:ACQ_216_3:INPUT_09
| [
2,
13610,
13760,
3917,
351,
5599,
22741,
5926,
286,
16509,
417,
558,
3738,
13713,
12336,
3127,
13,
198,
2,
2242,
2365,
2321,
11,
11396,
19709,
259,
20338,
198,
198,
6738,
337,
5258,
9541,
1330,
1635,
198,
198,
21048,
28,
27660,
7203,
... | 2.933242 | 1,453 |
import logging
import logging.handlers
import time
from typing import Literal, List, Any
from pidal import NAME
from pidal.config import LoggingConfig
# Package-wide logger named after the package (NAME imported above).
logger = logging.getLogger(NAME)

# Maps handler-name strings to the corresponding stdlib logging handler
# classes -- presumably used to resolve LoggingConfig entries into concrete
# handler instances (confirm against the config-loading code).
handler_map = {
    "NullHandler": logging.NullHandler,
    "StreamHandler": logging.StreamHandler,
    "FileHandler": logging.FileHandler,
    "RotatingFileHandler": logging.handlers.RotatingFileHandler,
    "TimedRotatingFileHandler": logging.handlers.TimedRotatingFileHandler,
    "SysLogHandler": logging.handlers.SysLogHandler,
    "HTTPHandler": logging.handlers.HTTPHandler,
    "QueueHandler": logging.handlers.QueueHandler,
}
| [
11748,
18931,
198,
11748,
18931,
13,
4993,
8116,
198,
11748,
640,
198,
198,
6738,
19720,
1330,
25659,
1691,
11,
7343,
11,
4377,
198,
198,
6738,
279,
11624,
1330,
36751,
198,
6738,
279,
11624,
13,
11250,
1330,
5972,
2667,
16934,
628,
198... | 2.830508 | 236 |
import json
import re
from textwrap import dedent
def parse_gh_output(result):
    """Extract a dict of GitHub Workflow set-output variables from result's output.

    Scans ``result.out`` for ``::set-output name=<var>::<value>`` lines and
    returns a ``{var: value}`` mapping (empty when none are found).
    """
    # re.findall always returns a list (possibly empty), never None, so the
    # previous `if matches is None` guard was dead code; dict() of an empty
    # list already yields {}.
    return dict(re.findall(r"::set-output\s+name=(\w+)::(.*)\n", result.out))
def test_custom_var(tox_ini, cmd):
    """--gh-matrix takes optional output variable name"""
    tox_ini(
        """
        [tox]
        envlist = lint,test
        """
    )
    result = cmd("--gh-matrix=myvarname")
    result.assert_success(is_run_test_env=False)
    gh_output = parse_gh_output(result)
    # The matrix lands under the custom name only; the default name is absent.
    assert "myvarname" in gh_output
    assert "envlist" not in gh_output
    expected = [
        {"name": "lint", "factors": ["lint"]},
        {"name": "test", "factors": ["test"]},
    ]
    assert json.loads(gh_output["myvarname"]) == expected
def test_installed_python(tox_ini, cmd, mock_interpreter):
    """--gh-matrix provides 'python_installed' versions for available interpreters"""
    # Register fake interpreters; py27 is deliberately left unregistered so
    # its matrix entry carries no "installed" field below.
    mock_interpreter("python3.5", version_info=(3, 5, 6, "final", 0))
    mock_interpreter("python3.10")
    mock_interpreter("pypy3.8")
    tox_ini(
        """
        [tox]
        envlist = py{27,35,310},pypy38
        """
    )
    result = cmd("--gh-matrix")
    result.assert_success(is_run_test_env=False)
    gh_output = parse_gh_output(result)
    envlist = json.loads(gh_output["envlist"])
    # Envs whose interpreter was registered gain an "installed" version;
    # py27 only reports the requested version/spec.
    assert envlist == [
        {
            "name": "py27",
            "factors": ["py27"],
            "python": {"version": "2.7", "spec": "2.7.0-alpha - 2.7"},
        },
        {
            "name": "py35",
            "factors": ["py35"],
            "python": {"version": "3.5", "spec": "3.5.0-alpha - 3.5", "installed": "3.5.6"},
        },
        {
            "name": "py310",
            "factors": ["py310"],
            "python": {
                "version": "3.10",
                "spec": "3.10.0-alpha - 3.10",
                "installed": "3.10.0",
            },
        },
        {
            "name": "pypy38",
            "factors": ["pypy38"],
            "python": {
                "version": "pypy-3.8",
                "spec": "pypy-3.8",
                "installed": "pypy-3.8.0-3.7.0",
            },
        },
    ]
def test_base_python(tox_ini, cmd, mock_interpreter):
    """Python version can come from an env's basepython"""
    tox_ini(
        """
        [tox]
        envlist = check,build

        [testenv:build]
        basepython = python3.9
        """
    )
    result = cmd("--gh-matrix")
    result.assert_success(is_run_test_env=False)
    matrix = json.loads(parse_gh_output(result)["envlist"])
    # Only the env with an explicit basepython gains a "python" entry.
    expected = [
        {"name": "check", "factors": ["check"]},
        {
            "name": "build",
            "factors": ["build"],
            "python": {"version": "3.9", "spec": "3.9.0-alpha - 3.9"},
        },
    ]
    assert matrix == expected
def test_ignore_outcome(tox_ini, cmd):
    """--gh-matrix identifies tox envs with ignore_outcome set"""
    tox_ini(
        """
        [tox]
        envlist = release,dev

        [testenv:dev]
        ignore_outcome = true
        """
    )
    result = cmd("--gh-matrix")
    result.assert_success(is_run_test_env=False)
    matrix = json.loads(parse_gh_output(result)["envlist"])
    # The dev env carries the ignore_outcome flag; release does not.
    expected = [
        {"name": "release", "factors": ["release"]},
        {"name": "dev", "factors": ["dev"], "ignore_outcome": True},
    ]
    assert matrix == expected
def test_limited_envlist(tox_ini, cmd):
    """Explicit -e envlist limits --gh-matrix output"""
    tox_ini(
        """
        [tox]
        envlist = py{27,35,36,37,38,39,310}
        """
    )
    result = cmd("--gh-matrix", "-e", "py35,py39,unknown-env")
    result.assert_success(is_run_test_env=False)
    gh_output = parse_gh_output(result)
    assert "envlist" in gh_output
    matrix = json.loads(gh_output["envlist"])
    # Only envs both requested with -e and known to tox survive.
    names = [entry["name"] for entry in matrix]
    assert names == ["py35", "py39"]
    assert "unknown-env" not in names
def test_skip_env(tox_ini, cmd, monkeypatch):
    """--gh-matrix filters out matches for TOX_SKIPENV"""
    tox_ini(
        """
        [tox]
        envlist = py{38,39}-{unix,win,mac}
        """
    )
    # TOX_SKIP_ENV is a Python regular expression that must match the
    # _entire_ envname to remove that env.
    monkeypatch.setenv("TOX_SKIP_ENV", ".*-(unix|mac)")
    result = cmd("--gh-matrix")
    result.assert_success(is_run_test_env=False)
    matrix = json.loads(parse_gh_output(result)["envlist"])
    surviving = [entry["name"] for entry in matrix]
    assert surviving == ["py38-win", "py39-win"]
| [
11748,
33918,
198,
11748,
302,
198,
6738,
2420,
37150,
1330,
4648,
298,
628,
198,
4299,
21136,
62,
456,
62,
22915,
7,
20274,
2599,
198,
220,
220,
220,
37227,
11627,
974,
257,
8633,
286,
21722,
5521,
11125,
900,
12,
22915,
9633,
422,
1... | 2.027577 | 2,357 |
#!/usr/local/bin/python
# Code Fights Chess Board Cell Color Problem
def chessBoardCellColor(cell1, cell2):
    '''
    Determine if the two given cells on chess board are same color
    A, C, E, G odd cells are same color as B, D, F, H even cells

    NOTE(review): relies on get_color(), which is not visible in this
    excerpt -- presumably defined elsewhere in the original file; confirm.
    '''
    return get_color(cell1) == get_color(cell2)


if __name__ == '__main__':
    # main() is likewise not visible in this excerpt -- confirm it exists.
    main()
| [
2,
48443,
14629,
14,
12001,
14,
8800,
14,
29412,
198,
2,
6127,
376,
2337,
25774,
5926,
12440,
5315,
20647,
628,
198,
4299,
19780,
29828,
28780,
10258,
7,
3846,
16,
11,
2685,
17,
2599,
198,
220,
220,
220,
705,
7061,
198,
220,
220,
22... | 2.710938 | 128 |
# Challenge 16 Easy
| [
2,
13879,
1467,
16789,
201,
198,
201,
198
] | 2.875 | 8 |
import json

# Reading JSON data.
with open('pokemons.json') as file:
    content = file.read()  # read the raw file contents

pokemons = json.loads(content)['results']  # the text is deserialised into the equivalent Python structure, a dict in this case;
# we then access the 'results' key, which is where our list of pokemons lives

print(pokemons[0])  # prints the first pokemon of the list

# Reading can also be done directly from the file object, using json.load
# instead of json.loads: loads parses JSON from a string, while load parses
# JSON from a file object.
with open("pokemons.json") as file:
    pokemons = json.load(file)["results"]

print(pokemons[0])  # prints the first pokemon of the list

# Writing data.
# Writing JSON files is similar to writing plain files, except that the
# data must be transformed first.
# Reading
with open("pokemons.json") as file:
    pokemons = json.load(file)["results"]

# Filtering
grass_type_pokemons = [
    pokemon for pokemon in pokemons if "Grass" in pokemon["type"]
]

# Writing
# Open the file so we can write only the Grass-type pokemons.
with open("pokemons_file.json", "w") as file:
    json_poke = json.dumps(grass_type_pokemons)  # conversion from Python to the JSON text format (str)
    file.write(json_poke)

# Just like deserialisation, which turns JSON-formatted text into Python,
# serialisation -- the reverse direction -- also has a method that writes
# to a file directly.
# read every pokemon
with open("pokemons.json") as file:
    pokemons = json.load(file)["results"]

# keep only the Grass-type ones
grass_type_pokemons = [
    pokemon for pokemon in pokemons if "Grass" in pokemon["type"]
]

# open the file for writing
with open("grass_pokemons.json", "w") as file:
    # writes to the file, converting the structure to JSON format
    json.dump(grass_type_pokemons, file)
11748,
33918,
628,
198,
4480,
1280,
10786,
35924,
11567,
13,
17752,
11537,
355,
2393,
25,
198,
220,
220,
220,
2695,
796,
2393,
13,
961,
3419,
220,
1303,
443,
270,
5330,
466,
610,
421,
23593,
198,
220,
220,
220,
22620,
11567,
796,
3391... | 2.738526 | 719 |
#!/usr/local/bin/python3
# ZyExpander: A Nested ZipFile Expander For ZyBooks.
# By: Michael Green
# Expands and organizes by student nested zip files that ZyBook's grading
# system uses for some reason.
import zipfile
import os
import argparse
# Goes through our argument list and pulls out the files to process along with any
# optional inputs specified.
# Verifying that input arguments supplied are existing zip files
# NOTE(review): the functions these comments describe -- and init() itself --
# are not visible in this excerpt; presumably defined in the original file.
init()
| [
2,
48443,
14629,
14,
12001,
14,
8800,
14,
29412,
18,
198,
2,
40905,
16870,
4066,
25,
317,
399,
7287,
38636,
8979,
5518,
4066,
1114,
40905,
30650,
13,
198,
2,
2750,
25,
3899,
3469,
198,
2,
5518,
1746,
290,
1618,
4340,
416,
3710,
2837... | 3.858407 | 113 |
#!/usr/bin/env python
import os
import pygame
# Run like this: MapImageToDatConverter.py ..\unusedAssets\maps\brecconary.png data\maps\brecconary.dat
if __name__ == '__main__':
    try:
        main()
    except Exception as e:
        import sys
        import traceback
        # traceback.format_exception returns a LIST of strings; the original
        # printed that list directly, producing an unreadable list repr.
        # Join the pieces so the traceback prints as normal text.
        # (First positional arg is type(e) per the docs, but it is ignored
        # when an exception instance is supplied.)
        print(''.join(traceback.format_exception(None, e, e.__traceback__)),
              file=sys.stderr, flush=True)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
11748,
28686,
198,
11748,
12972,
6057,
628,
198,
2,
5660,
588,
428,
25,
9347,
5159,
2514,
27354,
3103,
332,
353,
13,
9078,
11485,
59,
403,
1484,
8021,
1039,
59,
31803,
59,
4679,
... | 1.90566 | 265 |
from faker import Faker
# Single module-level Faker instance shared by importers of this module.
fake = Faker()
6738,
277,
3110,
1330,
376,
3110,
198,
198,
30706,
796,
376,
3110,
3419
] | 3 | 13 |
from ._download import download
def titanic(directory: str):
    """Download 'titanic.csv' into the given directory."""
    source_url = (
        "https://drive.google.com/file/d/"
        "1LYjbHW3wyJSMzGMMCmaOFNA_RIKqxRoI/view?usp=sharing"
    )
    download(directory, "titanic.csv", source_url)
| [
6738,
47540,
15002,
1330,
4321,
628,
198,
4299,
5259,
26277,
7,
34945,
25,
965,
2599,
198,
220,
220,
220,
37227,
15002,
82,
705,
83,
18642,
291,
13,
40664,
6,
287,
262,
1813,
8619,
37811,
198,
220,
220,
220,
4321,
7,
34945,
11,
366,... | 2.283333 | 120 |
from array import *
# NOTE(review): fibo() is not defined in this excerpt -- presumably defined
# elsewhere in the original file; confirm before running.
fibo()
| [
6738,
7177,
1330,
1635,
198,
198,
69,
26762,
3419,
198,
220,
220,
220,
220,
220,
220,
220,
220,
198,
220,
220,
220,
220
] | 1.782609 | 23 |
# Copyright (c) 2015 Hewlett-Packard. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import mock
# mocking muranoclient so that python-muranoclient
# doesn't need to be included in requirements.txt.
# (Including python-muranoclient in requirements.txt will
# cause failures in Jenkins because python-muranoclient is not
# included in global_requirements.txt at this point)
import sys

# Stub the whole muranoclient package hierarchy in sys.modules BEFORE the
# murano_driver import below, so importing the driver does not require the
# real python-muranoclient distribution (see the comment block above).
sys.modules['muranoclient'] = mock.Mock()
sys.modules['muranoclient.client'] = mock.Mock()
sys.modules['muranoclient.common'] = mock.Mock()
sys.modules['muranoclient.common.exceptions'] = mock.Mock()
from congress.datasources import murano_driver
from congress.tests import base
from congress.tests.datasources import util
from congress.tests import helper
# Sample responses from murano-client
env_response = [
util.ResponseObj({
u'created': u'2015-03-24T18:35:14',
u'id': u'ad9762b2d82f44ca8b8a6ce4a19dd1cc',
u'name': u'quick-env-2',
u'networking': {},
u'status': u'deploy failure',
u'tenant_id': u'610c6afc1fc54d23a58d316bf76e5f42',
u'updated': u'2015-03-24T18:46:56',
u'version': 1})]
service_response = [
util.ResponseObj({
u'?': {u'_26411a1861294160833743e45d0eaad9': {u'name': u'MySQL'},
u'_actions': {u'74f5b2d2-1f8d-4b1a-8238-4155ce2cadb2_restartVM':
{u'enabled': True, u'name': u'restartVM'}},
u'id': u'769af50c-9629-4694-b623-e9b392941279',
u'status': u'deploy failure',
u'type': u'io.murano.databases.MySql'},
u'database': u'',
u'instance': {u'?': {u'_actions': {},
u'id': u'76b9ca88-c668-4e37-a830-5845adc10b0e',
u'type':
u'io.murano.resources.LinuxMuranoInstance'},
u'assignFloatingIp': True,
u'availabilityZone': u'nova',
u'flavor': u'm1.small',
u'floatingIpAddress': u'172.24.4.4',
u'image': u'66e015aa-33c5-41ff-9b81-d8d17f9b02c3',
u'ipAddresses': [u'10.0.11.3', u'172.24.4.4'],
u'keyname': u'',
u'name': u'bcnfli7nn738y1',
u'networks': {u'customNetworks': [],
u'primaryNetwork': None,
u'useEnvironmentNetwork': True,
u'useFlatNetwork': False},
u'securityGroupName': None,
u'sharedIps': []},
u'name': u'MySqlDB',
u'password': u'Passw0rd.',
u'username': u''}),
util.ResponseObj({
u'?': {u'_26411a1861294160833743e45d0eaad9':
{u'name': u'Apache Tomcat'},
u'_actions': {},
u'id': u'ea6a7d9b-7799-4d00-9db3-4573cb94daec',
u'status': u'deploy failure',
u'type': u'io.murano.apps.apache.Tomcat'},
u'instance': {u'?': {u'_actions': {},
u'id': u'c52dda24-38d6-4f2f-9184-abca0beaa6e9',
u'type':
u'io.murano.resources.LinuxMuranoInstance'},
u'assignFloatingIp': True,
u'availabilityZone': u'nova',
u'flavor': u'm1.small',
u'floatingIpAddress': u'172.24.4.4',
u'image': u'66e015aa-33c5-41ff-9b81-d8d17f9b02c3',
u'ipAddresses': [u'10.0.11.4', u'172.24.4.4'],
u'keyname': u'',
u'name': u'woydqi7nn7ipc2',
u'networks': {u'customNetworks': [],
u'primaryNetwork': None,
u'useEnvironmentNetwork': True,
u'useFlatNetwork': False},
u'securityGroupName': None,
u'sharedIps': []},
u'name': u'Tomcat'}),
util.ResponseObj({
u'?': {u'_26411a1861294160833743e45d0eaad9': {u'name': u'PetClinic'},
u'_actions': {},
u'id': u'fda74653-8b66-42e2-be16-12ebc87d7570',
u'status': u'deploy failure',
u'type': u'io.murano.apps.java.PetClinic'},
u'database': u'769af50c-9629-4694-b623-e9b392941279',
u'dbName': u'pet_db',
u'dbPassword': u'Passw0rd.',
u'dbUser': u'pet_user',
u'name': u'PetClinic',
u'tomcat': u'ea6a7d9b-7799-4d00-9db3-4573cb94daec',
u'warLocation':
u'https://dl.dropboxusercontent.com/u/1684617/petclinic.war'})]
deployment_response = [
util.ResponseObj({
u'action': {u'args': {},
u'method': u'deploy',
u'object_id': u'ad9762b2d82f44ca8b8a6ce4a19dd1cc'},
u'created': u'2015-03-24T18:36:23',
u'description':
{u'?': {u'id': u'ad9762b2d82f44ca8b8a6ce4a19dd1cc',
u'type': u'io.murano.Environment'},
u'defaultNetworks':
{u'environment':
{u'?': {u'id':
u'a2be8265b01743c0bdf645772d632bf0',
u'type': u'io.murano.resources.NeutronNetwork'},
u'name': u'quick-env-2-network'},
u'flat': None},
u'name': u'quick-env-2',
u'services':
[{u'?':
{u'_26411a1861294160833743e45d0eaad9':
{u'name': u'MySQL'},
u'id': u'769af50c-9629-4694-b623-e9b392941279',
u'type': u'io.murano.databases.MySql'},
u'database': u'',
u'instance':
{u'?': {u'id': u'76b9ca88-c668-4e37-a830-5845adc10b0e',
u'type': u'io.murano.resources.LinuxMuranoInstance'},
u'assignFloatingIp': True,
u'availabilityZone': u'nova',
u'flavor': u'm1.small',
u'image': u'66e015aa-33c5-41ff-9b81-d8d17f9b02c3',
u'keyname': u'',
u'name': u'bcnfli7nn738y1'},
u'name': u'MySqlDB',
u'password': u'*** SANITIZED ***',
u'username': u''},
{u'?':
{u'_26411a1861294160833743e45d0eaad9': {u'name': u'Apache Tomcat'},
u'id': u'ea6a7d9b-7799-4d00-9db3-4573cb94daec',
u'type': u'io.murano.apps.apache.Tomcat'},
u'instance':
{u'?': {u'id': u'c52dda24-38d6-4f2f-9184-abca0beaa6e9',
u'type': u'io.murano.resources.LinuxMuranoInstance'},
u'assignFloatingIp': True,
u'availabilityZone': u'nova',
u'flavor': u'm1.small',
u'image': u'66e015aa-33c5-41ff-9b81-d8d17f9b02c3',
u'keyname': u'',
u'name': u'woydqi7nn7ipc2'},
u'name': u'Tomcat'},
{u'?': {u'_26411a1861294160833743e45d0eaad9':
{u'name': u'PetClinic'},
u'id': u'fda74653-8b66-42e2-be16-12ebc87d7570',
u'type': u'io.murano.apps.java.PetClinic'},
u'database': u'769af50c-9629-4694-b623-e9b392941279',
u'dbName': u'pet_db',
u'dbPassword': u'*** SANITIZED ***',
u'dbUser': u'pet_user',
u'name': u'PetClinic',
u'tomcat': u'ea6a7d9b-7799-4d00-9db3-4573cb94daec',
u'warLocation':
u'https://dl.dropboxusercontent.com/u/1684617/petclinic.war'}]},
u'environment_id': u'ad9762b2d82f44ca8b8a6ce4a19dd1cc',
u'finished': u'2015-03-24T18:46:56',
u'id': u'4aa60b31d8ce434284e03aa13c6e11e0',
u'result': {u'isException': True,
u'result':
{u'details': u'murano.common.exceptions.TimeoutException:'
' The Agent does not respondwithin 600 seconds',
u'message': u'[murano.common.exceptions.TimeoutException]'
': The Agent does not respondwithin 600 seconds'}},
u'started': u'2015-03-24T18:36:23',
u'state': u'completed_w_errors',
u'updated': u'2015-03-24T18:46:56'})]
package_response = [
util.ResponseObj({
u'author': u'Mirantis, Inc',
u'categories': [],
u'class_definitions': [u'io.murano.apps.apache.Tomcat'],
u'created': u'2015-03-23T21:28:11',
u'description': u'Apache Tomcat is an open source software '
'implementation of the Java Servlet and JavaServer Pages '
'technologies.\n',
u'enabled': True,
u'fully_qualified_name': u'io.murano.apps.apache.Tomcat',
u'id': u'a7d64980999948dc96401cdce5ae2141',
u'is_public': False,
u'name': u'Apache Tomcat',
u'owner_id': u'610c6afc1fc54d23a58d316bf76e5f42',
u'supplier': {},
u'tags': [u'Servlets', u'Server', u'Pages', u'Java'],
u'type': u'Application',
u'updated': u'2015-03-23T21:28:11'}),
util.ResponseObj({
u'author': u'Mirantis, Inc',
u'categories': [],
u'class_definitions': [u'io.murano.apps.linux.Git'],
u'created': u'2015-03-23T21:26:56',
u'description': u'Simple Git repo hosted on Linux VM.\n',
u'enabled': True,
u'fully_qualified_name': u'io.murano.apps.linux.Git',
u'id': u'3ff58cdfeb27487fb3127fb8fd45109c',
u'is_public': False,
u'name': u'Git',
u'owner_id': u'610c6afc1fc54d23a58d316bf76e5f42',
u'supplier': {},
u'tags': [u'Linux', u'connection'],
u'type': u'Application',
u'updated': u'2015-03-23T21:26:56'}),
util.ResponseObj({
u'author': u'Mirantis, Inc',
u'categories': [],
u'class_definitions': [u'io.murano.databases.MySql'],
u'created': u'2015-03-23T21:28:58',
u'description': u'MySql is a relational database management system '
'(RDBMS), and ships with\nno GUI tools to administer MySQL databases '
'or manage data contained within\nthe databases.\n',
u'enabled': True,
u'fully_qualified_name': u'io.murano.databases.MySql',
u'id': u'884b764c0ce6439d8566b3b2da967687',
u'is_public': False,
u'name': u'MySQL',
u'owner_id': u'610c6afc1fc54d23a58d316bf76e5f42',
u'supplier': {},
u'tags': [u'Database', u'MySql', u'SQL', u'RDBMS'],
u'type': u'Application',
u'updated': u'2015-03-23T21:28:58'}),
util.ResponseObj({
u'author': u'Mirantis, Inc',
u'categories': [],
u'class_definitions': [u'io.murano.apps.java.PetClinic'],
u'created': u'2015-03-24T18:25:24',
u'description': u'An example of a Java app running on a '
'Apache Tomcat Servlet container and using the either Postgre SQL, '
'or MySql database\n',
u'enabled': True,
u'fully_qualified_name': u'io.murano.apps.java.PetClinic',
u'id': u'9f7c9e2ed8f9462a8f9037032ab64755',
u'is_public': False,
u'name': u'PetClinic',
u'owner_id': u'610c6afc1fc54d23a58d316bf76e5f42',
u'supplier': {},
u'tags': [u'Servlets', u'Server', u'Pages', u'Java'],
u'type': u'Application',
u'updated': u'2015-03-24T18:25:24'}),
util.ResponseObj({
u'author': u'Mirantis, Inc',
u'categories': [],
u'class_definitions': [u'io.murano.databases.PostgreSql'],
u'created': u'2015-03-23T21:29:10',
u'description': u'PostgreSQL is a powerful, open source '
'object-relational database system.\nIt has more than 15 years '
'of active development and a proven architecture\nthat has earned '
'it a strong reputation for reliability, data integrity,\nand '
'correctness.\n',
u'enabled': True,
u'fully_qualified_name': u'io.murano.databases.PostgreSql',
u'id': u'4b9c6a24c2e64f928156e0c87324c394',
u'is_public': False,
u'name': u'PostgreSQL',
u'owner_id': u'610c6afc1fc54d23a58d316bf76e5f42',
u'supplier': {},
u'tags': [u'Database', u'Postgre', u'SQL', u'RDBMS'],
u'type': u'Application',
u'updated': u'2015-03-23T21:29:10'}),
util.ResponseObj({
u'author': u'Mirantis, Inc',
u'categories': [],
u'class_definitions': [u'io.murano.databases.SqlDatabase'],
u'created': u'2015-03-24T18:26:32',
u'description': u'This is the interface defining API for different '
'SQL - RDBMS databases\n',
u'enabled': True,
u'fully_qualified_name': u'io.murano.databases',
u'id': u'5add5a561da341c4875495c5887957a8',
u'is_public': False,
u'name': u'SQL Library',
u'owner_id': u'610c6afc1fc54d23a58d316bf76e5f42',
u'supplier': {},
u'tags': [u'SQL', u'RDBMS'],
u'type': u'Library',
u'updated': u'2015-03-24T18:26:32'})]
action_response = 'c79eb72600024fa1995345a2b2eb3acd'
# Expected datasource table content
# Rows are (environment_id, status) pairs mirroring env_response above.
expected_states = [
    (u'ad9762b2d82f44ca8b8a6ce4a19dd1cc', u'deploy failure'),
]
expected_environment_parent_types = [
(u'ad9762b2d82f44ca8b8a6ce4a19dd1cc', 'io.murano.Object'),
(u'ad9762b2d82f44ca8b8a6ce4a19dd1cc', 'io.murano.Environment'),
]
expected_env_properties = [
(u'ad9762b2d82f44ca8b8a6ce4a19dd1cc', u'created', '2015-03-24T18:35:14'),
(u'ad9762b2d82f44ca8b8a6ce4a19dd1cc', u'version', 1),
(u'ad9762b2d82f44ca8b8a6ce4a19dd1cc', u'status', 'deploy failure'),
(u'ad9762b2d82f44ca8b8a6ce4a19dd1cc', u'name', 'quick-env-2'),
(u'ad9762b2d82f44ca8b8a6ce4a19dd1cc', u'updated', '2015-03-24T18:46:56'),
]
expected_service_properties = [
(u'76b9ca88-c668-4e37-a830-5845adc10b0e', u'ipAddresses', '10.0.11.3'),
(u'76b9ca88-c668-4e37-a830-5845adc10b0e', u'ipAddresses', '172.24.4.4'),
(u'76b9ca88-c668-4e37-a830-5845adc10b0e',
u'networks.useFlatNetwork', False),
(u'769af50c-9629-4694-b623-e9b392941279', u'name', 'MySqlDB'),
(u'c52dda24-38d6-4f2f-9184-abca0beaa6e9',
u'networks.useEnvironmentNetwork', True),
(u'c52dda24-38d6-4f2f-9184-abca0beaa6e9',
u'floatingIpAddress', '172.24.4.4'),
(u'fda74653-8b66-42e2-be16-12ebc87d7570', u'dbPassword', 'Passw0rd.'),
(u'fda74653-8b66-42e2-be16-12ebc87d7570',
u'database', '769af50c-9629-4694-b623-e9b392941279'),
(u'fda74653-8b66-42e2-be16-12ebc87d7570',
u'tomcat', 'ea6a7d9b-7799-4d00-9db3-4573cb94daec'),
(u'fda74653-8b66-42e2-be16-12ebc87d7570', u'warLocation',
'https://dl.dropboxusercontent.com/u/1684617/petclinic.war'),
(u'c52dda24-38d6-4f2f-9184-abca0beaa6e9', u'availabilityZone', 'nova'),
(u'76b9ca88-c668-4e37-a830-5845adc10b0e', u'name', 'bcnfli7nn738y1'),
(u'fda74653-8b66-42e2-be16-12ebc87d7570', u'dbUser', 'pet_user'),
(u'c52dda24-38d6-4f2f-9184-abca0beaa6e9',
u'image', '66e015aa-33c5-41ff-9b81-d8d17f9b02c3'),
(u'76b9ca88-c668-4e37-a830-5845adc10b0e', u'flavor', 'm1.small'),
(u'c52dda24-38d6-4f2f-9184-abca0beaa6e9', u'ipAddresses', '10.0.11.4'),
(u'c52dda24-38d6-4f2f-9184-abca0beaa6e9', u'name', 'woydqi7nn7ipc2'),
(u'fda74653-8b66-42e2-be16-12ebc87d7570', u'name', 'PetClinic'),
(u'c52dda24-38d6-4f2f-9184-abca0beaa6e9', u'assignFloatingIp', True),
(u'76b9ca88-c668-4e37-a830-5845adc10b0e', u'assignFloatingIp', True),
(u'769af50c-9629-4694-b623-e9b392941279', u'password', 'Passw0rd.'),
(u'c52dda24-38d6-4f2f-9184-abca0beaa6e9', u'flavor', 'm1.small'),
(u'fda74653-8b66-42e2-be16-12ebc87d7570', u'dbName', 'pet_db'),
(u'c52dda24-38d6-4f2f-9184-abca0beaa6e9',
u'networks.useFlatNetwork', False),
(u'76b9ca88-c668-4e37-a830-5845adc10b0e',
u'networks.useEnvironmentNetwork', True),
(u'76b9ca88-c668-4e37-a830-5845adc10b0e', u'availabilityZone', 'nova'),
(u'76b9ca88-c668-4e37-a830-5845adc10b0e',
u'floatingIpAddress', '172.24.4.4'),
(u'c52dda24-38d6-4f2f-9184-abca0beaa6e9', u'ipAddresses', '172.24.4.4'),
(u'ea6a7d9b-7799-4d00-9db3-4573cb94daec', u'name', 'Tomcat'),
(u'76b9ca88-c668-4e37-a830-5845adc10b0e',
u'image', '66e015aa-33c5-41ff-9b81-d8d17f9b02c3'),
]
expected_package_properties = [
(u'4b9c6a24c2e64f928156e0c87324c394', u'is_public', False),
(u'3ff58cdfeb27487fb3127fb8fd45109c', u'tags', 'connection'),
(u'884b764c0ce6439d8566b3b2da967687', u'created', '2015-03-23T21:28:58'),
(u'884b764c0ce6439d8566b3b2da967687', u'tags', 'SQL'),
(u'9f7c9e2ed8f9462a8f9037032ab64755', u'tags', 'Servlets'),
(u'a7d64980999948dc96401cdce5ae2141', u'tags', 'Servlets'),
(u'4b9c6a24c2e64f928156e0c87324c394', u'created', '2015-03-23T21:29:10'),
(u'9f7c9e2ed8f9462a8f9037032ab64755', u'fully_qualified_name',
'io.murano.apps.java.PetClinic'),
(u'884b764c0ce6439d8566b3b2da967687', u'type', 'Application'),
(u'5add5a561da341c4875495c5887957a8', u'created', '2015-03-24T18:26:32'),
(u'884b764c0ce6439d8566b3b2da967687', u'name', 'MySQL'),
(u'884b764c0ce6439d8566b3b2da967687', u'tags', 'Database'),
(u'5add5a561da341c4875495c5887957a8', u'enabled', True),
(u'9f7c9e2ed8f9462a8f9037032ab64755', u'tags', 'Pages'),
(u'4b9c6a24c2e64f928156e0c87324c394', u'tags', 'Database'),
(u'3ff58cdfeb27487fb3127fb8fd45109c', u'type', 'Application'),
(u'5add5a561da341c4875495c5887957a8', u'type', 'Library'),
(u'4b9c6a24c2e64f928156e0c87324c394', u'type', 'Application'),
(u'884b764c0ce6439d8566b3b2da967687', u'tags', 'MySql'),
(u'5add5a561da341c4875495c5887957a8', u'fully_qualified_name',
'io.murano.databases'),
(u'3ff58cdfeb27487fb3127fb8fd45109c', u'author', 'Mirantis, Inc'),
(u'3ff58cdfeb27487fb3127fb8fd45109c', u'is_public', False),
(u'4b9c6a24c2e64f928156e0c87324c394', u'tags', 'SQL'),
(u'884b764c0ce6439d8566b3b2da967687', u'enabled', True),
(u'4b9c6a24c2e64f928156e0c87324c394', u'updated', '2015-03-23T21:29:10'),
(u'884b764c0ce6439d8566b3b2da967687', u'fully_qualified_name',
'io.murano.databases.MySql'),
(u'9f7c9e2ed8f9462a8f9037032ab64755', u'name', 'PetClinic'),
(u'4b9c6a24c2e64f928156e0c87324c394', u'fully_qualified_name',
'io.murano.databases.PostgreSql'),
(u'9f7c9e2ed8f9462a8f9037032ab64755', u'tags', 'Java'),
(u'4b9c6a24c2e64f928156e0c87324c394', u'tags', 'Postgre'),
(u'a7d64980999948dc96401cdce5ae2141', u'is_public', False),
(u'a7d64980999948dc96401cdce5ae2141', u'type', 'Application'),
(u'4b9c6a24c2e64f928156e0c87324c394', u'name', 'PostgreSQL'),
(u'3ff58cdfeb27487fb3127fb8fd45109c', u'tags', 'Linux'),
(u'9f7c9e2ed8f9462a8f9037032ab64755', u'author', 'Mirantis, Inc'),
(u'5add5a561da341c4875495c5887957a8', u'is_public', False),
(u'5add5a561da341c4875495c5887957a8', u'tags', 'SQL'),
(u'4b9c6a24c2e64f928156e0c87324c394', u'author', 'Mirantis, Inc'),
(u'5add5a561da341c4875495c5887957a8', u'class_definitions',
'io.murano.databases.SqlDatabase'),
(u'3ff58cdfeb27487fb3127fb8fd45109c', u'updated', '2015-03-23T21:26:56'),
(u'5add5a561da341c4875495c5887957a8', u'tags', 'RDBMS'),
(u'a7d64980999948dc96401cdce5ae2141', u'enabled', True),
(u'5add5a561da341c4875495c5887957a8', u'updated', '2015-03-24T18:26:32'),
(u'9f7c9e2ed8f9462a8f9037032ab64755', u'class_definitions',
'io.murano.apps.java.PetClinic'),
(u'3ff58cdfeb27487fb3127fb8fd45109c', u'enabled', True),
(u'a7d64980999948dc96401cdce5ae2141', u'class_definitions',
'io.murano.apps.apache.Tomcat'),
(u'9f7c9e2ed8f9462a8f9037032ab64755', u'created', '2015-03-24T18:25:24'),
(u'5add5a561da341c4875495c5887957a8', u'author', 'Mirantis, Inc'),
(u'9f7c9e2ed8f9462a8f9037032ab64755', u'is_public', False),
(u'884b764c0ce6439d8566b3b2da967687', u'class_definitions',
'io.murano.databases.MySql'),
(u'884b764c0ce6439d8566b3b2da967687', u'is_public', False),
(u'884b764c0ce6439d8566b3b2da967687', u'tags', 'RDBMS'),
(u'a7d64980999948dc96401cdce5ae2141', u'author', 'Mirantis, Inc'),
(u'3ff58cdfeb27487fb3127fb8fd45109c', u'name', 'Git'),
(u'a7d64980999948dc96401cdce5ae2141', u'fully_qualified_name',
'io.murano.apps.apache.Tomcat'),
(u'9f7c9e2ed8f9462a8f9037032ab64755', u'tags', 'Server'),
(u'4b9c6a24c2e64f928156e0c87324c394', u'tags', 'RDBMS'),
(u'4b9c6a24c2e64f928156e0c87324c394', u'class_definitions',
'io.murano.databases.PostgreSql'),
(u'a7d64980999948dc96401cdce5ae2141', u'tags', 'Pages'),
(u'4b9c6a24c2e64f928156e0c87324c394', u'enabled', True),
(u'a7d64980999948dc96401cdce5ae2141', u'tags', 'Server'),
(u'a7d64980999948dc96401cdce5ae2141', u'updated', '2015-03-23T21:28:11'),
(u'884b764c0ce6439d8566b3b2da967687', u'updated', '2015-03-23T21:28:58'),
(u'a7d64980999948dc96401cdce5ae2141', u'name', 'Apache Tomcat'),
(u'884b764c0ce6439d8566b3b2da967687', u'author', 'Mirantis, Inc'),
(u'9f7c9e2ed8f9462a8f9037032ab64755', u'enabled', True),
(u'a7d64980999948dc96401cdce5ae2141', u'created', '2015-03-23T21:28:11'),
(u'3ff58cdfeb27487fb3127fb8fd45109c', u'created', '2015-03-23T21:26:56'),
(u'5add5a561da341c4875495c5887957a8', u'name', 'SQL Library'),
(u'9f7c9e2ed8f9462a8f9037032ab64755', u'type', 'Application'),
(u'3ff58cdfeb27487fb3127fb8fd45109c', u'fully_qualified_name',
'io.murano.apps.linux.Git'),
(u'a7d64980999948dc96401cdce5ae2141', u'tags', 'Java'),
(u'9f7c9e2ed8f9462a8f9037032ab64755', u'updated', '2015-03-24T18:25:24'),
(u'3ff58cdfeb27487fb3127fb8fd45109c', u'class_definitions',
'io.murano.apps.linux.Git'),
]
expected_service_objects = [
(u'769af50c-9629-4694-b623-e9b392941279',
u'ad9762b2d82f44ca8b8a6ce4a19dd1cc', u'io.murano.databases.MySql'),
(u'fda74653-8b66-42e2-be16-12ebc87d7570',
u'ad9762b2d82f44ca8b8a6ce4a19dd1cc', u'io.murano.apps.java.PetClinic'),
(u'76b9ca88-c668-4e37-a830-5845adc10b0e',
u'769af50c-9629-4694-b623-e9b392941279',
u'io.murano.resources.LinuxMuranoInstance'),
(u'ea6a7d9b-7799-4d00-9db3-4573cb94daec',
u'ad9762b2d82f44ca8b8a6ce4a19dd1cc', u'io.murano.apps.apache.Tomcat'),
(u'c52dda24-38d6-4f2f-9184-abca0beaa6e9',
u'ea6a7d9b-7799-4d00-9db3-4573cb94daec',
u'io.murano.resources.LinuxMuranoInstance'),
]
expected_package_objects = [
(u'5add5a561da341c4875495c5887957a8',
u'610c6afc1fc54d23a58d316bf76e5f42', u'Library'),
(u'4b9c6a24c2e64f928156e0c87324c394',
u'610c6afc1fc54d23a58d316bf76e5f42', 'io.murano.Application'),
(u'3ff58cdfeb27487fb3127fb8fd45109c',
u'610c6afc1fc54d23a58d316bf76e5f42', 'io.murano.Application'),
(u'a7d64980999948dc96401cdce5ae2141',
u'610c6afc1fc54d23a58d316bf76e5f42', 'io.murano.Application'),
(u'9f7c9e2ed8f9462a8f9037032ab64755',
u'610c6afc1fc54d23a58d316bf76e5f42', 'io.murano.Application'),
(u'884b764c0ce6439d8566b3b2da967687',
u'610c6afc1fc54d23a58d316bf76e5f42', 'io.murano.Application'),
]
expected_service_parent_types = [
(u'76b9ca88-c668-4e37-a830-5845adc10b0e', 'io.murano.resources.Instance'),
(u'76b9ca88-c668-4e37-a830-5845adc10b0e',
'io.murano.resources.LinuxInstance'),
(u'76b9ca88-c668-4e37-a830-5845adc10b0e', 'io.murano.Object'),
(u'76b9ca88-c668-4e37-a830-5845adc10b0e',
'io.murano.resources.LinuxMuranoInstance'),
(u'c52dda24-38d6-4f2f-9184-abca0beaa6e9',
'io.murano.resources.LinuxInstance'),
(u'c52dda24-38d6-4f2f-9184-abca0beaa6e9',
'io.murano.resources.LinuxMuranoInstance'),
(u'c52dda24-38d6-4f2f-9184-abca0beaa6e9', 'io.murano.Object'),
(u'c52dda24-38d6-4f2f-9184-abca0beaa6e9', 'io.murano.resources.Instance'),
]
expected_service_relationships = [
(u'fda74653-8b66-42e2-be16-12ebc87d7570',
u'769af50c-9629-4694-b623-e9b392941279', u'database'),
(u'ad9762b2d82f44ca8b8a6ce4a19dd1cc',
u'ea6a7d9b-7799-4d00-9db3-4573cb94daec', 'services'),
(u'fda74653-8b66-42e2-be16-12ebc87d7570',
u'ea6a7d9b-7799-4d00-9db3-4573cb94daec', u'tomcat'),
(u'ad9762b2d82f44ca8b8a6ce4a19dd1cc',
u'769af50c-9629-4694-b623-e9b392941279', 'services'),
(u'ad9762b2d82f44ca8b8a6ce4a19dd1cc',
u'fda74653-8b66-42e2-be16-12ebc87d7570', 'services'),
(u'769af50c-9629-4694-b623-e9b392941279',
u'76b9ca88-c668-4e37-a830-5845adc10b0e', 'instance'),
(u'ea6a7d9b-7799-4d00-9db3-4573cb94daec',
u'c52dda24-38d6-4f2f-9184-abca0beaa6e9', 'instance'),
]
expected_connected = [
(u'fda74653-8b66-42e2-be16-12ebc87d7570',
u'ea6a7d9b-7799-4d00-9db3-4573cb94daec'),
(u'fda74653-8b66-42e2-be16-12ebc87d7570',
u'c52dda24-38d6-4f2f-9184-abca0beaa6e9'),
(u'ad9762b2d82f44ca8b8a6ce4a19dd1cc',
u'769af50c-9629-4694-b623-e9b392941279'),
(u'ad9762b2d82f44ca8b8a6ce4a19dd1cc',
u'ea6a7d9b-7799-4d00-9db3-4573cb94daec'),
(u'769af50c-9629-4694-b623-e9b392941279',
u'76b9ca88-c668-4e37-a830-5845adc10b0e'),
(u'ad9762b2d82f44ca8b8a6ce4a19dd1cc',
u'fda74653-8b66-42e2-be16-12ebc87d7570'),
(u'fda74653-8b66-42e2-be16-12ebc87d7570',
u'769af50c-9629-4694-b623-e9b392941279'),
(u'fda74653-8b66-42e2-be16-12ebc87d7570',
u'76b9ca88-c668-4e37-a830-5845adc10b0e'),
(u'ad9762b2d82f44ca8b8a6ce4a19dd1cc',
u'76b9ca88-c668-4e37-a830-5845adc10b0e'),
(u'ea6a7d9b-7799-4d00-9db3-4573cb94daec',
u'c52dda24-38d6-4f2f-9184-abca0beaa6e9'),
(u'ad9762b2d82f44ca8b8a6ce4a19dd1cc',
u'c52dda24-38d6-4f2f-9184-abca0beaa6e9'),
]
expected_deployment_objects = [
(u'a2be8265b01743c0bdf645772d632bf0', u'ad9762b2d82f44ca8b8a6ce4a19dd1cc',
u'io.murano.resources.NeutronNetwork')
]
expected_deployment_properties = [
(u'a2be8265b01743c0bdf645772d632bf0', u'name', 'quick-env-2-network')
]
expected_deployment_parent_types = [
(u'a2be8265b01743c0bdf645772d632bf0', 'io.murano.Object'),
(u'a2be8265b01743c0bdf645772d632bf0', 'io.murano.resources.Network'),
(u'a2be8265b01743c0bdf645772d632bf0', 'io.murano.resources.NeutronNetwork')
]
| [
2,
15069,
357,
66,
8,
1853,
30446,
15503,
12,
11869,
446,
13,
1439,
2489,
10395,
13,
198,
2,
198,
2,
220,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
345,
743,
198,
2,
220,
407,
779,
428,
... | 1.779121 | 14,895 |
import datajoint as dj
# Prefix applied to all adamacs database schema names when the user has not
# configured one explicitly.
default_prefix = 'adamacs_'
# Ensure the 'custom' section exists in the DataJoint config before reading
# from it (a fresh config may not have it).
if 'custom' not in dj.config:
    dj.config['custom'] = {}
# Schema-name prefix: taken from dj.config['custom']['database.prefix'] when
# set, otherwise falls back to the default above.
db_prefix = dj.config['custom'].get('database.prefix', default_prefix)
| [
11748,
1366,
73,
1563,
355,
42625,
198,
198,
12286,
62,
40290,
796,
705,
324,
11494,
82,
62,
6,
198,
198,
361,
705,
23144,
6,
407,
287,
42625,
13,
11250,
25,
198,
220,
220,
220,
42625,
13,
11250,
17816,
23144,
20520,
796,
23884,
628... | 2.80303 | 66 |
from test_utils import get_rendered_file
| [
6738,
1332,
62,
26791,
1330,
651,
62,
26238,
62,
7753,
628
] | 3.818182 | 11 |
import json
import requests
# noinspection PyUnresolvedReferences
from six.moves.urllib.parse import urljoin
from .base import AnymailBaseBackend, BasePayload
from ..exceptions import AnymailRequestsAPIError, AnymailSerializationError
from .._version import __version__
class AnymailRequestsBackend(AnymailBaseBackend):
    """
    Base Anymail email backend for ESPs that use an HTTP API via requests
    """

    def __init__(self, api_url, **kwargs):
        """Init options from Django settings"""
        self.api_url = api_url
        super(AnymailRequestsBackend, self).__init__(**kwargs)
        self.session = None

    def post_to_esp(self, payload, message):
        """Post payload to ESP send API endpoint, and return the raw response.

        payload is the result of build_message_payload
        message is the original EmailMessage
        return should be a requests.Response

        Can raise AnymailRequestsAPIError for HTTP errors in the post
        """
        request_params = payload.get_request_params(self.api_url)
        try:
            response = self.session.request(**request_params)
        except requests.RequestException as err:
            # Build an error type that inherits from both AnymailRequestsAPIError
            # and the original requests exception class, so callers can catch
            # whichever of the two they expect.
            combined_error_type = type('AnymailRequestsAPIError',
                                       (AnymailRequestsAPIError, type(err)), {})
            raise combined_error_type(
                "Error posting to %s:" % request_params.get('url', '<missing url>'),
                raised_from=err, email_message=message, payload=payload)
        self.raise_for_status(response, payload, message)
        return response

    def raise_for_status(self, response, payload, message):
        """Raise AnymailRequestsAPIError if response is an HTTP error

        Subclasses can override for custom error checking
        (though should defer parsing/deserialization of the body to
        parse_recipient_status)
        """
        if response.status_code == 200:
            return
        raise AnymailRequestsAPIError(
            email_message=message, payload=payload, response=response)

    def deserialize_json_response(self, response, payload, message):
        """Deserialize an ESP API response that's in json.

        Useful for implementing deserialize_response
        """
        try:
            parsed = response.json()
        except ValueError:
            raise AnymailRequestsAPIError(
                "Invalid JSON in %s API response" % self.esp_name,
                email_message=message, payload=payload, response=response)
        return parsed
class RequestsPayload(BasePayload):
    """Abstract Payload for AnymailRequestsBackend"""

    def get_request_params(self, api_url):
        """Returns a dict of requests.request params that will send payload to the ESP.

        :param api_url: the base api_url for the backend
        :return: dict
        """
        endpoint = self.get_api_endpoint()
        url = api_url if endpoint is None else urljoin(api_url, endpoint)
        return {
            'method': self.method,
            'url': url,
            'params': self.params,
            'data': self.serialize_data(),
            'headers': self.headers,
            'files': self.files,
            'auth': self.auth,
            # 'json' is deliberately omitted: we do our own serialization
            # so that serialization errors carry extra context.
        }

    def get_api_endpoint(self):
        """Returns a str that should be joined to the backend's api_url for sending this payload."""
        return None

    def serialize_data(self):
        """Performs any necessary serialization on self.data, and returns the result."""
        return self.data

    def serialize_json(self, data):
        """Returns data serialized to json, raising appropriate errors.

        Useful for implementing serialize_data in a subclass,
        """
        try:
            return json.dumps(data)
        except TypeError as err:
            # Wrap "not JSON serializable" with message/backend/payload context.
            raise AnymailSerializationError(
                orig_err=err, email_message=self.message,
                backend=self.backend, payload=self)
| [
11748,
33918,
198,
198,
11748,
7007,
198,
2,
645,
1040,
14978,
9485,
3118,
411,
5634,
19927,
198,
6738,
2237,
13,
76,
5241,
13,
333,
297,
571,
13,
29572,
1330,
19016,
22179,
198,
198,
6738,
764,
8692,
1330,
4377,
4529,
14881,
7282,
43... | 2.510689 | 1,684 |
# @Team: Big Data Group
# @Time: 2020/7/6 16:10
# @Author: albert-bing
# @File: MysqlUtil.py
# @Software: PyCharm
# start your code
import pymysql
# Test-environment database host.
host = '81.70.166.101'
# Production-environment host (kept for reference, commented out):
# host='172.21.0.49'
# NOTE(review): credentials are hard-coded in source; they should be moved to
# configuration / environment variables.
password = 'r1kJzB'
port = 3306
# Insert almanac (Chinese calendar) data into the database
# Query by date
# Insert daily horoscope data
# Insert weekly horoscope data
# Insert monthly horoscope data
# Insert yearly horoscope data
# Insert the horoscope detail code table
# Insert calendar entries
# Insert today's epidemic status --- domestic
# Insert today's epidemic status --- overseas
# Insert historical domestic epidemic data
# Insert historical overseas epidemic data
# Insert current-day epidemic data for domestic provinces/cities
# Prepend an area_id to each daily record
# Insert epidemic data for residential communities
# Insert data on cases imported from abroad
# Fetch county/district information
| [
2,
2488,
15592,
171,
120,
248,
12804,
6060,
4912,
198,
2,
2488,
7575,
171,
120,
248,
42334,
14,
22,
14,
21,
1467,
25,
940,
198,
2,
2488,
13838,
171,
120,
248,
282,
4835,
9129,
4623,
198,
2,
2488,
8979,
171,
120,
248,
44,
893,
13... | 0.933852 | 514 |
#!/usr/bin/env python3
"""PyBluez advanced example l2-unreliable-client.py

Connects to l2-unreliable-server over an L2CAP socket, lowers the link's
flush timeout (so unACK'd packets are dropped instead of retransmitted
indefinitely), then sends a stream of 672-byte packets.
"""

import sys

import bluetooth
import bluetooth._bluetooth as bluez  # low level bluetooth wrappers

# Create the client socket
sock = bluetooth.BluetoothSocket(bluetooth.L2CAP)

if len(sys.argv) < 4:
    print("Usage: l2-unreliable-client.py <addr> <timeout> <num_packets>")
    print("  address - device that l2-unreliable-server is running on")
    print("  timeout - wait timeout * 0.625ms before dropping unACK'd packets")
    # Fixed: the packets sent below are 672 bytes, not 627 as previously stated.
    print("  num_packets - number of 672-byte packets to send on connect")
    sys.exit(2)

bt_addr = sys.argv[1]
timeout = int(sys.argv[2])
num_packets = int(sys.argv[3])

print("Trying to connect to {}:1001...".format(bt_addr))
port = 0x1001
sock.connect((bt_addr, port))

print("Connected. Adjusting link parameters.")
print("Current flush timeout is {} ms.".format(
    bluetooth.read_flush_timeout(bt_addr)))
try:
    # Writing the flush timeout requires raw HCI access (root privileges).
    bluetooth.write_flush_timeout(bt_addr, timeout)
except bluez.error as e:
    print("Error setting flush timeout. Are you sure you're superuser?")
    print(e)
    sys.exit(1)
print("New flush timeout is {} ms.".format(
    bluetooth.read_flush_timeout(bt_addr)))

totalsent = 0
# Fixed: Python 3 sockets require a bytes-like payload; the old str payload
# would raise TypeError on send. The packet is constant, so build it once
# outside the loop.
pkt = b"0" * 672
for i in range(num_packets):
    totalsent += sock.send(pkt)
print("Sent {} bytes total.".format(totalsent))
sock.close()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
37811,
20519,
14573,
89,
6190,
1672,
300,
17,
12,
403,
2411,
3379,
12,
16366,
13,
9078,
37811,
198,
198,
11748,
25064,
198,
198,
11748,
48208,
16271,
198,
11748,
48208,
16271,
13557,... | 2.730769 | 494 |
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Standard library imports
from enum import Enum
from typing import Iterator, List, Optional, Tuple, Union, Any
# Third-party imports
import numpy as np
import pandas as pd
# First-party imports
from gluonts.core.component import validated
from gluonts.core.exception import GluonTSDataError
from gluonts.dataset.common import Dataset
from gluonts.model.forecast import SampleForecast
from gluonts.model.predictor import RepresentablePredictor
from gluonts.time_feature import time_features_from_frequency_str
# Relative imports
from ._model import NPTS
class NPTSPredictor(RepresentablePredictor):
r"""
Implementation of Non-Parametric Time Series Forecaster.
Forecasts of NPTS for time step :math:`T` are one of the previous values
of the time series (these could be known values or predictions), sampled
according to the (un-normalized) distribution :math:`q_T(t) > 0`, where
:math:`0 <= t < T`.
The distribution :math:`q_T` is expressed in terms of a feature map
:math:`f(t)` which associates a time step :math:`t` with a
:math:`D`-dimensional feature map :math:`[f_1(t), ..., f_D(t)]`. More
details on the feature map can be found below.
We offer two types of distribution kernels.
**Exponential Kernel (NPTS Forecaster)**
The sampling distribution :math:`q_T` for the `exponential` kernel
can be `weighted` or `unweighted` and is defined as follows.
.. math::
q_T(t) =
\begin{cases}
\exp( - \sum_{i=1}^D \alpha \left| f_i(t) - f_i(T) \right| )
& \text{unweighted}\\
\exp( - \sum_{i=1}^D \alpha_i \left| f_i(t) - f_i(T) \right| )
& \text{weighted}
\end{cases}
In the above definition :math:`\alpha > 0` and :math:`\alpha_i > 0` are
user-defined sampling weights.
**Uniform Kernel (Climatological Forecaster)**
The sampling distribution :math:`q_T` for the `uniform` kernel can be
`seasonal` or not. The `seasonal` version is defined as follows.
.. math::
q_T(t) =
\begin{cases}
1.0
& \text{if }f(t) = f(T) \\
0.0
& \text{otherwise}
\end{cases}
The `not seasonal` version is defined as the constant map.
.. math::
q_T(t) = 1.0
**Feature Map**
The feature map :math:`f` is configurable. The special case
:math:`f(t) = [t]` results in the so-called `naive NPTS`. For
non-seasonal models, by default we have :math:`f(t) = [t]` for the NPTS
Forecaster (i.e., with the `exponential` kernel) and no features for the
Climatological Forecaster (i.e., the `uniform` kernel).
For seasonal NPTS and seasonal Climatological, time features determined
based on the frequency of the time series are added to the default
feature map.
The default time features for various frequencies are
.. math::
f(t) =
\begin{cases}
[\mathit{MINUTE\_OF\_HOUR}(t)] & \text{for minutely frequency}\\
[\mathit{HOUR\_OF\_DAY}(t)] & \text{for hourly frequency}\\
[\mathit{DAY\_OF\_WEEK}(t)] & \text{for daily frequency}\\
[\mathit{DAY\_OF\_MONTH}(t)] & \text{for weekly frequency}\\
[\mathit{MONTH\_OF\_YEAR}(t)] & \text{for monthly frequency}
\end{cases}
During prediction, one can provide custom features in `feat_dynamic_real`
(these have to be defined in both the training and the prediction range).
If the model is seasonal, these custom features are added to the default
feature map, otherwise they are ignored. If `feat_dynamic_real` is not
empty, one can disable default time features by setting the flag
`use_default_time_features` to `False`.
Parameters
----------
freq
time frequency string
prediction_length
number of time steps to predict
context_length
number of time-steps that are considered before making predictions
(the default value of None corresponds to the case where all time steps
in the history are considered)
kernel_type
the type of kernel to use (either "exponential" or "uniform")
exp_kernel_weights
single weight :math:`\alpha` or the weights for the features to use
in the exponential kernel; currently, we use the single weight version
and for seasonal NPTS we just rescale :math:`\alpha` by `feature_scale`
for seasonal features.
use_seasonal_model
whether to use seasonal variant
use_default_time_features
time features derived based on the frequency of the time series
num_default_time_features
this is not exposed; this parameter is for having more control on the
number of default time features, as the date_feature_set adds too
many per default.
feature_scale
scale for time (seasonal) features in order to sample past seasons
with higher probability
"""
    # NOTE(review): in upstream gluonts, @validated() normally decorates
    # __init__; here it wraps a regular method and the constructor (which
    # must set self.context_length, self.prediction_length, self.kernel,
    # etc.) is not visible in this snippet -- confirm intended.
    @validated()
    def predict_time_series(
        self,
        ts: pd.Series,
        num_samples: int,
        custom_features: np.ndarray = None,
        item_id: Optional[Any] = None,
    ) -> SampleForecast:
        """
        Given a training time series, this method generates `Forecast` object
        containing prediction samples for `prediction_length` time points.
        The predictions are generated via weighted sampling where the weights
        are determined by the `NPTSPredictor` kernel type and feature map.
        Parameters
        ----------
        ts
            training time series object
        custom_features
            custom features (covariates) to use
        num_samples
            number of samples to draw
        item_id
            item_id to identify the time series
        Returns
        -------
        Forecast
            A prediction for the supplied `ts` and `custom_features`.
        """
        # Guard: sampling weights are derived from the last `context_length`
        # target values, so an all-NaN context would make sampling impossible.
        if np.all(np.isnan(ts.values[-self.context_length :])):
            raise GluonTSDataError(
                f"The last {self.context_length} positions of the target time "
                f"series are all NaN. Please increase the `context_length` "
                f"parameter of your NPTS model so the last "
                f"{self.context_length} positions of each target contain at "
                f"least one non-NaN value."
            )
        # Get the features for both training and prediction ranges
        train_features, predict_features = self._get_features(
            ts.index, self.prediction_length, custom_features
        )
        # Compute weights for sampling for each time step `t` in the
        # prediction range; NaN positions in the target are excluded from
        # the candidate pool.
        sampling_weights_iterator = NPTS.compute_weights(
            train_features=train_features,
            pred_features=predict_features,
            target_isnan_positions=np.argwhere(np.isnan(ts.values)),
            kernel=self.kernel,
            do_exp=self._is_exp_kernel(),
        )
        # Generate forecasts by sampling past values according to the weights.
        forecast = NPTS.predict(
            targets=ts,
            prediction_length=self.prediction_length,
            sampling_weights_iterator=sampling_weights_iterator,
            num_samples=num_samples,
            item_id=item_id,
        )
        return forecast
    def _get_features(
        self,
        train_index: pd.DatetimeIndex,
        prediction_length: int,
        # NOTE(review): annotated np.ndarray but defaults to None; the body
        # handles None explicitly, so Optional[np.ndarray] would be accurate.
        custom_features: np.ndarray = None,
    ) -> Tuple[np.ndarray, np.ndarray]:
        """
        Internal method for computing default, (optional) seasonal features
        for the training and prediction ranges given time index for the
        training range and the prediction length.
        Appends `custom_features` if provided.
        Parameters
        ----------
        train_index
            Pandas DatetimeIndex
        prediction_length
            prediction length
        custom_features
            shape: (num_custom_features, train_length + pred_length)
        Returns
        -------
        a tuple of (training, prediction) feature tensors
            shape: (num_features, train_length/pred_length)
        """
        train_length = len(train_index)
        # Extend the index to cover the prediction range as well, so one
        # feature matrix spans both training and prediction time steps.
        full_time_index = pd.date_range(
            train_index.min(),
            periods=train_length + prediction_length,
            freq=train_index.freq,
        )
        # Default feature map for both seasonal and non-seasonal models.
        if self._is_exp_kernel():
            # Default time index features: index of the time point
            # [0, train_length + pred_length - 1]
            features = np.expand_dims(
                np.array(range(len(full_time_index))), axis=0
            )
            # Rescale time index features into the range: [-0.5, 0.5]
            # similar to the seasonal features
            # (see gluonts.time_feature)
            # NOTE(review): divides by (train_length + prediction_length - 1);
            # a degenerate single-point index would divide by zero.
            features = features / (train_length + prediction_length - 1) - 0.5
        else:
            # For uniform seasonal model we do not add time index features
            # (0-row matrix keeps the vstack calls below well-formed).
            features = np.empty((0, len(full_time_index)))
        # Add more features for seasonal variant
        if self.use_seasonal_model:
            if custom_features is not None:
                total_length = train_length + prediction_length
                assert len(custom_features.shape) == 2, (
                    "Custom features should be 2D-array where the rows "
                    "represent features and columns the time points."
                )
                assert custom_features.shape[1] == total_length, (
                    f"For a seasonal model, feat_dynamic_real must be defined "
                    f"for both training and prediction ranges. They are only "
                    f"provided for {custom_features.shape[1]} time steps "
                    f"instead of {train_length + prediction_length} steps."
                )
                features = np.vstack(
                    [features, self.feature_scale * custom_features]
                )
            if self.use_default_time_features or custom_features is None:
                # construct seasonal features; feature_scale boosts them so
                # past seasons are sampled with higher probability
                seasonal_features_gen = time_features_from_frequency_str(
                    full_time_index.freqstr
                )
                seasonal_features = [
                    self.feature_scale * gen(full_time_index)
                    for gen in seasonal_features_gen[
                        : self.num_default_time_features
                    ]
                ]
                features = np.vstack([features, *seasonal_features])
        # Split the shared feature matrix back into its two ranges.
        train_features = features[:, :train_length]
        pred_features = features[:, train_length:]
        return train_features, pred_features
| [
2,
15069,
2864,
6186,
13,
785,
11,
3457,
13,
393,
663,
29116,
13,
1439,
6923,
33876,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
11074,
198,
2,
921,
743,
407,
779,
428,
2393,
... | 2.417765 | 4,706 |
# -*- coding: utf-8 -*-
import unittest
from sdklib.http import HttpRequestContext
from sdklib.http.authorization import (
basic_authorization, x_11paths_authorization, X11PathsAuthentication, BasicAuthentication,
_get_11paths_serialized_headers
)
from sdklib.http.renderers import FormRenderer, JSONRenderer, MultiPartRenderer
from sdklib.http.headers import AUTHORIZATION_HEADER_NAME, X_11PATHS_BODY_HASH_HEADER_NAME
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
11748,
555,
715,
395,
198,
198,
6738,
264,
34388,
8019,
13,
4023,
1330,
367,
29281,
18453,
21947,
198,
6738,
264,
34388,
8019,
13,
4023,
13,
9800,
1634,
1330,
357,
... | 2.810458 | 153 |
from kivy.uix.screenmanager import Screen
from kivy.lang import Builder
from kivy.properties import NumericProperty
from kivy.animation import Animation
from kivymd.app import MDApp
# Load the title screen's kv layout so Screen subclasses defined in this
# module pick up their rules. NOTE(review): runs at import time as a side
# effect and assumes the working directory contains 'screens/'.
Builder.load_file('screens/titlescreen.kv')
| [
6738,
479,
452,
88,
13,
84,
844,
13,
9612,
37153,
1330,
15216,
198,
6738,
479,
452,
88,
13,
17204,
1330,
35869,
198,
6738,
479,
452,
88,
13,
48310,
1330,
399,
39223,
21746,
198,
6738,
479,
452,
88,
13,
11227,
341,
1330,
23535,
198,
... | 3 | 78 |
import abstract_parser
import os
import nltk
from nltk.corpus import stopwords
from nltk.stem.porter import PorterStemmer
import argparse
import pandas as pd
from datetime import datetime
# TODO: read these from a file
# Section headers / filler tokens to be stripped from abstracts.
# Fixed: `'aims' 'i'` was missing a comma, so Python's implicit string
# concatenation produced the single bogus entry 'aimsi' instead of the two
# intended entries 'aims' and 'i'.
# NOTE(review): 'objetctive' looks like a typo for 'objective' -- left as-is
# in case it deliberately matches misspelled source data; confirm.
COMMON = ['background', 'objetctive', 'objectives', 'introduction',
          'methods', 'method', 'importance', 'results', 'result', 'aim',
          'aims', 'i', 'comment', 'on', 'in']
if __name__ == "__main__":
    # One-time setup: the NLTK stopword corpus is needed for abstract
    # processing (downloads only if not already cached).
    nltk.download('stopwords')
    # Folder to scan comes from the command line; _parse_argument is defined
    # elsewhere in this file.
    folder_path = _parse_argument()
    # Recursively collect the absolute path of every file under folder_path.
    file_paths = []
    for (dirpath, _, filenames) in os.walk(folder_path):
        for f in filenames:
            file_paths.append(os.path.abspath(os.path.join(dirpath, f)))
    # Parse each file's abstracts and hand them to the processing step
    # (_process_abstracts is defined elsewhere in this file).
    for file_path in file_paths:
        data = abstract_parser.parse_file(file_path)
        _process_abstracts(data)
| [
11748,
12531,
62,
48610,
198,
11748,
28686,
198,
11748,
299,
2528,
74,
198,
6738,
299,
2528,
74,
13,
10215,
79,
385,
1330,
2245,
10879,
198,
6738,
299,
2528,
74,
13,
927,
13,
26634,
1330,
20890,
1273,
368,
647,
198,
11748,
1822,
29572... | 2.526984 | 315 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2019 Red Hat
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#############################################
# WARNING #
#############################################
#
# This file is auto generated by the resource
# module builder playbook.
#
# Do not edit this file manually.
#
# Changes to this file will be over written
# by the resource module builder.
#
# Changes should be made in the model used to
# generate this file or in the resource module
# builder template.
#
#############################################
"""
The module file for vyos_interfaces
"""
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = """
module: vyos_interfaces
short_description: Interfaces resource module
description:
- This module manages the interface attributes on VyOS network devices.
- This module supports managing base attributes of Ethernet, Bonding, VXLAN, Loopback
and Virtual Tunnel Interfaces.
version_added: 1.0.0
notes:
- Tested against VyOS 1.1.8 (helium).
- This module works with connection C(network_cli). See L(the VyOS OS Platform Options,../network/user_guide/platform_vyos.html).
author:
- Nilashish Chakraborty (@nilashishc)
- Rohit Thakur (@rohitthakur2590)
options:
config:
description: The provided interfaces configuration.
type: list
elements: dict
suboptions:
name:
description:
- Full name of the interface, e.g. eth0, eth1, bond0, vti1, vxlan2.
type: str
required: true
description:
description:
- Interface description.
type: str
duplex:
description:
- Interface duplex mode.
- Applicable for Ethernet interfaces only.
choices:
- full
- half
- auto
type: str
enabled:
default: true
description:
- Administrative state of the interface.
- Set the value to C(true) to administratively enable the interface or C(false)
to disable it.
type: bool
mtu:
description:
- MTU for a specific interface. Refer to vendor documentation for valid values.
- Applicable for Ethernet, Bonding, VXLAN and Virtual Tunnel interfaces.
type: int
speed:
description:
- Interface link speed.
- Applicable for Ethernet interfaces only.
type: str
choices:
- auto
- '10'
- '100'
- '1000'
- '2500'
- '10000'
vifs:
description:
- Virtual sub-interfaces related configuration.
- 802.1Q VLAN interfaces are represented as virtual sub-interfaces in VyOS.
type: list
elements: dict
suboptions:
vlan_id:
description:
- Identifier for the virtual sub-interface.
type: int
description:
description:
- Virtual sub-interface description.
type: str
enabled:
description:
- Administrative state of the virtual sub-interface.
- Set the value to C(true) to administratively enable the interface or
C(false) to disable it.
type: bool
default: true
mtu:
description:
- MTU for the virtual sub-interface.
- Refer to vendor documentation for valid values.
type: int
running_config:
description:
- This option is used only with state I(parsed).
- The value of this option should be the output received from the VyOS device
by executing the command B(show configuration commands | grep interfaces).
- The state I(parsed) reads the configuration from C(running_config) option and
transforms it into Ansible structured data as per the resource module's argspec
and the value is then returned in the I(parsed) key within the result.
type: str
state:
description:
- The state of the configuration after module completion.
type: str
choices:
- merged
- replaced
- overridden
- deleted
- rendered
- gathered
- parsed
default: merged
"""
EXAMPLES = """
# Using merged
#
# -------------
# Before state:
# -------------
#
# vyos@vyos:~$ show configuration commands | grep interfaces
# set interfaces ethernet eth0 address 'dhcp'
# set interfaces ethernet eth0 address 'dhcpv6'
# set interfaces ethernet eth0 duplex 'auto'
# set interfaces ethernet eth0 hw-id '08:00:27:30:f0:22'
# set interfaces ethernet eth0 smp-affinity 'auto'
# set interfaces ethernet eth0 speed 'auto'
# set interfaces ethernet eth1 hw-id '08:00:27:ea:0f:b9'
# set interfaces ethernet eth1 smp-affinity 'auto'
# set interfaces ethernet eth2 hw-id '08:00:27:c2:98:23'
# set interfaces ethernet eth2 smp-affinity 'auto'
# set interfaces ethernet eth3 hw-id '08:00:27:43:70:8c'
# set interfaces loopback lo
- name: Merge provided configuration with device configuration
vyos.vyos.vyos_interfaces:
config:
- name: eth2
description: Configured by Ansible
enabled: true
vifs:
- vlan_id: 200
description: VIF 200 - ETH2
- name: eth3
description: Configured by Ansible
mtu: 1500
- name: bond1
description: Bond - 1
mtu: 1200
- name: vti2
description: VTI - 2
enabled: false
state: merged
#
#
# -------------------------
# Module Execution Result
# -------------------------
#
# "before": [
# {
# "enabled": true,
# "name": "lo"
# },
# {
# "enabled": true,
# "name": "eth3"
# },
# {
# "enabled": true,
# "name": "eth2"
# },
# {
# "enabled": true,
# "name": "eth1"
# },
# {
# "duplex": "auto",
# "enabled": true,
# "name": "eth0",
# "speed": "auto"
# }
# ]
#
# "commands": [
# "set interfaces ethernet eth2 description 'Configured by Ansible'",
# "set interfaces ethernet eth2 vif 200",
# "set interfaces ethernet eth2 vif 200 description 'VIF 200 - ETH2'",
# "set interfaces ethernet eth3 description 'Configured by Ansible'",
# "set interfaces ethernet eth3 mtu '1500'",
# "set interfaces bonding bond1",
# "set interfaces bonding bond1 description 'Bond - 1'",
# "set interfaces bonding bond1 mtu '1200'",
# "set interfaces vti vti2",
# "set interfaces vti vti2 description 'VTI - 2'",
# "set interfaces vti vti2 disable"
# ]
#
# "after": [
# {
# "description": "Bond - 1",
# "enabled": true,
# "mtu": 1200,
# "name": "bond1"
# },
# {
# "enabled": true,
# "name": "lo"
# },
# {
# "description": "VTI - 2",
# "enabled": false,
# "name": "vti2"
# },
# {
# "description": "Configured by Ansible",
# "enabled": true,
# "mtu": 1500,
# "name": "eth3"
# },
# {
# "description": "Configured by Ansible",
# "enabled": true,
# "name": "eth2",
# "vifs": [
# {
# "description": "VIF 200 - ETH2",
# "enabled": true,
# "vlan_id": "200"
# }
# ]
# },
# {
# "enabled": true,
# "name": "eth1"
# },
# {
# "duplex": "auto",
# "enabled": true,
# "name": "eth0",
# "speed": "auto"
# }
# ]
#
#
# -------------
# After state:
# -------------
#
# vyos@vyos:~$ show configuration commands | grep interfaces
# set interfaces bonding bond1 description 'Bond - 1'
# set interfaces bonding bond1 mtu '1200'
# set interfaces ethernet eth0 address 'dhcp'
# set interfaces ethernet eth0 address 'dhcpv6'
# set interfaces ethernet eth0 duplex 'auto'
# set interfaces ethernet eth0 hw-id '08:00:27:30:f0:22'
# set interfaces ethernet eth0 smp-affinity 'auto'
# set interfaces ethernet eth0 speed 'auto'
# set interfaces ethernet eth1 hw-id '08:00:27:ea:0f:b9'
# set interfaces ethernet eth1 smp-affinity 'auto'
# set interfaces ethernet eth2 description 'Configured by Ansible'
# set interfaces ethernet eth2 hw-id '08:00:27:c2:98:23'
# set interfaces ethernet eth2 smp-affinity 'auto'
# set interfaces ethernet eth2 vif 200 description 'VIF 200 - ETH2'
# set interfaces ethernet eth3 description 'Configured by Ansible'
# set interfaces ethernet eth3 hw-id '08:00:27:43:70:8c'
# set interfaces ethernet eth3 mtu '1500'
# set interfaces loopback lo
# set interfaces vti vti2 description 'VTI - 2'
# set interfaces vti vti2 disable
#
# Using replaced
#
# -------------
# Before state:
# -------------
#
# vyos:~$ show configuration commands | grep eth
# set interfaces bonding bond1 description 'Bond - 1'
# set interfaces bonding bond1 mtu '1400'
# set interfaces ethernet eth0 address 'dhcp'
# set interfaces ethernet eth0 description 'Management Interface for the Appliance'
# set interfaces ethernet eth0 duplex 'auto'
# set interfaces ethernet eth0 hw-id '08:00:27:f3:6c:b5'
# set interfaces ethernet eth0 smp_affinity 'auto'
# set interfaces ethernet eth0 speed 'auto'
# set interfaces ethernet eth1 description 'Configured by Ansible Eng Team'
# set interfaces ethernet eth1 duplex 'full'
# set interfaces ethernet eth1 hw-id '08:00:27:ad:ef:65'
# set interfaces ethernet eth1 smp_affinity 'auto'
# set interfaces ethernet eth1 speed '100'
# set interfaces ethernet eth2 description 'Configured by Ansible'
# set interfaces ethernet eth2 duplex 'full'
# set interfaces ethernet eth2 hw-id '08:00:27:ab:4e:79'
# set interfaces ethernet eth2 mtu '500'
# set interfaces ethernet eth2 smp_affinity 'auto'
# set interfaces ethernet eth2 speed '100'
# set interfaces ethernet eth2 vif 200 description 'Configured by Ansible'
# set interfaces ethernet eth3 description 'Configured by Ansible'
# set interfaces ethernet eth3 duplex 'full'
# set interfaces ethernet eth3 hw-id '08:00:27:17:3c:85'
# set interfaces ethernet eth3 mtu '1500'
# set interfaces ethernet eth3 smp_affinity 'auto'
# set interfaces ethernet eth3 speed '100'
# set interfaces loopback lo
#
#
- name: Replace device configurations of listed interfaces with provided configurations
vyos.vyos.vyos_interfaces:
config:
- name: eth2
description: Replaced by Ansible
- name: eth3
description: Replaced by Ansible
- name: eth1
description: Replaced by Ansible
state: replaced
#
#
# -----------------------
# Module Execution Result
# -----------------------
#
# "before": [
# {
# "description": "Bond - 1",
# "enabled": true,
# "mtu": 1400,
# "name": "bond1"
# },
# {
# "enabled": true,
# "name": "lo"
# },
# {
# "description": "Configured by Ansible",
# "duplex": "full",
# "enabled": true,
# "mtu": 1500,
# "name": "eth3",
# "speed": "100"
# },
# {
# "description": "Configured by Ansible",
# "duplex": "full",
# "enabled": true,
# "mtu": 500,
# "name": "eth2",
# "speed": "100",
# "vifs": [
# {
# "description": "VIF 200 - ETH2",
# "enabled": true,
# "vlan_id": "200"
# }
# ]
# },
# {
# "description": "Configured by Ansible Eng Team",
# "duplex": "full",
# "enabled": true,
# "name": "eth1",
# "speed": "100"
# },
# {
# "description": "Management Interface for the Appliance",
# "duplex": "auto",
# "enabled": true,
# "name": "eth0",
# "speed": "auto"
# }
# ]
#
# "commands": [
# "delete interfaces ethernet eth2 speed",
# "delete interfaces ethernet eth2 duplex",
# "delete interfaces ethernet eth2 mtu",
# "delete interfaces ethernet eth2 vif 200 description",
# "set interfaces ethernet eth2 description 'Replaced by Ansible'",
# "delete interfaces ethernet eth3 speed",
# "delete interfaces ethernet eth3 duplex",
# "delete interfaces ethernet eth3 mtu",
# "set interfaces ethernet eth3 description 'Replaced by Ansible'",
# "delete interfaces ethernet eth1 speed",
# "delete interfaces ethernet eth1 duplex",
# "set interfaces ethernet eth1 description 'Replaced by Ansible'"
# ]
#
# "after": [
# {
# "description": "Bond - 1",
# "enabled": true,
# "mtu": 1400,
# "name": "bond1"
# },
# {
# "enabled": true,
# "name": "lo"
# },
# {
# "description": "Replaced by Ansible",
# "enabled": true,
# "name": "eth3"
# },
# {
# "description": "Replaced by Ansible",
# "enabled": true,
# "name": "eth2",
# "vifs": [
# {
# "enabled": true,
# "vlan_id": "200"
# }
# ]
# },
# {
# "description": "Replaced by Ansible",
# "enabled": true,
# "name": "eth1"
# },
# {
# "description": "Management Interface for the Appliance",
# "duplex": "auto",
# "enabled": true,
# "name": "eth0",
# "speed": "auto"
# }
# ]
#
#
# -------------
# After state:
# -------------
#
# vyos@vyos:~$ show configuration commands | grep interfaces
# set interfaces bonding bond1 description 'Bond - 1'
# set interfaces bonding bond1 mtu '1400'
# set interfaces ethernet eth0 address 'dhcp'
# set interfaces ethernet eth0 address 'dhcpv6'
# set interfaces ethernet eth0 description 'Management Interface for the Appliance'
# set interfaces ethernet eth0 duplex 'auto'
# set interfaces ethernet eth0 hw-id '08:00:27:30:f0:22'
# set interfaces ethernet eth0 smp-affinity 'auto'
# set interfaces ethernet eth0 speed 'auto'
# set interfaces ethernet eth1 description 'Replaced by Ansible'
# set interfaces ethernet eth1 hw-id '08:00:27:ea:0f:b9'
# set interfaces ethernet eth1 smp-affinity 'auto'
# set interfaces ethernet eth2 description 'Replaced by Ansible'
# set interfaces ethernet eth2 hw-id '08:00:27:c2:98:23'
# set interfaces ethernet eth2 smp-affinity 'auto'
# set interfaces ethernet eth2 vif 200
# set interfaces ethernet eth3 description 'Replaced by Ansible'
# set interfaces ethernet eth3 hw-id '08:00:27:43:70:8c'
# set interfaces loopback lo
#
#
# Using overridden
#
#
# --------------
# Before state
# --------------
#
# vyos@vyos:~$ show configuration commands | grep interfaces
# set interfaces ethernet eth0 address 'dhcp'
# set interfaces ethernet eth0 address 'dhcpv6'
# set interfaces ethernet eth0 description 'Ethernet Interface - 0'
# set interfaces ethernet eth0 duplex 'auto'
# set interfaces ethernet eth0 hw-id '08:00:27:30:f0:22'
# set interfaces ethernet eth0 mtu '1200'
# set interfaces ethernet eth0 smp-affinity 'auto'
# set interfaces ethernet eth0 speed 'auto'
# set interfaces ethernet eth1 description 'Configured by Ansible Eng Team'
# set interfaces ethernet eth1 hw-id '08:00:27:ea:0f:b9'
# set interfaces ethernet eth1 mtu '100'
# set interfaces ethernet eth1 smp-affinity 'auto'
# set interfaces ethernet eth1 vif 100 description 'VIF 100 - ETH1'
# set interfaces ethernet eth1 vif 100 disable
# set interfaces ethernet eth2 description 'Configured by Ansible Team (Admin Down)'
# set interfaces ethernet eth2 disable
# set interfaces ethernet eth2 hw-id '08:00:27:c2:98:23'
# set interfaces ethernet eth2 mtu '600'
# set interfaces ethernet eth2 smp-affinity 'auto'
# set interfaces ethernet eth3 description 'Configured by Ansible Network'
# set interfaces ethernet eth3 hw-id '08:00:27:43:70:8c'
# set interfaces loopback lo
# set interfaces vti vti1 description 'Virtual Tunnel Interface - 1'
# set interfaces vti vti1 mtu '68'
#
#
- name: Overrides all device configuration with provided configuration
vyos.vyos.vyos_interfaces:
config:
- name: eth0
description: Outbound Interface For The Appliance
speed: auto
duplex: auto
- name: eth2
speed: auto
duplex: auto
- name: eth3
mtu: 1200
state: overridden
#
#
# ------------------------
# Module Execution Result
# ------------------------
#
# "before": [
# {
# "enabled": true,
# "name": "lo"
# },
# {
# "description": "Virtual Tunnel Interface - 1",
# "enabled": true,
# "mtu": 68,
# "name": "vti1"
# },
# {
# "description": "Configured by Ansible Network",
# "enabled": true,
# "name": "eth3"
# },
# {
# "description": "Configured by Ansible Team (Admin Down)",
# "enabled": false,
# "mtu": 600,
# "name": "eth2"
# },
# {
# "description": "Configured by Ansible Eng Team",
# "enabled": true,
# "mtu": 100,
# "name": "eth1",
# "vifs": [
# {
# "description": "VIF 100 - ETH1",
# "enabled": false,
# "vlan_id": "100"
# }
# ]
# },
# {
# "description": "Ethernet Interface - 0",
# "duplex": "auto",
# "enabled": true,
# "mtu": 1200,
# "name": "eth0",
# "speed": "auto"
# }
# ]
#
# "commands": [
# "delete interfaces vti vti1 description",
# "delete interfaces vti vti1 mtu",
# "delete interfaces ethernet eth1 description",
# "delete interfaces ethernet eth1 mtu",
# "delete interfaces ethernet eth1 vif 100 description",
# "delete interfaces ethernet eth1 vif 100 disable",
# "delete interfaces ethernet eth0 mtu",
# "set interfaces ethernet eth0 description 'Outbound Interface For The Appliance'",
# "delete interfaces ethernet eth2 description",
# "delete interfaces ethernet eth2 mtu",
# "set interfaces ethernet eth2 duplex 'auto'",
# "delete interfaces ethernet eth2 disable",
# "set interfaces ethernet eth2 speed 'auto'",
# "delete interfaces ethernet eth3 description",
# "set interfaces ethernet eth3 mtu '1200'"
# ],
#
# "after": [
# {
# "enabled": true,
# "name": "lo"
# },
# {
# "enabled": true,
# "name": "vti1"
# },
# {
# "enabled": true,
# "mtu": 1200,
# "name": "eth3"
# },
# {
# "duplex": "auto",
# "enabled": true,
# "name": "eth2",
# "speed": "auto"
# },
# {
# "enabled": true,
# "name": "eth1",
# "vifs": [
# {
# "enabled": true,
# "vlan_id": "100"
# }
# ]
# },
# {
# "description": "Outbound Interface For The Appliance",
# "duplex": "auto",
# "enabled": true,
# "name": "eth0",
# "speed": "auto"
# }
# ]
#
#
# ------------
# After state
# ------------
#
# vyos@vyos:~$ show configuration commands | grep interfaces
# set interfaces ethernet eth0 address 'dhcp'
# set interfaces ethernet eth0 address 'dhcpv6'
# set interfaces ethernet eth0 description 'Outbound Interface For The Appliance'
# set interfaces ethernet eth0 duplex 'auto'
# set interfaces ethernet eth0 hw-id '08:00:27:30:f0:22'
# set interfaces ethernet eth0 smp-affinity 'auto'
# set interfaces ethernet eth0 speed 'auto'
# set interfaces ethernet eth1 hw-id '08:00:27:ea:0f:b9'
# set interfaces ethernet eth1 smp-affinity 'auto'
# set interfaces ethernet eth1 vif 100
# set interfaces ethernet eth2 duplex 'auto'
# set interfaces ethernet eth2 hw-id '08:00:27:c2:98:23'
# set interfaces ethernet eth2 smp-affinity 'auto'
# set interfaces ethernet eth2 speed 'auto'
# set interfaces ethernet eth3 hw-id '08:00:27:43:70:8c'
# set interfaces ethernet eth3 mtu '1200'
# set interfaces loopback lo
# set interfaces vti vti1
#
#
# Using deleted
#
#
# -------------
# Before state
# -------------
#
# vyos@vyos:~$ show configuration commands | grep interfaces
# set interfaces bonding bond0 mtu '1300'
# set interfaces bonding bond1 description 'LAG - 1'
# set interfaces ethernet eth0 address 'dhcp'
# set interfaces ethernet eth0 address 'dhcpv6'
# set interfaces ethernet eth0 description 'Outbound Interface for this appliance'
# set interfaces ethernet eth0 duplex 'auto'
# set interfaces ethernet eth0 hw-id '08:00:27:30:f0:22'
# set interfaces ethernet eth0 smp-affinity 'auto'
# set interfaces ethernet eth0 speed 'auto'
# set interfaces ethernet eth1 description 'Configured by Ansible Network'
# set interfaces ethernet eth1 duplex 'full'
# set interfaces ethernet eth1 hw-id '08:00:27:ea:0f:b9'
# set interfaces ethernet eth1 smp-affinity 'auto'
# set interfaces ethernet eth1 speed '100'
# set interfaces ethernet eth2 description 'Configured by Ansible'
# set interfaces ethernet eth2 disable
# set interfaces ethernet eth2 duplex 'full'
# set interfaces ethernet eth2 hw-id '08:00:27:c2:98:23'
# set interfaces ethernet eth2 mtu '600'
# set interfaces ethernet eth2 smp-affinity 'auto'
# set interfaces ethernet eth2 speed '100'
# set interfaces ethernet eth3 description 'Configured by Ansible Network'
# set interfaces ethernet eth3 duplex 'full'
# set interfaces ethernet eth3 hw-id '08:00:27:43:70:8c'
# set interfaces ethernet eth3 speed '100'
# set interfaces loopback lo
#
#
- name: Delete attributes of given interfaces (Note - This won't delete the interfaces
themselves)
vyos.vyos.vyos_interfaces:
config:
- name: bond1
- name: eth1
- name: eth2
- name: eth3
state: deleted
#
#
# ------------------------
# Module Execution Results
# ------------------------
#
# "before": [
# {
# "enabled": true,
# "mtu": 1300,
# "name": "bond0"
# },
# {
# "description": "LAG - 1",
# "enabled": true,
# "name": "bond1"
# },
# {
# "enabled": true,
# "name": "lo"
# },
# {
# "description": "Configured by Ansible Network",
# "duplex": "full",
# "enabled": true,
# "name": "eth3",
# "speed": "100"
# },
# {
# "description": "Configured by Ansible",
# "duplex": "full",
# "enabled": false,
# "mtu": 600,
# "name": "eth2",
# "speed": "100"
# },
# {
# "description": "Configured by Ansible Network",
# "duplex": "full",
# "enabled": true,
# "name": "eth1",
# "speed": "100"
# },
# {
# "description": "Outbound Interface for this appliance",
# "duplex": "auto",
# "enabled": true,
# "name": "eth0",
# "speed": "auto"
# }
# ]
#
# "commands": [
# "delete interfaces bonding bond1 description",
# "delete interfaces ethernet eth1 speed",
# "delete interfaces ethernet eth1 duplex",
# "delete interfaces ethernet eth1 description",
# "delete interfaces ethernet eth2 speed",
# "delete interfaces ethernet eth2 disable",
# "delete interfaces ethernet eth2 duplex",
# "delete interfaces ethernet eth2 disable",
# "delete interfaces ethernet eth2 description",
# "delete interfaces ethernet eth2 disable",
# "delete interfaces ethernet eth2 mtu",
# "delete interfaces ethernet eth2 disable",
# "delete interfaces ethernet eth3 speed",
# "delete interfaces ethernet eth3 duplex",
# "delete interfaces ethernet eth3 description"
# ]
#
# "after": [
# {
# "enabled": true,
# "mtu": 1300,
# "name": "bond0"
# },
# {
# "enabled": true,
# "name": "bond1"
# },
# {
# "enabled": true,
# "name": "lo"
# },
# {
# "enabled": true,
# "name": "eth3"
# },
# {
# "enabled": true,
# "name": "eth2"
# },
# {
# "enabled": true,
# "name": "eth1"
# },
# {
# "description": "Outbound Interface for this appliance",
# "duplex": "auto",
# "enabled": true,
# "name": "eth0",
# "speed": "auto"
# }
# ]
#
#
# ------------
# After state
# ------------
#
# vyos@vyos:~$ show configuration commands | grep interfaces
# set interfaces bonding bond0 mtu '1300'
# set interfaces bonding bond1
# set interfaces ethernet eth0 address 'dhcp'
# set interfaces ethernet eth0 address 'dhcpv6'
# set interfaces ethernet eth0 description 'Outbound Interface for this appliance'
# set interfaces ethernet eth0 duplex 'auto'
# set interfaces ethernet eth0 hw-id '08:00:27:30:f0:22'
# set interfaces ethernet eth0 smp-affinity 'auto'
# set interfaces ethernet eth0 speed 'auto'
# set interfaces ethernet eth1 hw-id '08:00:27:ea:0f:b9'
# set interfaces ethernet eth1 smp-affinity 'auto'
# set interfaces ethernet eth2 hw-id '08:00:27:c2:98:23'
# set interfaces ethernet eth2 smp-affinity 'auto'
# set interfaces ethernet eth3 hw-id '08:00:27:43:70:8c'
# set interfaces loopback lo
#
#
# Using gathered
#
# Before state:
# -------------
#
# vyos@192# run show configuration commands | grep interfaces
# set interfaces ethernet eth0 address 'dhcp'
# set interfaces ethernet eth0 duplex 'auto'
# set interfaces ethernet eth0 hw-id '08:00:27:50:5e:19'
# set interfaces ethernet eth0 smp_affinity 'auto'
# set interfaces ethernet eth0 speed 'auto'
# set interfaces ethernet eth1 description 'Configured by Ansible'
# set interfaces ethernet eth1 duplex 'auto'
# set interfaces ethernet eth1 mtu '1500'
# set interfaces ethernet eth1 speed 'auto'
# set interfaces ethernet eth1 vif 200 description 'VIF - 200'
# set interfaces ethernet eth2 description 'Configured by Ansible'
# set interfaces ethernet eth2 duplex 'auto'
# set interfaces ethernet eth2 mtu '1500'
# set interfaces ethernet eth2 speed 'auto'
# set interfaces ethernet eth2 vif 200 description 'VIF - 200'
#
- name: Gather listed interfaces with provided configurations
vyos.vyos.vyos_interfaces:
config:
state: gathered
#
#
# -------------------------
# Module Execution Result
# -------------------------
#
# "gathered": [
# {
# "description": "Configured by Ansible",
# "duplex": "auto",
# "enabled": true,
# "mtu": 1500,
# "name": "eth2",
# "speed": "auto",
# "vifs": [
# {
# "description": "VIF - 200",
# "enabled": true,
# "vlan_id": 200
# }
# ]
# },
# {
# "description": "Configured by Ansible",
# "duplex": "auto",
# "enabled": true,
# "mtu": 1500,
# "name": "eth1",
# "speed": "auto",
# "vifs": [
# {
# "description": "VIF - 200",
# "enabled": true,
# "vlan_id": 200
# }
# ]
# },
# {
# "duplex": "auto",
# "enabled": true,
# "name": "eth0",
# "speed": "auto"
# }
# ]
#
#
# After state:
# -------------
#
# vyos@192# run show configuration commands | grep interfaces
# set interfaces ethernet eth0 address 'dhcp'
# set interfaces ethernet eth0 duplex 'auto'
# set interfaces ethernet eth0 hw-id '08:00:27:50:5e:19'
# set interfaces ethernet eth0 smp_affinity 'auto'
# set interfaces ethernet eth0 speed 'auto'
# set interfaces ethernet eth1 description 'Configured by Ansible'
# set interfaces ethernet eth1 duplex 'auto'
# set interfaces ethernet eth1 mtu '1500'
# set interfaces ethernet eth1 speed 'auto'
# set interfaces ethernet eth1 vif 200 description 'VIF - 200'
# set interfaces ethernet eth2 description 'Configured by Ansible'
# set interfaces ethernet eth2 duplex 'auto'
# set interfaces ethernet eth2 mtu '1500'
# set interfaces ethernet eth2 speed 'auto'
# set interfaces ethernet eth2 vif 200 description 'VIF - 200'
# Using rendered
#
#
- name: Render the commands for provided configuration
vyos.vyos.vyos_interfaces:
config:
- name: eth0
enabled: true
duplex: auto
speed: auto
- name: eth1
description: Configured by Ansible - Interface 1
mtu: 1500
speed: auto
duplex: auto
enabled: true
vifs:
- vlan_id: 100
description: Eth1 - VIF 100
mtu: 400
enabled: true
- vlan_id: 101
description: Eth1 - VIF 101
enabled: true
- name: eth2
description: Configured by Ansible - Interface 2 (ADMIN DOWN)
mtu: 600
enabled: false
state: rendered
#
#
# -------------------------
# Module Execution Result
# -------------------------
#
#
# "rendered": [
# "set interfaces ethernet eth0 duplex 'auto'",
# "set interfaces ethernet eth0 speed 'auto'",
# "delete interfaces ethernet eth0 disable",
# "set interfaces ethernet eth1 duplex 'auto'",
# "delete interfaces ethernet eth1 disable",
# "set interfaces ethernet eth1 speed 'auto'",
# "set interfaces ethernet eth1 description 'Configured by Ansible - Interface 1'",
# "set interfaces ethernet eth1 mtu '1500'",
# "set interfaces ethernet eth1 vif 100 description 'Eth1 - VIF 100'",
# "set interfaces ethernet eth1 vif 100 mtu '400'",
# "set interfaces ethernet eth1 vif 101 description 'Eth1 - VIF 101'",
# "set interfaces ethernet eth2 disable",
# "set interfaces ethernet eth2 description 'Configured by Ansible - Interface 2 (ADMIN DOWN)'",
# "set interfaces ethernet eth2 mtu '600'"
# ]
# Using parsed
#
#
- name: Parse the configuration.
vyos.vyos.vyos_interfaces:
running_config:
"set interfaces ethernet eth0 address 'dhcp'
set interfaces ethernet eth0 duplex 'auto'
set interfaces ethernet eth0 hw-id '08:00:27:50:5e:19'
set interfaces ethernet eth0 smp_affinity 'auto'
set interfaces ethernet eth0 speed 'auto'
set interfaces ethernet eth1 description 'Configured by Ansible'
set interfaces ethernet eth1 duplex 'auto'
set interfaces ethernet eth1 mtu '1500'
set interfaces ethernet eth1 speed 'auto'
set interfaces ethernet eth1 vif 200 description 'VIF - 200'
set interfaces ethernet eth2 description 'Configured by Ansible'
set interfaces ethernet eth2 duplex 'auto'
set interfaces ethernet eth2 mtu '1500'
set interfaces ethernet eth2 speed 'auto'
set interfaces ethernet eth2 vif 200 description 'VIF - 200'"
state: parsed
#
#
# -------------------------
# Module Execution Result
# -------------------------
#
#
# "parsed": [
# {
# "description": "Configured by Ansible",
# "duplex": "auto",
# "enabled": true,
# "mtu": 1500,
# "name": "eth2",
# "speed": "auto",
# "vifs": [
# {
# "description": "VIF - 200",
# "enabled": true,
# "vlan_id": 200
# }
# ]
# },
# {
# "description": "Configured by Ansible",
# "duplex": "auto",
# "enabled": true,
# "mtu": 1500,
# "name": "eth1",
# "speed": "auto",
# "vifs": [
# {
# "description": "VIF - 200",
# "enabled": true,
# "vlan_id": 200
# }
# ]
# },
# {
# "duplex": "auto",
# "enabled": true,
# "name": "eth0",
# "speed": "auto"
# }
# ]
"""
RETURN = """
before:
description: The configuration as structured data prior to module invocation.
returned: always
sample: >
The configuration returned will always be in the same format
of the parameters above.
type: list
after:
description: The configuration as structured data after module completion.
returned: when changed
sample: >
The configuration returned will always be in the same format
of the parameters above.
type: list
commands:
description: The set of commands pushed to the remote device.
returned: always
type: list
sample:
- 'set interfaces ethernet eth1 mtu 1200'
- 'set interfaces ethernet eth2 vif 100 description VIF 100'
"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.vyos.vyos.plugins.module_utils.network.vyos.argspec.interfaces.interfaces import (
InterfacesArgs,
)
from ansible_collections.vyos.vyos.plugins.module_utils.network.vyos.config.interfaces.interfaces import (
Interfaces,
)
def main():
    """Entry point for module execution.

    Builds the AnsibleModule from the interfaces argspec, runs the
    resource-module logic, and exits with its result.

    :returns: the result of module invocation
    """
    # Every config-driven state requires `config`; `parsed` instead needs
    # the raw device output supplied via `running_config`.
    config_states = ("merged", "replaced", "rendered", "overridden")
    required_if = [("state", state, ("config",)) for state in config_states]
    required_if.append(("state", "parsed", ("running_config",)))

    module = AnsibleModule(
        argument_spec=InterfacesArgs.argument_spec,
        required_if=required_if,
        supports_check_mode=True,
        mutually_exclusive=[("config", "running_config")],
    )
    module.exit_json(**Interfaces(module).execute_module())


if __name__ == "__main__":
    main()
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
15069,
13130,
2297,
10983,
198,
2,
22961,
3611,
5094,
13789,
410,
18,
13,
15,
10,
198,
2,
357,
3826,
27975,
45761,
393,
... | 2.281948 | 15,212 |