code
stringlengths 1
25.8M
| language
stringclasses 18
values | source
stringclasses 4
values | repo
stringclasses 78
values | path
stringlengths 0
268
|
|---|---|---|---|---|
#-------------------------------------------------------------------------------
# Name: Preprocessing for the EVI reference
# Inputs: EVI for each 8-day from all tiles and quality layers
#
# Author: Yao Zhang
#
# Created: 3/29/2017
# Modified:
# Copyright: (c) eomf 2015
# Licence: <your licence>
#-------------------------------------------------------------------------------
import multiprocessing
import os
from os import listdir
from os.path import isfile, join
from osgeo import gdal
from osgeo.gdalconst import *
from scipy.signal import savgol_filter
from ctypes import *
import numpy as np
import numpy.ma as ma
#from netCDF4 import Dataset
import time
import pandas as pd
startTime = time.time()
root = '/data/ifs/modis/products_006/mod09a1/geotiff/'
dirref = '/data/ifs/VPM/driving_data/EVI_ref/'
'''
def nan_helper(y):
return np.isnan(y), lambda z: z.nonzero()[0]
'''
def VIsmooth(ndvi):
    """Gap-fill one pixel's EVI time series with the BISE algorithm (C library).

    Calls bise() from bise.so (loaded from the current working directory).
    Values the C routine rejects come back as -1 and are converted to NaN;
    an extra pass then tries to NaN out isolated spikes (> 1.5x neighbours
    and > 3000). Returns the input unchanged when fewer than 3 values were
    rejected.
    """
    rdays = c_int(len(ndvi))
    # the shared library is resolved relative to the working directory
    fun = cdll.LoadLibrary(os.getcwd() + '/bise.so')
    outndvi = (c_double * len(ndvi))()
    # sliding period scales with the share of zero (fill) values
    # (integer division under Python 2)
    slidingperiod = c_int(np.sum(ndvi == 0)/(len(ndvi)/20))
    #apply the bise algorithm
    fun.bise(byref(rdays), (c_double * len(ndvi))(*ndvi), byref(slidingperiod), outndvi)
    bisendvi = np.frombuffer(outndvi)
    #print bisendvi
    bisendvi[bisendvi == -1] = np.nan   # -1 marks BISE-rejected samples
    peaks = []
    threshold = 1.5
    # indices of the samples BISE rejected (now NaN)
    check = np.argwhere(np.isnan(bisendvi))
    #print check
    if len(check) < 3:
        return ndvi
    else:
        # NOTE(review): this loop reads bisendvi at the rejected (NaN) indices,
        # so every "> threshold" comparison is against NaN and evaluates False,
        # leaving 'peaks' empty -- confirm the intended indexing.
        for i in range(0, len(check)):
            if i == 0:
                # first rejected sample: compare against last and next ones (wrap-around)
                if bisendvi[check[i]] > (threshold * np.mean(bisendvi[np.array([(check[len(check)-1], check[i+1])])])):
                    if bisendvi[check[i]] > 3000: peaks.append(check[i])
            else:
                if i == (len(check)-1):
                    if bisendvi[check[i]] > (threshold * np.mean(bisendvi[np.array([(check[i-1], check[1])])])):
                        if bisendvi[check[i]] > 3000: peaks.append(check[i])
                else:
                    if bisendvi[check[i]] > (threshold * np.mean(bisendvi[check[np.array([i-1, i+1])]])):
                        if bisendvi[check[i]] > 3000: peaks.append(check[i])
        bisendvi[peaks] = np.nan
        return bisendvi
#
def buildVrtFile(root, doy, tile, product):
fileList = []
for year in range(2000, 2017):
tiledir = os.path.join(root, product, str(year), tile)
for path, subdirs, files in os.walk(tiledir):
for name in files:
if (str(1000+doy)[1:] == name[13:16]) and (".tif" == name[-4:]): fileList.append([os.path.join(path, name)])
fileList.sort()
print len(fileList), 'files were built into a vrt file'
if len(fileList) == 0: return 0
filename = os.path.join('/data/ifs/users/yzhang/TEMP/VRT', str(1000+doy)[1:]+tile+product+'_list.txt')
outFilelist = open(filename, 'w')
for file in fileList:
outFilelist.write(file[0]+'\r\n')
outFilelist.close()
return filename
def write_file(output_name, output_array, GeoT, xsize, ysize, proJ, driverName='GTiff'):
print "creating", output_name
dr = gdal.GetDriverByName(driverName)
dr.Register()
do = dr.Create(output_name, xsize, ysize, 1, gdal.GDT_UInt16, options=['COMPRESS=LZW'])
do.SetGeoTransform(GeoT)
do.SetProjection(proJ)
do.GetRasterBand(1).WriteArray(output_array)
do.GetRasterBand(1).SetNoDataValue(32767)
do = None
def export_array (Rasters, directory, prod, tile, index):
    """Write each 2400x2400 layer of Rasters as a MOD09A1-style GeoTIFF.

    File names encode the 3-digit DOY taken from 'index'. Relies on the
    module-level globals (geoTran, cols, rows, geoProj) set as a side
    effect of import_all_year_data().
    """
    fileNum = Rasters.shape[0]
    for i in range(fileNum):
        fileName=os.path.join(directory, 'MOD09A1.'+str(1000+index[i])[1:]+'.'+tile+'.'+prod+'.tif')
        write_file(fileName, Rasters[i, :, :], geoTran, cols, rows, geoProj, "GTiff")
def parallelize_dataframe(df):
    """Smooth 'df' in parallel: split into 5 column chunks, run
    dataframeapply() on each in its own process, then reassemble."""
    chunks = np.array_split(df, 5, axis=1)
    workers = multiprocessing.Pool(5)
    smoothed = np.concatenate(workers.map(dataframeapply, chunks), axis=1)
    workers.close()
    workers.join()
    return smoothed
def dataframeapply(df):
    """BISE-smooth and Savitzky-Golay-filter one chunk of pixel time series.

    df: 2-D array of 46 DOY rows x N pixel columns. Rows 23:46 are prepended
    and rows :23 appended to give the smoother cyclic (annual) context;
    rows 23:69 of the padded result map back to the original 46 composites.
    Returns a 46 x N array with negative values clamped to 0.
    """
    df = pd.DataFrame(np.concatenate([df[23:46, :], df, df[:23, :]]))
    # column-wise (per-pixel) BISE gap fill
    df_smoothed = df.apply(VIsmooth)
    # fill remaining NaNs by linear interpolation along the time axis
    df_smoothed = df_smoothed.interpolate(axis=0)
    #make a SG filter
    # as_matrix() is the pre-1.0 pandas spelling of .values
    df_select = df_smoothed.as_matrix()[23:69, :]
    df_select[np.isnan(df_select)] = 0
    bisendviSG = savgol_filter(df_select, window_length=5, polyorder=3)
    #bisendvi = None
    bisendviSG[bisendviSG < 0] = 0
    return bisendviSG
def import_all_year_data(tile):
    """Build the 46-composite median-EVI stack for one tile.

    For every 8-day DOY, stacks all years (2000-2016) of EVI via a GDAL VRT,
    masks poor-quality observations using the cloud and aerosol layers, and
    stores the per-pixel median in a 46 x (2400*2400) array.

    Side effect: sets the module-level globals rows/cols/geoProj/geoTran
    later consumed by export_array().
    """
    temp = np.zeros([46, 2400*2400], np.dtype(float))
    # tiles whose v-index ends in 0 or 1 start from NaN
    # NOTE(review): presumably so high-latitude DOYs with no data stay NaN
    # instead of 0 -- confirm
    if int(tile[5:6])<2:
        temp[:]=np.nan
    for doy in range(1, 369, 8):
        evifile = buildVrtFile(root, doy, tile, 'evi')
        cloudfile = buildVrtFile(root, doy, tile, 'cloudmask')
        aerosolfile = buildVrtFile(root, doy, tile, 'aerosolmask')
        #if no file found for this DOY
        if evifile == 0: continue
        #doyList.append(doy)
        #build vrt for EVI
        vrtEVI = os.path.join(os.path.dirname(evifile), str(1000+doy)[1:]+tile+'EVI_vrt.vrt')
        print "Building the vrt file: ", evifile
        os.system('gdalbuildvrt -separate -input_file_list '+evifile+' '+vrtEVI)
        inEVI = gdal.Open(vrtEVI)
        EVI = inEVI.ReadAsArray()
        #build vrt for cloudmask
        vrtcloud = os.path.join(os.path.dirname(cloudfile), str(1000+doy)[1:]+tile+'cloud_vrt.vrt')
        print "Building the vrt file: ", cloudfile
        os.system('gdalbuildvrt -separate -input_file_list '+cloudfile+' '+vrtcloud)
        incloud = gdal.Open(vrtcloud)
        cloud = incloud.ReadAsArray()
        #build vrt for aerosol
        vrtaerosol = os.path.join(os.path.dirname(aerosolfile), str(1000+doy)[1:]+tile+'aerosol_vrt.vrt')
        print "Building the vrt file: ", aerosolfile
        os.system('gdalbuildvrt -separate -input_file_list '+aerosolfile+' '+vrtaerosol)
        inaerosol = gdal.Open(vrtaerosol)
        aerosol = inaerosol.ReadAsArray()
        # expose geo-referencing for export_array()
        global rows, cols, geoProj, geoTran
        rows = 2400
        cols = 2400
        geoTran = inEVI.GetGeoTransform()
        geoProj = inEVI.GetProjection()
        #mask for bad quality
        # keep only clear-sky (cloud == 1), aerosol-ok, in-range EVI samples
        EVIgood = ma.masked_where((cloud != 1)|(aerosol == 0)|(EVI < 0)|(EVI > 10000), EVI)
        # one row per year (integer division under Python 2)
        EVIgood = EVIgood.reshape(EVIgood.size/2400/2400, 2400*2400)
        medianEVI = np.nanmedian(EVIgood, axis=0)
        # release the large arrays before the next DOY iteration
        EVI = None
        aerosol = None
        cloud = None
        EVIgood = None
        #assign to the 46 layer of matrix
        temp[(doy-1)/8, :] = medianEVI
        meanEVI = None
    return temp
def smooth(tile):
    """End-to-end EVI reference generation for one tile: build the median
    stack, BISE + Savitzky-Golay smooth it, and export 46 GeoTIFFs under
    dirref/<tile>."""
    #first use this function to get mean and save it in an array
    temp = import_all_year_data(tile)
    ####after get the mean value for all doy, I will run a bise gapfill first
    print temp.size
    ##when using the single processing
    #inputVI = pd.DataFrame(temp)
    #VIsmoothed = inputVI.apply(VIsmooth, axis=0)
    #VIsmoothed = VIsmoothed.as_matrix()
    #VIsmoothed = parallelize_dataframe(temp)
    ##when using the multiprocessing
    VIsmoothed = dataframeapply(temp)
    # back to (46, 2400, 2400) image layout (integer division under Python 2)
    VIsmoothed = VIsmoothed.reshape(VIsmoothed.size/2400/2400, 2400, 2400)
    TILEdir = os.path.join(dirref, tile)
    if not os.path.exists(TILEdir):
        os.makedirs(TILEdir)
    export_array (Rasters=np.int16(VIsmoothed), directory=TILEdir, \
        prod='EVI.BISE.SG', tile=tile, index=range(1, 369, 8))
    # drop references to the large arrays
    temp = None
    inputVI = None
    VIsmoothed = None
def process_list(tile=None, mp=True, count=1):
    """Run smooth() over a list of tile names.

    tile:  iterable of tile identifiers (e.g. 'h11v01')
    mp:    when True, distribute the tiles over a process pool
    count: number of worker processes when mp is True
    """
    if mp:
        #count = multiprocessing.cpu_count()-save_cpus
        pool = multiprocessing.Pool(processes=count)
        pool.map(smooth, tile)
        # fix: release the worker processes once the map completes
        pool.close()
        pool.join()
    else:
        # fix: mp=False previously did nothing at all; fall back to
        # processing the tiles sequentially in this process
        for t in (tile or []):
            smooth(t)
#
'''
tile = ['h17v00','h12v01','h13v01','h14v01','h15v01','h16v01','h17v01','h18v01','h19v01','h20v01','h21v01',\
'h22v01','h23v01','h09v02','h10v02','h11v02','h12v02','h13v02','h14v02','h15v02','h16v02','h17v02',\
'h18v02','h19v02','h20v02','h21v02','h22v02','h23v02','h24v02','h25v02','h26v02','h06v03','h07v03',\
'h08v03','h09v03','h10v03','h11v03','h12v03','h13v03','h14v03','h15v03','h17v03','h18v03','h19v03',\
'h20v03','h21v03','h22v03','h23v03','h24v03','h25v03','h26v03','h27v03','h28v03','h29v03','h08v04',\
'h09v04','h10v04','h11v04','h12v04','h13v04','h14v04','h17v04','h18v04','h19v04','h20v04','h21v04',\
'h22v04','h23v04','h24v04','h25v04','h26v04','h27v04','h28v04','h07v05','h08v05','h09v05','h10v05',\
'h11v05','h12v05','h15v05','h16v05','h17v05','h18v05','h19v05','h20v05','h21v05','h22v05','h23v05',\
'h24v05','h25v05','h26v05','h27v05','h28v05','h29v05','h30v05','h02v06','h03v06','h07v06','h08v06',\
'h09v06','h10v06','h11v06','h16v06','h17v06','h18v06','h19v06','h20v06','h21v06','h22v06','h23v06',\
'h24v06','h25v06','h26v06','h27v06','h28v06','h29v06','h30v06','h31v06','h01v07','h03v07','h07v07',\
'h08v07','h09v07','h10v07','h11v07','h12v07','h15v07','h16v07','h17v07','h18v07','h19v07','h20v07',\
'h21v07','h22v07','h23v07','h24v07','h25v07','h26v07','h27v07','h28v07','h29v07','h30v07','h31v07',\
'h32v07','h33v07','h34v07','h00v08','h01v08','h02v08','h08v08','h09v08','h10v08','h11v08','h12v08',\
'h13v08','h16v08','h17v08','h18v08','h19v08','h20v08','h21v08','h22v08','h23v08','h25v08','h26v08',\
'h27v08','h28v08','h29v08','h30v08','h31v08','h32v08','h33v08','h34v08','h35v08','h00v09','h01v09',\
'h02v09','h03v09','h04v09','h08v09','h09v09','h10v09','h11v09','h12v09','h13v09','h14v09','h16v09',\
'h18v09','h19v09','h20v09','h21v09','h22v09','h23v09','h25v09','h27v09','h28v09','h29v09','h30v09',\
'h31v09','h32v09','h33v09','h34v09','h35v09',\
#southhemisphere
'h00v10','h01v10','h02v10','h03v10','h04v10','h05v10','h10v10','h11v10','h12v10','h13v10','h14v10',\
'h17v10','h19v10','h20v10','h21v10','h22v10','h23v10','h27v10','h28v10','h29v10','h30v10','h31v10',\
'h32v10','h33v10','h34v10','h35v10','h01v11','h02v11','h03v11','h04v11','h05v11','h06v11','h08v11',\
'h10v11','h11v11','h12v11','h13v11','h14v11','h15v11','h19v11','h20v11','h21v11','h22v11','h23v11',\
'h27v11','h28v11','h29v11','h30v11','h31v11','h32v11','h33v11','h11v12','h12v12','h13v12','h16v12',\
'h17v12','h19v12','h20v12','h24v12','h27v12','h28v12','h29v12','h30v12','h31v12','h32v12','h05v13',\
'h12v13','h13v13','h17v13','h20v13','h21v13','h22v13','h28v13','h29v13','h30v13','h31v13','h13v14',\
'h14v14','h15v14','h16v14','h18v14','h22v14','h27v14','h28v14']
'''
'''
tile = ["h17v00","h13v01","h10v02","h21v02","h22v02","h20v04","h21v04","h23v04",\
"h24v04","h27v04","h08v05","h10v05","h11v05","h17v05","h19v05","h20v05","h21v05",\
"h22v05","h23v05","h24v05","h25v05","h26v05","h27v05","h28v05","h29v05","h30v05",\
"h02v06","h03v06","h07v06","h08v06","h09v06","h11v06","h16v06","h17v06","h18v06",\
"h19v06","h20v06","h21v06","h22v06","h23v06","h24v06","h25v06","h26v06","h27v06",\
"h28v06","h29v06","h30v06","h31v06","h01v07","h03v07","h08v07","h12v07","h24v07"]
tile = ["h00v10","h01v10","h02v06","h02v10","h03v10","h04v10","h07v05","h08v06","h09v05",\
"h09v06","h10v05","h10v09","h10v10","h11v01","h11v05","h11v10","h12v09","h12v10",\
"h13v10","h14v00","h14v04","h14v10","h15v00","h16v00","h17v04","h17v05","h18v00",\
"h18v06","h19v00","h19v04","h19v05","h19v06","h20v00","h20v06","h21v00","h21v05",\
"h21v06","h21v10","h22v04","h22v05","h22v06","h23v04","h23v05","h23v06","h23v09",\
"h24v01","h24v05","h25v04","h25v05","h25v09","h26v04","h27v04","h27v05","h27v10",\
"h28v04","h28v09","h28v10","h29v09","h29v10","h30v05","h30v09","h30v10","h31v10",\
"h32v10","h35v09"]
'''
# High-latitude (v00/v01) tiles selected for this run.
tile = ["h11v01","h12v01","h13v01","h14v00","h14v01","h15v00","h15v01","h16v00","h16v01",\
"h17v00","h17v01","h18v00","h18v01","h19v00","h19v01","h20v00","h20v01","h21v00",\
"h21v01","h22v01","h23v01","h24v01"]
#i = np.arange(0,5)
#segtiles = tile[0:60] #lotus
#segtiles = tile[60:120] #for peony
#segtiles = tile[120:180] #for cattle
#segtiles = tile[180:240] # crane
#segtiles = tile[240:287] #lily
#segtiles = tile[0:12] #for cattle
#segtiles = tile[12:24] #for lily
#segtiles = tile[24:36] #for crane
#segtiles = tile[36:48] #for lotus
#segtiles = tile[48:65] #for poeny
#smooth(segtiles)
#process_list(segtiles, mp=True, count=6)
#segtiles = tile[0:5] #for cattle
#segtiles = tile[5:10] #for lily
#segtiles = tile[10:15] #for crane
#segtiles = tile[15:20] #for lotus
# Slice assigned to this machine (slices for other hosts kept above, disabled).
segtiles = tile[20:22] #for poeny
process_list(segtiles, mp=True, count=5)
|
unknown
|
codeparrot/codeparrot-clean
| ||
<?php

namespace Illuminate\Contracts\Support;

use ArrayAccess;
use IteratorAggregate;

/**
 * Marker contract for validated input data.
 *
 * Combines Arrayable, ArrayAccess and IteratorAggregate so implementations
 * can be converted to an array, indexed, and iterated; it declares no
 * methods of its own.
 */
interface ValidatedData extends Arrayable, ArrayAccess, IteratorAggregate
{
    //
}
|
php
|
github
|
https://github.com/laravel/framework
|
src/Illuminate/Contracts/Support/ValidatedData.php
|
/***************************************************************************
* _ _ ____ _
* Project ___| | | | _ \| |
* / __| | | | |_) | |
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
* Copyright (C) Daniel Stenberg, <daniel@haxx.se>, et al.
*
* This software is licensed as described in the file COPYING, which
* you should have received as part of this distribution. The terms
* are also available at https://curl.se/docs/copyright.html.
*
* You may opt to use, copy, modify, merge, publish, distribute and/or sell
* copies of the Software, and permit persons to whom the Software is
* furnished to do so, under the terms of the COPYING file.
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
* KIND, either express or implied.
*
* SPDX-License-Identifier: curl
*
***************************************************************************/
#include "curl_setup.h"
#include "llist.h"
#ifdef DEBUGBUILD
/* Debug-build poison/verification patterns stored in the _init fields so
   that using an uninitialized, foreign or already-removed list/node trips a
   DEBUGASSERT instead of silently corrupting memory. */
#define LLISTINIT 0x100cc001 /* random pattern */
#define NODEINIT 0x12344321 /* random pattern */
#define NODEREM 0x54321012 /* random pattern */
/* debug builds verify a node's pattern before handing it out */
#define VERIFYNODE(x) verifynode(x)
static struct Curl_llist_node *verifynode(struct Curl_llist_node *n)
{
  /* NULL is acceptable; anything else must carry the live-node pattern */
  DEBUGASSERT(!n || (n->_init == NODEINIT));
  return n;
}
#else
/* release builds: pass the node through unchecked */
#define VERIFYNODE(x) x
#endif
/*
* @unittest: 1300
*/
/* Initialize 'l' as an empty list with 'dtor' as its element destructor. */
void Curl_llist_init(struct Curl_llist *l, Curl_llist_dtor dtor)
{
  /* start out completely empty */
  l->_head = NULL;
  l->_tail = NULL;
  l->_size = 0;
  l->_dtor = dtor;
#ifdef DEBUGBUILD
  l->_init = LLISTINIT; /* mark as initialized for debug verification */
#endif
}
/*
* Curl_llist_insert_next()
*
* Inserts a new list element after the given one 'e'. If the given existing
* entry is NULL and the list already has elements, the new one will be
* inserted first in the list.
*
* The 'ne' argument should be a pointer into the object to store.
*
* @unittest: 1300
*/
void Curl_llist_insert_next(struct Curl_llist *list,
                            struct Curl_llist_node *e, /* may be NULL */
                            const void *p,
                            struct Curl_llist_node *ne)
{
  DEBUGASSERT(list);
  DEBUGASSERT(list->_init == LLISTINIT);
  DEBUGASSERT(ne);
#ifdef DEBUGBUILD
  ne->_init = NODEINIT;  /* mark the node live for debug verification */
#endif
  ne->_ptr = CURL_UNCONST(p);
  ne->_list = list;
  if(list->_size == 0) {
    /* empty list: 'ne' becomes both head and tail */
    list->_head = ne;
    list->_head->_prev = NULL;
    list->_head->_next = NULL;
    list->_tail = ne;
  }
  else {
    /* if 'e' is NULL here, we insert the new element first in the list */
    ne->_next = e ? e->_next : list->_head;
    ne->_prev = e;
    if(!e) {
      /* new first element: link the old head back to 'ne' */
      list->_head->_prev = ne;
      list->_head = ne;
    }
    else if(e->_next) {
      /* middle insertion: link the successor back to 'ne' */
      e->_next->_prev = ne;
    }
    else {
      /* 'e' was the tail, so 'ne' is the new tail */
      list->_tail = ne;
    }
    if(e)
      /* finally link 'e' forward to 'ne' */
      e->_next = ne;
  }
  ++list->_size;
}
/*
* Curl_llist_append()
*
* Adds a new list element to the end of the list.
*
* The 'ne' argument should be a pointer into the object to store.
*
* @unittest: 1300
*/
void Curl_llist_append(struct Curl_llist *list, const void *p,
                       struct Curl_llist_node *ne)
{
  DEBUGASSERT(list);
  DEBUGASSERT(list->_init == LLISTINIT);
  DEBUGASSERT(ne);
  /* appending is inserting after the current tail (NULL tail = empty list) */
  Curl_llist_insert_next(list, list->_tail, p, ne);
}
/* Unlink 'e' from its list WITHOUT invoking the list destructor and return
   the payload pointer. Safe to call with NULL (returns NULL). */
void *Curl_node_take_elem(struct Curl_llist_node *e)
{
  void *ptr;
  struct Curl_llist *list;
  if(!e)
    return NULL;
  list = e->_list;
  DEBUGASSERT(list);
  DEBUGASSERT(list->_init == LLISTINIT);
  DEBUGASSERT(list->_size);
  DEBUGASSERT(e->_init == NODEINIT);
  if(list) {
    if(e == list->_head) {
      /* removing the head: advance it, fixing tail/back-link as needed */
      list->_head = e->_next;
      if(!list->_head)
        list->_tail = NULL;   /* list became empty */
      else
        e->_next->_prev = NULL;
    }
    else {
      /* middle or tail removal: bridge the neighbours around 'e' */
      if(e->_prev)
        e->_prev->_next = e->_next;
      if(!e->_next)
        list->_tail = e->_prev;   /* 'e' was the tail */
      else
        e->_next->_prev = e->_prev;
    }
    --list->_size;
  }
  /* detach the node completely so stale pointers cannot be followed */
  ptr = e->_ptr;
  e->_list = NULL;
  e->_ptr = NULL;
  e->_prev = NULL;
  e->_next = NULL;
#ifdef DEBUGBUILD
  e->_init = NODEREM; /* specific pattern on remove - not zero */
#endif
  return ptr;
}
/*
* @unittest: 1300
*/
/* Remove 'e' from its list and hand the payload to the list's destructor
   (when one is set), passing 'user' through. NULL 'e' is a no-op. */
UNITTEST void Curl_node_uremove(struct Curl_llist_node *e, void *user);
UNITTEST void Curl_node_uremove(struct Curl_llist_node *e, void *user)
{
  struct Curl_llist *list;
  void *ptr;
  if(!e)
    return;
  list = e->_list;
  DEBUGASSERT(list);
  if(list) {
    /* unlink first, then let the destructor dispose of the payload */
    ptr = Curl_node_take_elem(e);
    if(list->_dtor)
      list->_dtor(user, ptr);
  }
}
/* Remove 'e' from its list, invoking the destructor with a NULL user
   pointer. */
void Curl_node_remove(struct Curl_llist_node *e)
{
  Curl_node_uremove(e, NULL);
}
/* Empty the list, removing nodes tail-first and invoking the list's
   destructor for each payload. NULL 'list' is a no-op. */
void Curl_llist_destroy(struct Curl_llist *list, void *user)
{
  if(!list)
    return;
  DEBUGASSERT(list->_init == LLISTINIT);
  while(list->_size > 0)
    Curl_node_uremove(list->_tail, user);
}
/* Curl_llist_head() returns the first 'struct Curl_llist_node *', which
might be NULL */
struct Curl_llist_node *Curl_llist_head(struct Curl_llist *list)
{
  struct Curl_llist_node *first;
  DEBUGASSERT(list);
  DEBUGASSERT(list->_init == LLISTINIT);
  /* NULL for an empty list */
  first = list->_head;
  return VERIFYNODE(first);
}
#ifdef UNITTESTS
/* only compiled for the unit test suite */
/* Curl_llist_tail() returns the last 'struct Curl_llist_node *', which
   might be NULL */
struct Curl_llist_node *Curl_llist_tail(struct Curl_llist *list)
{
  DEBUGASSERT(list);
  DEBUGASSERT(list->_init == LLISTINIT);
  return VERIFYNODE(list->_tail);
}
#endif
/* Curl_llist_count() returns a size_t the number of nodes in the list */
size_t Curl_llist_count(struct Curl_llist *list)
{
  size_t count;
  DEBUGASSERT(list);
  DEBUGASSERT(list->_init == LLISTINIT);
  count = list->_size;
  return count;
}
/* Curl_node_elem() returns the custom data from a Curl_llist_node */
void *Curl_node_elem(struct Curl_llist_node *n)
{
  void *payload;
  DEBUGASSERT(n);
  DEBUGASSERT(n->_init == NODEINIT);
  payload = n->_ptr;
  return payload;
}
/* Curl_node_next() returns the next element in a list from a given
Curl_llist_node */
struct Curl_llist_node *Curl_node_next(struct Curl_llist_node *n)
{
  struct Curl_llist_node *successor;
  DEBUGASSERT(n);
  DEBUGASSERT(n->_init == NODEINIT);
  /* NULL when 'n' is the tail */
  successor = n->_next;
  return VERIFYNODE(successor);
}
#ifdef UNITTESTS
/* only compiled for the unit test suite */
/* Curl_node_prev() returns the previous element in a list from a given
   Curl_llist_node */
struct Curl_llist_node *Curl_node_prev(struct Curl_llist_node *n)
{
  DEBUGASSERT(n);
  DEBUGASSERT(n->_init == NODEINIT);
  return VERIFYNODE(n->_prev);
}
#endif
/* Return the list this node belongs to, or NULL when detached. */
struct Curl_llist *Curl_node_llist(struct Curl_llist_node *n)
{
  struct Curl_llist *owner;
  DEBUGASSERT(n);
  /* only nodes attached to a list must carry the live-node pattern */
  DEBUGASSERT(!n->_list || n->_init == NODEINIT);
  owner = n->_list;
  return owner;
}
|
c
|
github
|
https://github.com/curl/curl
|
lib/llist.c
|
"""
Author: Jyler Menard
USES DOUBLE Q-LEARNING which is NOT the same as a double DQN.
Purpose implement a Deep Q Network that uses double Q-learning rather than Q-learning.
Q-learning can easily overestimate the value of an action from a state, resulting in overoptimistic value estimates.
Double Q-learning decouples the action selection step and the action evaluation step.
"""
import numpy as np
import matplotlib.pyplot as plt
import gym
import tensorflow as tf
from sklearn.preprocessing import StandardScaler
#import reinforcement.cart_pole_rbf as cpr
GAMMA = 0.99  # discount factor applied to the next state's value
# NOTE(review): CartPole-v1 has only 2 actions and this list is not read
# below (action counts come from env.action_space.n) -- appears unused
ALL_POSSIBLE_ACTIONS = [0,1,2]
GAME = "CartPole-v1"  # gym environment id used by main() and test()
class NeuralNetwork():
    """Two-hidden-layer feed-forward Q-network built on the TensorFlow 1.x
    graph API.

    Owns its own tf.InteractiveSession and an experience-replay buffer;
    training (partial_fit) is a no-op, and predict() returns random values,
    until the buffer has collected mem_min_size transitions.
    """
    def __init__(self, n_observations, n_actions):
        # n_observations --> number of input nodes
        # n_actions --> number of output nodes
        self.n_actions = n_actions
        self.n_observations = n_observations
        print("Using Feed-forward Neural Network")
        # NOTE(review): the scaler is created but never fitted or applied
        # below -- appears to be dead state, confirm
        self.scaler = StandardScaler()
        # MEMORY FOR EXPERIENCE REPLAY
        self.mem = []
        self.mem_min_size = 150    # replay/training starts at this many transitions
        self.mem_max_size = 10000  # oldest transitions evicted beyond this
        self.mem_full = 0 # Default: False
        self.tester = 0
        ##
        # DEFINE NN ARCHITECTURE
        ##
        learning_rate = 2.5e-4
        hid1 = 200 #
        hid2 = 200
        #hid3 = 500
        #print("hid1 = ", hid1, " hid2 = ",hid2)
        print("hid1 = ",hid1, " learning_rate = ",learning_rate)
        # DEFINE PLACEHOLDER(S)
        self.x = tf.placeholder(tf.float32, shape=[None,n_observations])  # state batch
        self.y_true = tf.placeholder(tf.float32, shape=[None,n_actions])  # target returns
        self.A = tf.placeholder(tf.float32, shape=[None,n_actions])       # one-hot action mask
        # DEFINE VARIABLES
        self.W1 = tf.Variable(tf.truncated_normal([n_observations,hid1],mean=0.0,stddev=0.1))
        self.b1 = tf.Variable(tf.constant(0.1, shape=[hid1]))
        self.W2 = tf.Variable(tf.truncated_normal([hid1,hid2],mean=0.0,stddev=0.1))
        self.b2 = tf.Variable(tf.constant(0.1, shape=[hid2]))
        #self.W3 = tf.Variable(tf.truncated_normal([hid2,hid3],mean=0.0,stddev=0.1))
        #self.b3 = tf.Variable(tf.constant(0.1, shape=[hid3]))
        self.W4 = tf.Variable(tf.truncated_normal([hid2, n_actions],mean=0.0, stddev=0.1))
        self.b4 = tf.Variable(tf.constant(0.1, shape=[n_actions]))
        # DEFINE ARCHITECTURE: two tanh hidden layers, linear output head
        y1 = tf.matmul(self.x, self.W1) + self.b1
        z1 = tf.nn.tanh(y1)
        y2 = tf.matmul(z1, self.W2) + self.b2
        z2 = tf.nn.tanh(y2)
        #y3 = tf.matmul(z2, self.W3) + self.b3
        z3 = z2#tf.nn.relu(y3)
        y_pred = tf.matmul(z3, self.W4) + self.b4
        # DEFINE OPERATIONS AND COST FUNCTION
        #selected_action_values = tf.reduce_sum(
        #tf.multiply(y_pred,self.A),
        # y_pred * tf.one_hot(self.A, n_actions),
        # keepdims=True
        #reduction_indices=[1]
        # )
        # mask the predictions so only the taken action contributes to the loss
        selected_action_values = y_pred * self.A#tf.one_hot(self.A, n_actions)
        delta = selected_action_values - self.y_true
        #delta = y_pred - self.y_true
        #cost = tf.reduce_sum( delta*delta )
        cost = tf.reduce_sum(tf.square(delta))
        # OPS
        self.train_ops = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)
        #self.train_ops = tf.train.AdamOptimizer(learning_rate).minimize(cost)
        self.predict_ops = y_pred
        self.sess = tf.InteractiveSession()
        sess = self.sess
        init = tf.global_variables_initializer()
        sess.run(init)
        self.grad_vals = []
        pass
    def feedfwd(self,X,size):
        """Apply one relu layer to X.

        NOTE(review): references self.W, which is never defined (only
        W1/W2/W4 exist), so calling this raises AttributeError; 'size' and
        'input_size' are unused. Looks like dead code -- confirm.
        """
        input_size = int(X.get_shape()[1])
        y = tf.matmul(X,self.W) + self.b1
        z = tf.nn.relu(y)
        return z
    def update_test(self,num):
        # increments the test counter; 'num' is ignored
        self.tester += 1
    def partial_fit(self,G,X):
        """Run one SGD step on a random replay mini-batch.

        No-op until the replay buffer reaches mem_min_size. G and X (this
        transition's return and state) are ignored here; training data is
        drawn from the replay memory instead.
        """
        # X --> observations, 1x4 initially
        # G --> vector of returns, 1x2 initially
        #print("Shape = ", G.shape, " G = ",G)
        if self.mem_full:
            batch_X, batch_G, batch_A = self.batch_replay(32)
            feed_dictionary = {self.x:batch_X,self.y_true:batch_G, self.A:batch_A}
            self.sess.run(self.train_ops, feed_dict=feed_dictionary)
    def predict(self,X):
        """Return Q-value estimates for state X; uniform random values until
        the replay buffer has filled to mem_min_size."""
        # X --> observations
        if not self.mem_full:
            return np.random.random((1,self.n_actions))
        y = self.sess.run(self.predict_ops, feed_dict={self.x:X})
        #print("predicted y = ",y)
        return y
    def get_state(self,observations):
        """Reshape a flat observation vector into a 1 x n row batch."""
        shape = observations.shape[0]
        y = observations.reshape((1,shape))
        return y
    def store_in_mem(self,s,a,r,s_prime,G):
        """Append a (state, action, reward, next_state, return) transition,
        evicting the oldest entry once mem_max_size is reached."""
        tup_4 = (s,a,r,s_prime,G)
        if self.mem_full:
            if len(self.mem)>=self.mem_max_size:
                self.mem.pop(0)
            self.mem.append(tup_4)
        else:
            self.mem.append(tup_4)
            if len(self.mem) == self.mem_min_size:
                print("Memory full")
                self.mem_full = 1
    def batch_replay(self, batch_size):
        """Draw a random mini-batch from replay memory.

        Returns (batch_X states, batch_G stored target-return rows, batch_A
        one-hot action masks), each with batch_size rows.
        """
        # mem filled with 4-tuples (s,a,r,s')
        # Need to grab random batch of size batch_size
        temp_batches = self.mem.copy()
        np.random.shuffle(temp_batches)
        temp_batches = temp_batches[:batch_size]
        batch_G = np.zeros((batch_size,self.n_actions))
        batch_X = np.zeros((batch_size,self.n_observations))
        batch_A = np.zeros((batch_size,self.n_actions))#,dtype=np.int32)
        #batch_A = []
        for i in range(batch_size):
            s, a, r, s_prime,temp_G = temp_batches[i]
            #V_s_prime = self.predict(s_prime)
            #batch_G[i][a] = r + GAMMA*np.max(V_s_prime)
            batch_G[i] = temp_G
            batch_X[i] = s
            #batch_X[i] *= batch_A[i]
            batch_A[i][a] = 1
            #batch_A.append(a)
        #print(batch_A)
        return batch_X, batch_G, batch_A
def epsilon_greedy(model, model_2, s, epsilon, env):
    """Pick an action epsilon-greedily using the summed Q-estimates of both
    networks (a value below 0 forces pure greedy selection)."""
    if np.random.random() <= epsilon:
        # explore: let the environment sample a random action
        return env.action_space.sample()#np.random.choice(ALL_POSSIBLE_ACTIONS)
    # exploit: argmax over the combined value estimates of the two networks
    combined = model.predict(s) + model_2.predict(s)
    return np.argmax(combined)
def get_return(model_1, model_2, s_prime, a, r, target_model):
    """Compute the double Q-learning one-step target vector.

    target_model names the network whose update target is being built: the
    *other* network selects the best next action (argmax over its values)
    while the named network evaluates that action -- the decoupling that
    defines double Q-learning.

    Returns a (1, n_actions) array that is zero everywhere except index 'a',
    which holds r + GAMMA * evaluated_value.
    """
    # fix: the two branches were duplicated except for which model played
    # which role; select the roles once and compute the target a single time
    if target_model == 1:
        selector, evaluator = model_2, model_1
    else:
        selector, evaluator = model_1, model_2
    V_s_prime = selector.predict(s_prime)          # chooses the next action
    V_eval = evaluator.predict(s_prime)            # evaluates that action
    state_act_val = V_eval[0][np.argmax(V_s_prime)]
    G = np.zeros((1, V_s_prime.shape[1]))
    G[0][a] = r + GAMMA * state_act_val
    return G
def reward_function(observation, target_pos):
    """Shaped reward: absolute percentage offset of the cart/car position
    (observation[0]) from target_pos, normalized by 3x the target."""
    fraction = (target_pos - observation[0]) / (target_pos * 3)
    return abs(fraction * 100)
def play_episode(env, model, model_2, epsilon, tot_acts):
    """Play one episode (capped at 500 steps), training the two networks
    online with double Q-learning.

    Each step flips a fair coin to decide which network is updated; the
    other one participates in building the target via get_return().
    Returns (run_avg: count of steps that avoided the terminal penalty,
    tot_acts: the running action counter incremented by this episode).
    """
    done = False
    obs = env.reset()
    s = model.get_state(obs)
    num = 0
    run_avg = 0
    prnt = 1   # makes sure the ">300 steps" message prints at most once
    while not done and num<500:
        num+=1
        if num>300 and prnt==1:
            print("num > 300, performing very well")
            prnt = 0
        tot_acts += 1
        a = epsilon_greedy(model,model_2, s, epsilon,env)
        observation, r, done, _ = env.step(a)
        s_prime = model.get_state(observation)
        # FOR CART-POLE GAME
        if done:
            r = -200   # large penalty for ending the episode (pole dropped)
        if r >-100:
            run_avg += 1   # count non-terminal steps
        # FOR MOUNTAIN CAR
        #if observation[0] > 0:
        # r = +50
        #r = reward_function(observation, 0.6)
        # coin flip decides which network stores/learns this transition
        num_p = np.random.random()
        if num_p >= 0.5:
            G = get_return(model,model_2, s_prime, a,r,2)
            model.store_in_mem(s,a,r,s_prime,G)
            model.partial_fit(G, s)
        else:
            G = get_return(model,model_2, s_prime, a,r,1)
            model_2.store_in_mem(s,a,r,s_prime,G)
            model_2.partial_fit(G, s)
        s = s_prime
    return run_avg, tot_acts
def main(N=100):
    """Train the two Q-networks with double Q-learning on GAME for N
    episodes, plot the 50-episode running average, then run the test loop.
    """
    #env = gym.make("CartPole-v1")
    env = gym.make(GAME)
    record_bool = input("Record every perfect cube training episode? [Y/n]")
    # fix: re-prompt on invalid input; the original loop never re-read the
    # answer, so a wrong input spun forever printing "Wrong input".
    while record_bool not in ["Y", "n"]:
        print("Wrong input")
        record_bool = input("Record every perfect cube training episode? [Y/n]")
    if record_bool=="Y":
        env = gym.wrappers.Monitor(env, "videos",force=True)
    D = len(env.observation_space.sample())   # observation dimensionality
    K = env.action_space.n                    # number of discrete actions
    model = NeuralNetwork(D,K)
    model_2 = NeuralNetwork(D,K)
    running_average = []
    positions = []   # NOTE(review): never filled -- appears unused, confirm
    tot_run_avg = 0
    tot_acts = 0
    for i in range(N):
        # decaying exploration rate
        epsilon = 1.0/(np.sqrt(i) + 1)
        temp_run_avg, temp_tot_acts = play_episode(env, model, model_2, epsilon, tot_acts)
        tot_run_avg += temp_run_avg
        # NOTE(review): play_episode already returns the incremented counter,
        # so adding it back double-counts; kept as-is because tot_acts is
        # only fed back to play_episode and never reported.
        tot_acts += temp_tot_acts
        if i%50 == 0 and i!=0:
            tot_run_avg/= 50
            print("episode = ",i, " avg over 50 = ",tot_run_avg)
            running_average.append(tot_run_avg)
            tot_run_avg = 0
    plt.plot(running_average)
    plt.xlabel("No. games (x100)")
    plt.ylabel("50-Game Time Average")
    plt.show()
    input("test?")
    test(model, model_2, env)
def test(model,model_2, env):
    """Evaluate the greedy policy (epsilon = -1 disables exploration):
    run 10 unrendered episodes and print the mean episode length, then
    play one rendered episode on a freshly created environment."""
    num=0
    alpha = 0.1   # NOTE(review): never read -- appears unused, confirm
    for i in range(10):
        done = False
        obs = env.reset()
        s = model.get_state(obs)
        while not done:
            # if i == 1:
            # env.render()
            # epsilon = -1 forces the greedy action every step
            a = epsilon_greedy(model,model_2, s, -1,env)
            observation, r, done, _ = env.step(a)
            s_prime = model.get_state(observation)
            s = s_prime
            num+=1
        # if i == 1:
        # env.close()
    print("tot = ",num/10)
    #env = gym.make("CartPole-v1")
    # fresh environment for the single rendered demonstration episode
    env = gym.make(GAME)
    done =False
    obs = env.reset()
    s = model.get_state(obs)
    while not done:
        env.render()
        a = epsilon_greedy(model,model_2, s, -1,env)
        observation, r, done, _ = env.step(a)
        s_prime = model.get_state(observation)
        s = s_prime
    env.close()
|
unknown
|
codeparrot/codeparrot-clean
| ||
# Import the package lazily via the __lazy_import__ builtin, requesting the
# "basic2" submodule through fromlist.
basic = __lazy_import__('test.test_import.data.lazy_imports',
                        fromlist=("basic2", ))
basic  # referencing the name forces the lazy import to actually resolve
|
python
|
github
|
https://github.com/python/cpython
|
Lib/test/test_import/data/lazy_imports/dunder_lazy_import_used.py
|
{
"kind": "Dashboard",
"apiVersion": "dashboard.grafana.app/v2beta1",
"metadata": {
"name": "v40.refresh_numeric.v42"
},
"spec": {
"annotations": [
{
"kind": "AnnotationQuery",
"spec": {
"query": {
"kind": "DataQuery",
"group": "grafana",
"version": "v0",
"datasource": {
"name": "-- Grafana --"
},
"spec": {}
},
"enable": true,
"hide": true,
"iconColor": "rgba(0, 211, 255, 1)",
"name": "Annotations \u0026 Alerts",
"builtIn": true,
"legacyOptions": {
"type": "dashboard"
}
}
}
],
"cursorSync": "Off",
"editable": true,
"elements": {},
"layout": {
"kind": "GridLayout",
"spec": {
"items": []
}
},
"links": [],
"liveNow": false,
"preload": false,
"tags": [],
"timeSettings": {
"timezone": "",
"from": "now-6h",
"to": "now",
"autoRefresh": "",
"autoRefreshIntervals": [
"5s",
"10s",
"30s",
"1m",
"5m",
"15m",
"30m",
"1h",
"2h",
"1d"
],
"hideTimepicker": false,
"fiscalYearStartMonth": 0
},
"title": "Numeric Refresh Test Dashboard",
"variables": []
},
"status": {
"conversion": {
"failed": false,
"storedVersion": "v1beta1"
}
}
}
|
json
|
github
|
https://github.com/grafana/grafana
|
apps/dashboard/pkg/migration/conversion/testdata/migrated_dashboards_output/v1beta1-mig-v40.refresh_numeric.v42.v2beta1.json
|
from django import forms
from . import models
from django.contrib.auth.models import User
from django.utils.translation import ugettext_lazy as _
class SearchJourney(forms.Form):
    """Journey search form: free-text origin and destination city names."""
    city_from = forms.CharField()
    city_to = forms.CharField()
class Journey(forms.ModelForm):
    """ModelForm for creating/editing a Journey.

    The commented-out code below once restricted the 'car' choices to the
    current user's cars; with it disabled, the default ModelChoiceField
    queryset (all cars) is used.
    """
    #def __init__(self, user, *args, **kwargs):
    # super(Journey, self).__init__(*args, **kwargs)
    # car = models.Car.objects.filter(owner=user)
    # self.fields['car'].queryset = car
    #car = forms.ModelChoiceField(
    # queryset=None,
    # empty_label=None,
    # to_field_name='name',
    # label='Car'
    #)
    class Meta:
        model = models.Journey
        fields = ['seats', 'car', 'date', 'approx', 'approx_note', 'currency']
# Shared inline-formset configuration for a Journey's waypoint rows.
wpt_base_factory_kwargs = {
    'parent_model': models.Journey,
    'model': models.JourneyWaypoints,
    'fields': ('waypoint', 'order', 'label', 'output_only', 'segment_price'),
    'extra': 0,
    'can_order': True,
    'can_delete': True,
}
# Shallow copies so the "new" and "update" factories can diverge from the base.
wpt_new_factory_kwargs = dict(wpt_base_factory_kwargs)
# NOTE(review): never customized below, so the update factory is identical to
# the base configuration (zero extra blank rows) -- confirm that is intended.
wpt_update_factory_kwargs = dict(wpt_base_factory_kwargs)
wpt_new_factory_kwargs['extra'] = 2  # show two blank waypoint rows on the "new" form
WaypointNewFormSetFactory = forms.inlineformset_factory(
    **wpt_new_factory_kwargs)
WaypointUpdateFormSetFactory = forms.inlineformset_factory(
    **wpt_update_factory_kwargs)
class JourneyFormSet(forms.ModelForm):
    """ModelForm for a single waypoint row of a journey."""
    class Meta:
        model = models.JourneyWaypoints
        fields = ['journey', 'waypoint', 'label']
    # NOTE(review): 'js' looks like it was meant for an inner Media class
    # (class Media: js = (...)); as written Django does not pick it up --
    # confirm intent.
    js = ('js/jquery.js',)
class CarForm(forms.ModelForm):
    """Car create/edit form; 'owner' is excluded (set by the view)."""
    class Meta():
        model = models.Car
        fields = '__all__'
        exclude = ('owner',)
class UserForm(forms.ModelForm):
    """Edit form for the built-in User: username and real name only."""
    class Meta():
        model = User
        fields = ['username', 'first_name', 'last_name']
class UserProfileForm(forms.ModelForm):
    """Profile edit form; 'user' is excluded (bound by the view)."""
    class Meta():
        model = models.UserProfile
        fields = '__all__'
        exclude = ('user',)
|
unknown
|
codeparrot/codeparrot-clean
| ||
# `actix-web-actors`
> Actix actors support for Actix Web.
>
> This crate is deprecated. Migrate to [`actix-ws`](https://crates.io/crates/actix-ws).
<!-- prettier-ignore-start -->
[](https://crates.io/crates/actix-web-actors)
[](https://docs.rs/actix-web-actors/4.3.1)


<br />

[](https://crates.io/crates/actix-web-actors)
[](https://discord.gg/NWpN5mmg3x)
<!-- prettier-ignore-end -->
|
unknown
|
github
|
https://github.com/actix/actix-web
|
actix-web-actors/README.md
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple
from google.api_core import grpc_helpers # type: ignore
from google.api_core import gapic_v1 # type: ignore
import google.auth # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from google.ads.googleads.v8.resources.types import user_list
from google.ads.googleads.v8.services.types import user_list_service
from .base import UserListServiceTransport, DEFAULT_CLIENT_INFO
class UserListServiceGrpcTransport(UserListServiceTransport):
    """gRPC backend transport for UserListService.
    Service to manage user lists.
    This class defines the same methods as the primary client, so the
    primary client can load the underlying transport implementation
    and call it.
    It sends protocol buffers over the wire using gRPC (which is built on
    top of HTTP/2); the ``grpcio`` package must be installed.
    """
    def __init__(self, *,
            host: str = 'googleads.googleapis.com',
            credentials: ga_credentials.Credentials = None,
            credentials_file: str = None,
            scopes: Sequence[str] = None,
            channel: grpc.Channel = None,
            api_mtls_endpoint: str = None,
            client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
            ssl_channel_credentials: grpc.ChannelCredentials = None,
            quota_project_id: Optional[str] = None,
            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
            ) -> None:
        """Instantiate the transport.
        Args:
            host (Optional[str]):
                 The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
                This argument is ignored if ``channel`` is provided.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is ignored if ``channel`` is provided.
            scopes (Optional(Sequence[str])): A list of scopes. This argument is
                ignored if ``channel`` is provided.
            channel (Optional[grpc.Channel]): A ``Channel`` instance through
                which to make calls.
            api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
                If provided, it overrides the ``host`` argument and tries to create
                a mutual TLS channel with client SSL credentials from
                ``client_cert_source`` or application default SSL credentials.
            client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
                Deprecated. A callback to provide client SSL certificate bytes and
                private key bytes, both in PEM format. It is ignored if
                ``api_mtls_endpoint`` is None.
            ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
                for grpc channel. It is ignored if ``channel`` is provided.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
        Raises:
            google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
              creation failed for any reason.
        """
        self._ssl_channel_credentials = ssl_channel_credentials
        # Three mutually exclusive ways to obtain a channel, in priority order:
        # (1) caller supplied one, (2) deprecated mTLS endpoint, (3) plain TLS.
        if channel:
            # Sanity check: Ensure that channel and credentials are not both
            # provided.
            # NOTE(review): False (rather than None) appears to act as a
            # sentinel telling the base transport not to attempt default
            # credential lookup — confirm against UserListServiceTransport.
            credentials = False
            # If a channel was explicitly provided, set it.
            self._grpc_channel = channel
            self._ssl_channel_credentials = None
        elif api_mtls_endpoint:
            warnings.warn("api_mtls_endpoint and client_cert_source are deprecated", DeprecationWarning)
            # Append the default gRPC port when the endpoint has none.
            host = api_mtls_endpoint if ":" in api_mtls_endpoint else api_mtls_endpoint + ":443"
            if credentials is None:
                credentials, _ = google.auth.default(scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id)
            # Create SSL credentials with client_cert_source or application
            # default SSL credentials.
            if client_cert_source:
                cert, key = client_cert_source()
                ssl_credentials = grpc.ssl_channel_credentials(
                    certificate_chain=cert, private_key=key
                )
            else:
                ssl_credentials = SslCredentials().ssl_credentials
            # create a new channel. The provided one is ignored.
            self._grpc_channel = type(self).create_channel(
                host,
                credentials=credentials,
                credentials_file=credentials_file,
                ssl_credentials=ssl_credentials,
                scopes=scopes or self.AUTH_SCOPES,
                quota_project_id=quota_project_id,
                options=[
                    # Unlimited message sizes; Google Ads responses can be large.
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )
            self._ssl_channel_credentials = ssl_credentials
        else:
            host = host if ":" in host else host + ":443"
            if credentials is None:
                credentials, _ = google.auth.default(scopes=self.AUTH_SCOPES)
            # create a new channel. The provided one is ignored.
            # NOTE(review): unlike the mTLS branch above, this call does not
            # forward credentials_file, quota_project_id, or the caller's
            # scopes — confirm this asymmetry is intended.
            self._grpc_channel = type(self).create_channel(
                host,
                credentials=credentials,
                ssl_credentials=ssl_channel_credentials,
                scopes=self.AUTH_SCOPES,
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )
        # Lazily-populated cache of RPC stub callables, keyed by method name.
        self._stubs = {}  # type: Dict[str, Callable]
        # Run the base constructor.
        super().__init__(
            host=host,
            credentials=credentials,
            client_info=client_info,
        )
    @classmethod
    def create_channel(cls,
            host: str = 'googleads.googleapis.com',
            credentials: ga_credentials.Credentials = None,
            scopes: Optional[Sequence[str]] = None,
            **kwargs) -> grpc.Channel:
        """Create and return a gRPC channel object.
        Args:
            address (Optional[str]): The host for the channel to use.
            credentials (Optional[~.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify this application to the service. If
                none are specified, the client will attempt to ascertain
                the credentials from the environment.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
                service. These are only used when credentials are not specified and
                are passed to :func:`google.auth.default`.
            kwargs (Optional[dict]): Keyword arguments, which are passed to the
                channel creation.
        Returns:
            grpc.Channel: A gRPC channel object.
        """
        return grpc_helpers.create_channel(
            host,
            credentials=credentials,
            scopes=scopes or cls.AUTH_SCOPES,
            **kwargs
        )
    @property
    def grpc_channel(self) -> grpc.Channel:
        """Return the channel designed to connect to this service.
        """
        return self._grpc_channel
    @property
    def get_user_list(self) -> Callable[
            [user_list_service.GetUserListRequest],
            user_list.UserList]:
        r"""Return a callable for the get user list method over gRPC.
        Returns the requested user list.
        List of thrown errors: `AuthenticationError <>`__
        `AuthorizationError <>`__ `HeaderError <>`__
        `InternalError <>`__ `QuotaError <>`__ `RequestError <>`__
        Returns:
            Callable[[~.GetUserListRequest],
                ~.UserList]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if 'get_user_list' not in self._stubs:
            self._stubs['get_user_list'] = self.grpc_channel.unary_unary(
                '/google.ads.googleads.v8.services.UserListService/GetUserList',
                request_serializer=user_list_service.GetUserListRequest.serialize,
                response_deserializer=user_list.UserList.deserialize,
            )
        return self._stubs['get_user_list']
    @property
    def mutate_user_lists(self) -> Callable[
            [user_list_service.MutateUserListsRequest],
            user_list_service.MutateUserListsResponse]:
        r"""Return a callable for the mutate user lists method over gRPC.
        Creates or updates user lists. Operation statuses are returned.
        List of thrown errors: `AuthenticationError <>`__
        `AuthorizationError <>`__ `CollectionSizeError <>`__
        `DatabaseError <>`__ `DistinctError <>`__ `FieldError <>`__
        `FieldMaskError <>`__ `HeaderError <>`__ `InternalError <>`__
        `MutateError <>`__ `NewResourceCreationError <>`__
        `NotAllowlistedError <>`__ `NotEmptyError <>`__
        `OperationAccessDeniedError <>`__ `QuotaError <>`__
        `RangeError <>`__ `RequestError <>`__ `StringFormatError <>`__
        `StringLengthError <>`__ `UserListError <>`__
        Returns:
            Callable[[~.MutateUserListsRequest],
                ~.MutateUserListsResponse]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if 'mutate_user_lists' not in self._stubs:
            self._stubs['mutate_user_lists'] = self.grpc_channel.unary_unary(
                '/google.ads.googleads.v8.services.UserListService/MutateUserLists',
                request_serializer=user_list_service.MutateUserListsRequest.serialize,
                response_deserializer=user_list_service.MutateUserListsResponse.deserialize,
            )
        return self._stubs['mutate_user_lists']
# Explicit public API of this module.
__all__ = (
    'UserListServiceGrpcTransport',
)
|
unknown
|
codeparrot/codeparrot-clean
| ||
# -*- coding:utf-8 -*-
"""
/***************************************************************************
qgsplugininstallerinstallingdialog.py
Plugin Installer module
-------------------
Date : June 2013
Copyright : (C) 2013 by Borys Jurgiel
Email : info at borysjurgiel dot pl
This module is based on former plugin_installer plugin:
Copyright (C) 2007-2008 Matthew Perry
Copyright (C) 2008-2013 Borys Jurgiel
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
from builtins import str
from qgis.PyQt.QtCore import QDir, QUrl, QFile, QCoreApplication
from qgis.PyQt.QtWidgets import QDialog
from qgis.PyQt.QtNetwork import QNetworkRequest, QNetworkReply
import qgis
from qgis.core import QgsNetworkAccessManager, QgsAuthManager
from .ui_qgsplugininstallerinstallingbase import Ui_QgsPluginInstallerInstallingDialogBase
from .installer_data import removeDir, repositories
from .unzip import unzip
class QgsPluginInstallerInstallingDialog(QDialog, Ui_QgsPluginInstallerInstallingDialogBase):
    """Modal dialog that downloads a plugin package over the network and
    extracts it into the user's plugin directory, reporting progress.

    Call exec_() to run it; afterwards result() is an empty string on
    success or a translated error message on failure.
    """
    # ----------------------------------------- #
    def __init__(self, parent, plugin):
        # plugin: metadata dict; keys used here: "name", "download_url",
        # "filename", "zip_repository", "id".
        QDialog.__init__(self, parent)
        self.setupUi(self)
        self.plugin = plugin
        self.mResult = ""
        # Range (0, 0) shows an indeterminate ("busy") progress bar until
        # the first downloadProgress signal supplies a real total.
        self.progressBar.setRange(0, 0)
        self.progressBar.setFormat("%p%")
        self.labelName.setText(plugin["name"])
        self.buttonBox.clicked.connect(self.abort)
        url = QUrl(plugin["download_url"])
        fileName = plugin["filename"]
        tmpDir = QDir.tempPath()
        tmpPath = QDir.cleanPath(tmpDir + "/" + fileName)
        self.file = QFile(tmpPath)
        self.request = QNetworkRequest(url)
        # Attach repository authentication, if configured for this repo.
        authcfg = repositories.all()[plugin["zip_repository"]]["authcfg"]
        if authcfg and isinstance(authcfg, str):
            if not QgsAuthManager.instance().updateNetworkRequest(
                    self.request, authcfg.strip()):
                # Auth setup failed: record the error and mark the request
                # unusable so exec_() bails out immediately.
                self.mResult = self.tr(
                    "Update of network request with authentication "
                    "credentials FAILED for configuration '{0}'").format(authcfg)
                self.request = None
        if self.request is not None:
            self.reply = QgsNetworkAccessManager.instance().get(self.request)
            self.reply.downloadProgress.connect(self.readProgress)
            self.reply.finished.connect(self.requestFinished)
            # State 4 == "Downloading data..." (see stateChanged).
            self.stateChanged(4)
    def exec_(self):
        # NOTE(review): returns QDialog.Rejected when the request could not
        # be built, but None when the dialog actually runs — callers
        # presumably inspect result() rather than the return value; confirm.
        if self.request is None:
            return QDialog.Rejected
        QDialog.exec_(self)
    # ----------------------------------------- #
    def result(self):
        # Empty string on success; translated error message otherwise.
        return self.mResult
    # ----------------------------------------- #
    def stateChanged(self, state):
        # Map a numeric state to a translated status label.
        messages = [self.tr("Installing..."), self.tr("Resolving host name..."), self.tr("Connecting..."), self.tr("Host connected. Sending request..."), self.tr("Downloading data..."), self.tr("Idle"), self.tr("Closing connection..."), self.tr("Error")]
        self.labelState.setText(messages[state])
    # ----------------------------------------- #
    def readProgress(self, done, total):
        # total may be -1/0 when the server does not send Content-Length.
        if total > 0:
            self.progressBar.setMaximum(total)
            self.progressBar.setValue(done)
    # ----------------------------------------- #
    def requestFinished(self):
        """Slot for QNetworkReply.finished: save the download, unzip it into
        the plugin directory, then close (or reject with an error)."""
        reply = self.sender()
        self.buttonBox.setEnabled(False)
        if reply.error() != QNetworkReply.NoError:
            self.mResult = reply.errorString()
            if reply.error() == QNetworkReply.OperationCanceledError:
                self.mResult += "<br/><br/>" + QCoreApplication.translate("QgsPluginInstaller", "If you haven't cancelled the download manually, it might be caused by a timeout. In this case consider increasing the connection timeout value in QGIS options.")
            self.reject()
            reply.deleteLater()
            return
        # Persist the downloaded bytes to the temporary zip file.
        self.file.open(QFile.WriteOnly)
        self.file.write(reply.readAll())
        self.file.close()
        self.stateChanged(0)
        reply.deleteLater()
        pluginDir = qgis.utils.home_plugin_path
        tmpPath = self.file.fileName()
        # make sure that the parent directory exists
        if not QDir(pluginDir).exists():
            QDir().mkpath(pluginDir)
        # if the target directory already exists as a link, remove the link without resolving:
        QFile(pluginDir + str(QDir.separator()) + self.plugin["id"]).remove()
        try:
            unzip(str(tmpPath), str(pluginDir))  # test extract. If fails, then exception will be raised and no removing occurs
            # removing old plugin files if exist
            removeDir(QDir.cleanPath(pluginDir + "/" + self.plugin["id"]))  # remove old plugin if exists
            unzip(str(tmpPath), str(pluginDir))  # final extract.
        except:
            self.mResult = self.tr("Failed to unzip the plugin package. Probably it's broken or missing from the repository. You may also want to make sure that you have write permission to the plugin directory:") + "\n" + pluginDir
            self.reject()
            return
        try:
            # cleaning: removing the temporary zip file
            QFile(tmpPath).remove()
        except:
            pass
        self.close()
    # ----------------------------------------- #
    def abort(self):
        """Slot for the dialog's button box: cancel a running download."""
        if self.reply.isRunning():
            # Disconnect finished first so requestFinished does not also run
            # for the aborted reply.
            self.reply.finished.disconnect()
            self.reply.abort()
            del self.reply
            self.mResult = self.tr("Aborted by user")
            self.reject()
|
unknown
|
codeparrot/codeparrot-clean
| ||
name: autoclose comment
# Post comment on PRs when labeled with "autoclose".
permissions:
contents: read
pull-requests: write
on:
pull_request_target:
types:
- labeled
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
GH_REPO: ${{ github.repository }}
PULL_REQUEST_NUMBER: ${{ github.event.pull_request.number }}
jobs:
post_comment:
name: post_comment
if: github.event.label.name == 'autoclose'
runs-on: ubuntu-latest
steps:
- name: comment on potential autoclose
run: |
gh api \
--method POST \
-H "Accept: application/vnd.github+json" \
-H "X-GitHub-Api-Version: 2022-11-28" \
/repos/$GH_REPO/issues/$PULL_REQUEST_NUMBER/comments \
-f "body=$BODY"
env:
BODY: >
⏰ This pull request might be automatically closed in two weeks from now.
Thank you for your contribution to scikit-learn and for the effort you have
put into this PR. This pull request does not yet meet the quality and
clarity needed for an effective review. Project maintainers have limited
time for code reviews, and our goal is to prioritize well-prepared
contributions to keep scikit-learn maintainable.
To increase the chance of a productive review, please refer to: [How do I
improve my issue or pull
request?](https://scikit-learn.org/dev/faq.html#how-do-i-improve-my-issue-or-pull-request)
As the author, you are responsible for driving this PR, which entails doing
necessary background research as well as presenting its context and your
thought process. If you are a [new
contributor](https://scikit-learn.org/dev/developers/contributing.html#new-contributors),
or do not know how to fulfill these requirements, we recommend that you
familiarise yourself with scikit-learn's development conventions via other
contribution types (e.g., reviewing PRs) before submitting code.
Scikit-learn maintainers cannot provide one-to-one guidance on this PR.
However, if you ask focused, well-researched questions, a community
member may be willing to help. 💬
If you substantially improve this PR within two weeks, a team member may
remove the `autoclose` label and the PR stays open. Cosmetic changes or
incomplete fixes will not be sufficient. Maintainers will assess
improvements on their own schedule. Please do not ping (`@`) maintainers.
|
unknown
|
github
|
https://github.com/scikit-learn/scikit-learn
|
.github/workflows/autoclose-comment.yml
|
#!/usr/bin/python
# (c) 2018, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: netapp_e_global
short_description: NetApp E-Series manage global settings configuration
description:
- Allow the user to configure several of the global settings associated with an E-Series storage-system
version_added: '2.7'
author: Michael Price (@lmprice)
extends_documentation_fragment:
- netapp.eseries
options:
name:
description:
- Set the name of the E-Series storage-system
- This label/name doesn't have to be unique.
- May be up to 30 characters in length.
aliases:
- label
log_path:
description:
- A local path to a file to be used for debug logging
required: no
notes:
- Check mode is supported.
- This module requires Web Services API v1.3 or newer.
"""
EXAMPLES = """
- name: Set the storage-system name
netapp_e_global:
name: myArrayName
api_url: "10.1.1.1:8443"
api_username: "admin"
api_password: "myPass"
"""
RETURN = """
msg:
description: Success message
returned: on success
type: str
sample: The settings have been updated.
name:
description:
- The current name/label of the storage-system.
returned: on success
sample: myArrayName
type: str
"""
import json
import logging
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.netapp import request, eseries_host_argument_spec
from ansible.module_utils._text import to_native
# Default REST headers: send and accept JSON payloads.
HEADERS = {
    "Content-Type": "application/json",
    "Accept": "application/json",
}
class GlobalSettings(object):
    """Manage global settings of a NetApp E-Series storage-system.

    Currently this covers only the array name/label, applied through the
    Web Services REST API. Supports Ansible check mode.
    """
    def __init__(self):
        argument_spec = eseries_host_argument_spec()
        argument_spec.update(dict(
            name=dict(type='str', required=False, aliases=['label']),
            log_path=dict(type='str', required=False),
        ))
        self.module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True, )
        args = self.module.params
        self.name = args['name']
        self.ssid = args['ssid']
        self.url = args['api_url']
        self.creds = dict(url_password=args['api_password'],
                          validate_certs=args['validate_certs'],
                          url_username=args['api_username'], )
        self.check_mode = self.module.check_mode
        log_path = args['log_path']
        # logging setup
        self._logger = logging.getLogger(self.__class__.__name__)
        if log_path:
            logging.basicConfig(
                level=logging.DEBUG, filename=log_path, filemode='w',
                format='%(relativeCreated)dms %(levelname)s %(module)s.%(funcName)s:%(lineno)d\n %(message)s')
        if not self.url.endswith('/'):
            self.url += '/'
        # The documented limit is "up to 30 characters" and the check below
        # only rejects names longer than 30, so the error message must not
        # claim the name has to be strictly shorter than 30.
        if self.name and len(self.name) > 30:
            self.module.fail_json(msg="The provided name is invalid, it must be no more than 30 characters in length.")
    def get_name(self):
        """Return the storage-system's current name, failing the module on
        connection problems or if the array is unreachable."""
        try:
            (rc, result) = request(self.url + 'storage-systems/%s' % self.ssid, headers=HEADERS, **self.creds)
            if result['status'] in ['offline', 'neverContacted']:
                self.module.fail_json(msg="This storage-system is offline! Array Id [%s]." % (self.ssid))
            return result['name']
        except Exception as err:
            self.module.fail_json(msg="Connection failure! Array Id [%s]. Error [%s]." % (self.ssid, to_native(err)))
    def update_name(self):
        """Set the requested name if it differs from the current one.

        Returns True when a change is needed (the POST is skipped in check
        mode), False otherwise.
        """
        name = self.get_name()
        update = False
        if self.name != name:
            update = True
            body = dict(name=self.name)
            if not self.check_mode:
                try:
                    (rc, result) = request(self.url + 'storage-systems/%s/configuration' % self.ssid, method='POST',
                                           data=json.dumps(body), headers=HEADERS, **self.creds)
                    self._logger.info("Set name to %s.", result['name'])
                # This is going to catch cases like a connection failure
                except Exception as err:
                    self.module.fail_json(
                        msg="We failed to set the storage-system name! Array Id [%s]. Error [%s]."
                            % (self.ssid, to_native(err)))
        return update
    def update(self):
        """Apply all managed settings and exit the module with the result."""
        update = self.update_name()
        name = self.get_name()
        self.module.exit_json(msg="The requested settings have been updated.", changed=update, name=name)
    def __call__(self, *args, **kwargs):
        self.update()
def main():
    """Module entry point: build the settings handler and run the update."""
    GlobalSettings()()
if __name__ == '__main__':
main()
|
unknown
|
codeparrot/codeparrot-clean
| ||
import array
import unittest
from test.test_support import run_unittest, import_module, get_attribute
import os, struct
fcntl = import_module('fcntl')
termios = import_module('termios')
get_attribute(termios, 'TIOCGPGRP') #Can't run tests without this feature
# Module-level precondition checks: skip the whole test module unless this
# process owns a controlling terminal in the foreground.
try:
    tty = open("/dev/tty", "r")
except IOError:
    raise unittest.SkipTest("Unable to open /dev/tty")
else:
    # Skip if another process is in foreground
    # The 4-byte buffer receives the foreground process-group id.
    r = fcntl.ioctl(tty, termios.TIOCGPGRP, "    ")
    tty.close()
    rpgrp = struct.unpack("i", r)[0]
    if rpgrp not in (os.getpgrp(), os.getsid(0)):
        raise unittest.SkipTest("Neither the process group nor the session "
                                "are attached to /dev/tty")
    # Clean up module namespace; the tests reopen /dev/tty themselves.
    del tty, r, rpgrp
try:
    import pty
except ImportError:
    pty = None
class IoctlTests(unittest.TestCase):
def test_ioctl(self):
# If this process has been put into the background, TIOCGPGRP returns
# the session ID instead of the process group id.
ids = (os.getpgrp(), os.getsid(0))
tty = open("/dev/tty", "r")
r = fcntl.ioctl(tty, termios.TIOCGPGRP, " ")
rpgrp = struct.unpack("i", r)[0]
self.assertIn(rpgrp, ids)
def _check_ioctl_mutate_len(self, nbytes=None):
buf = array.array('i')
intsize = buf.itemsize
ids = (os.getpgrp(), os.getsid(0))
# A fill value unlikely to be in `ids`
fill = -12345
if nbytes is not None:
# Extend the buffer so that it is exactly `nbytes` bytes long
buf.extend([fill] * (nbytes // intsize))
self.assertEqual(len(buf) * intsize, nbytes) # sanity check
else:
buf.append(fill)
with open("/dev/tty", "r") as tty:
r = fcntl.ioctl(tty, termios.TIOCGPGRP, buf, 1)
rpgrp = buf[0]
self.assertEqual(r, 0)
self.assertIn(rpgrp, ids)
def test_ioctl_mutate(self):
self._check_ioctl_mutate_len()
def test_ioctl_mutate_1024(self):
# Issue #9758: a mutable buffer of exactly 1024 bytes wouldn't be
# copied back after the system call.
self._check_ioctl_mutate_len(1024)
def test_ioctl_mutate_2048(self):
# Test with a larger buffer, just for the record.
self._check_ioctl_mutate_len(2048)
def test_ioctl_signed_unsigned_code_param(self):
if not pty:
raise unittest.SkipTest('pty module required')
mfd, sfd = pty.openpty()
try:
if termios.TIOCSWINSZ < 0:
set_winsz_opcode_maybe_neg = termios.TIOCSWINSZ
set_winsz_opcode_pos = termios.TIOCSWINSZ & 0xffffffffL
else:
set_winsz_opcode_pos = termios.TIOCSWINSZ
set_winsz_opcode_maybe_neg, = struct.unpack("i",
struct.pack("I", termios.TIOCSWINSZ))
our_winsz = struct.pack("HHHH",80,25,0,0)
# test both with a positive and potentially negative ioctl code
new_winsz = fcntl.ioctl(mfd, set_winsz_opcode_pos, our_winsz)
new_winsz = fcntl.ioctl(mfd, set_winsz_opcode_maybe_neg, our_winsz)
finally:
os.close(mfd)
os.close(sfd)
def test_main():
    # Standard entry point used by Python 2's regrtest harness.
    run_unittest(IoctlTests)
if __name__ == "__main__":
test_main()
|
unknown
|
codeparrot/codeparrot-clean
| ||
#// screen manager imported from http://kivy.org/docs/api-kivy.uix.screenmanager.html
from kivy.app import App
from kivy.uix.floatlayout import FloatLayout
from kivy.uix.boxlayout import BoxLayout
from kivy.lang import Builder
from random import random
from kivy.uix.widget import Widget
from kivy.graphics import Color, Rectangle
from kivy.uix.button import Button
from kivy.graphics import Color, Ellipse, Line
from kivy.uix.image import Image
import sys
from kivy.clock import Clock
# Module-level setup (Python 2 script): read the captured photo's filename,
# the destination email, and the selected college from files written by an
# earlier capture step.
f = open('/home/pi/Picture-Yo-self/code/pictures/picName.txt','r')
picname = f.read()
f.close()
print picname
f = open('/home/pi/Picture-Yo-self/code/pictures/email.txt','r')
email = f.read()
f.close()
# Output path for the final annotated image, named after the email address.
email = '/home/pi/Picture-Yo-self/code/pictures/' + email + '.png'
f = open('/home/pi/Picture-Yo-self/code/college.txt','r')
col = f.readline().strip()
f.close()
# Path to the college crest image (filename taken verbatim from the file).
college = '/home/pi/Picture-Yo-self/code/pictures/' + col
print col
#college = '/home/pi/Picture-Yo-self/code/pictures/Jones.jpg'#' + col + '.jpg'
print college
#reload(sys)
class MyPaintWidget(Widget):
    """Freehand drawing surface: each touch starts a stroke in a random hue."""

    def on_touch_down(self, touch):
        # Random hue at full saturation/value; the stroke is stored on the
        # touch so on_touch_move can extend it.
        hue = random()
        with self.canvas:
            Color(hue, 1, 1, mode='hsv')
            touch.ud['line'] = Line(points=(touch.x, touch.y), width=3)

    def on_touch_move(self, touch):
        # Append the new position to the stroke started in on_touch_down.
        touch.ud['line'].points += [touch.x, touch.y]
class MainApp(App):
    """Photo-booth UI: shows the captured photo, lets the user draw on it,
    and offers Clear / Retake / Save-and-email buttons."""
    # Class-level widgets shared by build() and the button callbacks.
    im=Image(source=picname, size_hint=(1,50))
    crest=Image(source=college, size_hint=(25,25))#, pos=(1,1))
    def build(self):
        root = BoxLayout(orientation='vertical')
        parent = BoxLayout(orientation='horizontal')
        painter = MyPaintWidget()
        crestwid = BoxLayout(orientation='horizontal')
        # create clear button
        clearbtn = Button(text='Clear', size_hint=(1,5))
        parent.add_widget(clearbtn)
        def clear_canvas(obj):
            # Wipe all strokes from the drawing layer.
            painter.canvas.clear()
        clearbtn.bind(on_release=clear_canvas)
        # create retake photo button
        retakebtn = Button(text='Retake Photo', size_hint=(1,5))
        parent.add_widget(retakebtn)
        def retake_pic(obj):
            # Re-run the capture script (Python 2 execfile), then refresh
            # the displayed image and discard any drawing.
            execfile("momocapture.py")
            self.im.reload()
            painter.canvas.clear()
        retakebtn.bind(on_release=retake_pic)
        # create save button
        savebtn = Button(text='Save and send to email', size_hint=(1,5))
        parent.add_widget(savebtn)
        def save_pic(obj):
            # Hide the buttons so they are not captured in the screenshot,
            # export the composed widget tree as a PNG, then quit the app.
            parent.remove_widget(savebtn)
            parent.remove_widget(clearbtn)
            parent.remove_widget(retakebtn)
            root.export_to_png(email)
            exit()
        savebtn.bind(on_release=save_pic)
        crestwid.add_widget(self.crest)
        parent.add_widget(crestwid)
        root.add_widget(self.im)
        root.add_widget(painter)
        root.add_widget(parent)
        #root.add_widget(crestwid)
        return root
class RootWidget(BoxLayout):
    # Unused placeholder; MainApp.build() constructs its own widget tree.
    pass
if __name__ == '__main__':
MainApp().run()
|
unknown
|
codeparrot/codeparrot-clean
| ||
#!/usr/bin/env python
import argparse
import json
from collections import defaultdict
from luigi import six
from luigi.six.moves.urllib.request import urlopen
class LuigiGrep(object):
    """Queries a luigi scheduler's JSON API and summarizes matching jobs."""

    def __init__(self, host, port):
        self._host = host
        self._port = port

    @property
    def graph_url(self):
        """URL of the scheduler's dependency-graph endpoint."""
        return "http://{0}:{1}/api/graph".format(self._host, self._port)

    def _fetch_json(self):
        """Returns the json representation of the dep graph"""
        print("Fetching from url: " + self.graph_url)
        raw = urlopen(self.graph_url).read()
        return json.loads(raw.decode('utf-8'))

    def _build_results(self, jobs, job):
        """Summarize one job: its status plus its dependencies grouped by
        their status ('UNKNOWN' for deps absent from the graph)."""
        info = jobs[job]
        by_status = defaultdict(list)
        for dep in info['deps']:
            key = jobs[dep]['status'] if dep in jobs else 'UNKNOWN'
            by_status[key].append(dep)
        return {"name": job, "status": info['status'], "deps_by_status": by_status}

    def prefix_search(self, job_name_prefix):
        """searches for jobs matching the given job_name_prefix."""
        jobs = self._fetch_json()['response']
        for job in jobs:
            if job.startswith(job_name_prefix):
                yield self._build_results(jobs, job)

    def status_search(self, status):
        """searches for jobs matching the given status"""
        jobs = self._fetch_json()['response']
        wanted = status.lower()
        for job in jobs:
            if jobs[job]['status'].lower() == wanted:
                yield self._build_results(jobs, job)
def main():
    """CLI entry point: query the scheduler and print matching workflows."""
    parser = argparse.ArgumentParser(
        "luigi-grep is used to search for workflows using the luigi scheduler's json api")
    parser.add_argument(
        "--scheduler-host", default="localhost", help="hostname of the luigi scheduler")
    parser.add_argument(
        "--scheduler-port", default="8082", help="port of the luigi scheduler")
    parser.add_argument("--prefix", help="prefix of a task query to search for", default=None)
    parser.add_argument("--status", help="search for jobs with the given status", default=None)
    args = parser.parse_args()

    grep = LuigiGrep(args.scheduler_host, args.scheduler_port)

    # --prefix wins when both are given, matching the original behavior.
    if args.prefix:
        results = grep.prefix_search(args.prefix)
    elif args.status:
        results = grep.status_search(args.status)
    else:
        results = []

    for job in results:
        print("{name}: {status}, Dependencies:".format(name=job['name'], status=job['status']))
        for (status, jobs) in six.iteritems(job['deps_by_status']):
            print("    status={status}".format(status=status))
            for job in jobs:
                print("        {job}".format(job=job))
if __name__ == '__main__':
main()
|
unknown
|
codeparrot/codeparrot-clean
| ||
parameters:
kind: nuget
extraOpts: --precompile
fulltest: false
steps:
- script: .\python.bat PC\layout -vv -s "$(Build.SourcesDirectory)" -b "$(Py_OutDir)\$(arch)" -t "$(Build.BinariesDirectory)\layout-tmp-${{ parameters.kind }}-$(arch)" --copy "$(Build.BinariesDirectory)\layout-${{ parameters.kind }}-$(arch)" ${{ parameters.extraOpts }} --preset-${{ parameters.kind }} --include-tests
displayName: Create ${{ parameters.kind }} layout
- script: .\python.exe -m test.pythoninfo
workingDirectory: $(Build.BinariesDirectory)\layout-${{ parameters.kind }}-$(arch)
displayName: Show layout info (${{ parameters.kind }})
- ${{ if eq(parameters.fulltest, 'true') }}:
- script: .\python.exe -m test -q -uall -u-cpu -rwW --slowest --timeout=1200 -j0 --junit-xml="$(Build.BinariesDirectory)\test-results-${{ parameters.kind }}.xml" --tempdir "$(Build.BinariesDirectory)\tmp-${{ parameters.kind }}-$(arch)" -i test_launcher
workingDirectory: $(Build.BinariesDirectory)\layout-${{ parameters.kind }}-$(arch)
displayName: ${{ parameters.kind }} Tests
env:
PREFIX: $(Build.BinariesDirectory)\layout-${{ parameters.kind }}-$(arch)
- task: PublishTestResults@2
displayName: Publish ${{ parameters.kind }} Test Results
inputs:
testResultsFiles: $(Build.BinariesDirectory)\test-results-${{ parameters.kind }}.xml
mergeTestResults: true
testRunTitle: ${{ parameters.kind }}-$(testRunTitle)
platform: $(testRunPlatform)
condition: succeededOrFailed()
|
unknown
|
github
|
https://github.com/python/cpython
|
.azure-pipelines/windows-layout-steps.yml
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""For internal use only; no backwards-compatibility guarantees."""
from google.protobuf import any_pb2
from google.protobuf import struct_pb2
def pack_Any(msg):
    """Creates a protobuf Any with msg as its content.

    Returns None if msg is None.
    """
    if msg is None:
        return None
    wrapper = any_pb2.Any()
    wrapper.Pack(msg)
    return wrapper
def unpack_Any(any_msg, msg_class):
    """Unpacks any_msg into msg_class.

    Returns None if msg_class is None.
    """
    if msg_class is None:
        return None
    out = msg_class()
    any_msg.Unpack(out)
    return out
def parse_Bytes(bytes, msg_class):
    """Parses the String of bytes into msg_class.

    Returns the input bytes if msg_class is None.
    """
    # NOTE: the parameter name shadows the builtin `bytes`; kept for
    # interface compatibility with existing keyword callers.
    if msg_class is None:
        return bytes
    parsed = msg_class()
    parsed.ParseFromString(bytes)
    return parsed
def pack_Struct(**kwargs):
    """Returns a struct containing the values indicated by kwargs.
    """
    struct = struct_pb2.Struct()
    for field_name, field_value in kwargs.items():
        struct[field_name] = field_value  # pylint: disable=unsubscriptable-object, unsupported-assignment-operation
    return struct
def from_micros(cls, micros):
    """Instantiate cls and initialize it from a microsecond timestamp."""
    instance = cls()
    instance.FromMicroseconds(micros)
    return instance
|
unknown
|
codeparrot/codeparrot-clean
| ||
# encoding:utf-8
"""
All constants could be used in other modules
For reasons that models, views can't have unicode
text in this project, all unicode text go here.
"""
from django.utils.translation import ugettext_lazy as _
import re
CLOSE_REASONS = (
(1, _('duplicate question')),
(2, _('question is off-topic or not relevant')),
(3, _('too subjective and argumentative')),
(4, _('not a real question')),
(5, _('the question is answered, right answer was accepted')),
(6, _('question is not relevant or outdated')),
(7, _('question contains offensive or malicious remarks')),
(8, _('spam or advertising')),
(9, _('too localized')),
)
LONG_TIME = 60*60*24*30 #30 days is a lot of time
DATETIME_FORMAT = '%I:%M %p, %d %b %Y'
TYPE_REPUTATION = (
(1, 'gain_by_upvoted'),
(2, 'gain_by_answer_accepted'),
(3, 'gain_by_accepting_answer'),
(4, 'gain_by_downvote_canceled'),
(5, 'gain_by_canceling_downvote'),
(-1, 'lose_by_canceling_accepted_answer'),
(-2, 'lose_by_accepted_answer_cancled'),
(-3, 'lose_by_downvoted'),
(-4, 'lose_by_flagged'),
(-5, 'lose_by_downvoting'),
(-6, 'lose_by_flagged_lastrevision_3_times'),
(-7, 'lose_by_flagged_lastrevision_5_times'),
(-8, 'lose_by_upvote_canceled'),
#for reputation type 10 Repute.comment field is required
(10, 'assigned_by_moderator'),
)
#do not translate keys
POST_SORT_METHODS = (
('age-desc', _('newest')),
('age-asc', _('oldest')),
('activity-desc', _('active')),
('activity-asc', _('inactive')),
('answers-desc', _('hottest')),
('answers-asc', _('coldest')),
('votes-desc', _('most voted')),
('votes-asc', _('least voted')),
('relevance-desc', _('relevance')),
)
POST_TYPES = ('answer', 'comment', 'question', 'tag_wiki', 'reject_reason')
SIMPLE_REPLY_SEPARATOR_TEMPLATE = '==== %s -=-=='
#values for SELF_NOTIFY_WHEN... settings use bits
NEVER = 'never'
FOR_FIRST_REVISION = 'first'
FOR_ANY_REVISION = 'any'
SELF_NOTIFY_EMAILED_POST_AUTHOR_WHEN_CHOICES = (
(NEVER, _('Never')),
(FOR_FIRST_REVISION, _('When new post is published')),
(FOR_ANY_REVISION, _('When post is published or revised')),
)
#need more options for web posts b/c user is looking at the page
#when posting. when posts are made by email - user is not looking
#at the site and therefore won't get any feedback unless an email is sent back
#todo: rename INITIAL -> FIRST and make values of type string
#FOR_INITIAL_REVISION_WHEN_APPROVED = 1
#FOR_ANY_REVISION_WHEN_APPROVED = 2
#FOR_INITIAL_REVISION_ALWAYS = 3
#FOR_ANY_REVISION_ALWAYS = 4
#SELF_NOTIFY_WEB_POST_AUTHOR_WHEN_CHOICES = (
# (NEVER, _('Never')),
# (
# FOR_INITIAL_REVISION_WHEN_APPROVED,
# _('When inital revision is approved by moderator')
# ),
# (
# FOR_ANY_REVISION_WHEN_APPROVED,
# _('When any revision is approved by moderator')
# ),
# (
# FOR_INITIAL_REVISION_ALWAYS,
# _('Any time when inital revision is published')
# ),
# (
# FOR_ANY_REVISION_ALWAYS,
# _('Any time when revision is published')
# )
#)
REPLY_SEPARATOR_TEMPLATE = '==== %(user_action)s %(instruction)s -=-=='
REPLY_WITH_COMMENT_TEMPLATE = _(
'Note: to reply with a comment, '
'please use <a href="mailto:%(addr)s?subject=%(subject)s">this link</a>'
)
REPLY_SEPARATOR_REGEX = re.compile(r'==== .* -=-==', re.MULTILINE|re.DOTALL)
ANSWER_SORT_METHODS = (#no translations needed here
'latest', 'oldest', 'votes'
)
#todo: add assertion here that all sort methods are unique
#because they are keys to the hash used in implementations
#of Q.run_advanced_search
DEFAULT_POST_SORT_METHOD = 'activity-desc'
POST_SCOPE_LIST = (
('all', _('all')),
('unanswered', _('unanswered')),
('favorite', _('favorite')),
)
DEFAULT_POST_SCOPE = 'all'
TAG_LIST_FORMAT_CHOICES = (
('list', _('list')),
('cloud', _('cloud')),
)
PAGE_SIZE_CHOICES = (('10', '10',), ('30', '30',), ('50', '50',),)
ANSWERS_PAGE_SIZE = 10
QUESTIONS_PER_PAGE_USER_CHOICES = ((10, u'10'), (30, u'30'), (50, u'50'),)
UNANSWERED_QUESTION_MEANING_CHOICES = (
('NO_ANSWERS', _('Question has no answers')),
('NO_ACCEPTED_ANSWERS', _('Question has no accepted answers')),
)
#todo: implement this
# ('NO_UPVOTED_ANSWERS',),
#)
#todo:
#this probably needs to be language-specific
#and selectable/changeable from the admin interface
#however it will be hard to expect that people will type
#correct regexes - plus this must be an anchored regex
#to do full string match
#IMPORTANT: tag related regexes must be portable between js and python
#characters permitted inside a tag name (used as a regex character-class body)
TAG_CHARS = r'\w+.#-'
#one or more tag characters, unanchored (kept portable between js and python)
TAG_REGEX_BARE = r'[%s]+' % TAG_CHARS
#anchored variant - full-string match of a single tag
TAG_REGEX = r'^%s$' % TAG_REGEX_BARE
#tags in user input may be separated by spaces and/or commas
TAG_SPLIT_REGEX = r'[ ,]+'
TAG_SEP = ',' # has to be valid TAG_SPLIT_REGEX char and MUST NOT be in const.TAG_CHARS
#!!! see const.message_keys.TAG_WRONG_CHARS_MESSAGE
#NOTE(review): the TLD part is capped at 4 characters, so addresses under
#longer modern TLDs (e.g. ".museum") will not match - confirm intended
EMAIL_REGEX = re.compile(r'\b[A-Z0-9._%+-]+@[A-Z0-9.-]+\.[A-Z]{2,4}\b', re.I)
TYPE_ACTIVITY_ASK_QUESTION = 1
TYPE_ACTIVITY_ANSWER = 2
TYPE_ACTIVITY_COMMENT_QUESTION = 3
TYPE_ACTIVITY_COMMENT_ANSWER = 4
TYPE_ACTIVITY_UPDATE_QUESTION = 5
TYPE_ACTIVITY_UPDATE_ANSWER = 6
TYPE_ACTIVITY_PRIZE = 7
TYPE_ACTIVITY_MARK_ANSWER = 8
TYPE_ACTIVITY_VOTE_UP = 9
TYPE_ACTIVITY_VOTE_DOWN = 10
TYPE_ACTIVITY_CANCEL_VOTE = 11
TYPE_ACTIVITY_DELETE_QUESTION = 12
TYPE_ACTIVITY_DELETE_ANSWER = 13
TYPE_ACTIVITY_MARK_OFFENSIVE = 14
TYPE_ACTIVITY_UPDATE_TAGS = 15
TYPE_ACTIVITY_FAVORITE = 16
TYPE_ACTIVITY_USER_FULL_UPDATED = 17
TYPE_ACTIVITY_EMAIL_UPDATE_SENT = 18
TYPE_ACTIVITY_MENTION = 19
TYPE_ACTIVITY_UNANSWERED_REMINDER_SENT = 20
TYPE_ACTIVITY_ACCEPT_ANSWER_REMINDER_SENT = 21
TYPE_ACTIVITY_CREATE_TAG_WIKI = 22
TYPE_ACTIVITY_UPDATE_TAG_WIKI = 23
TYPE_ACTIVITY_MODERATED_NEW_POST = 24
TYPE_ACTIVITY_MODERATED_POST_EDIT = 25
TYPE_ACTIVITY_CREATE_REJECT_REASON = 26
TYPE_ACTIVITY_UPDATE_REJECT_REASON = 27
TYPE_ACTIVITY_VALIDATION_EMAIL_SENT = 28
TYPE_ACTIVITY_POST_SHARED = 29
TYPE_ACTIVITY_ASK_TO_JOIN_GROUP = 30
#TYPE_ACTIVITY_EDIT_QUESTION = 17
#TYPE_ACTIVITY_EDIT_ANSWER = 18
#todo: rename this to TYPE_ACTIVITY_CHOICES
TYPE_ACTIVITY = (
(TYPE_ACTIVITY_ASK_QUESTION, _('asked a question')),
(TYPE_ACTIVITY_ANSWER, _('answered a question')),
(TYPE_ACTIVITY_COMMENT_QUESTION, _('commented question')),
(TYPE_ACTIVITY_COMMENT_ANSWER, _('commented answer')),
(TYPE_ACTIVITY_UPDATE_QUESTION, _('edited question')),
(TYPE_ACTIVITY_UPDATE_ANSWER, _('edited answer')),
(TYPE_ACTIVITY_PRIZE, _('received badge')),
(TYPE_ACTIVITY_MARK_ANSWER, _('marked best answer')),
(TYPE_ACTIVITY_VOTE_UP, _('upvoted')),
(TYPE_ACTIVITY_VOTE_DOWN, _('downvoted')),
(TYPE_ACTIVITY_CANCEL_VOTE, _('canceled vote')),
(TYPE_ACTIVITY_DELETE_QUESTION, _('deleted question')),
(TYPE_ACTIVITY_DELETE_ANSWER, _('deleted answer')),
(TYPE_ACTIVITY_MARK_OFFENSIVE, _('marked offensive')),
(TYPE_ACTIVITY_UPDATE_TAGS, _('updated tags')),
(TYPE_ACTIVITY_FAVORITE, _('selected favorite')),
(TYPE_ACTIVITY_USER_FULL_UPDATED, _('completed user profile')),
(TYPE_ACTIVITY_EMAIL_UPDATE_SENT, _('email update sent to user')),
(TYPE_ACTIVITY_POST_SHARED, _('a post was shared')),
(
TYPE_ACTIVITY_UNANSWERED_REMINDER_SENT,
_('reminder about unanswered questions sent'),
),
(
TYPE_ACTIVITY_ACCEPT_ANSWER_REMINDER_SENT,
_('reminder about accepting the best answer sent'),
),
(TYPE_ACTIVITY_MENTION, _('mentioned in the post')),
(
TYPE_ACTIVITY_CREATE_TAG_WIKI,
_('created tag description'),
),
(
TYPE_ACTIVITY_UPDATE_TAG_WIKI,
_('updated tag description')
),
(TYPE_ACTIVITY_MODERATED_NEW_POST, _('made a new post')),
(
TYPE_ACTIVITY_MODERATED_POST_EDIT,
_('made an edit')
),
(
TYPE_ACTIVITY_CREATE_REJECT_REASON,
_('created post reject reason'),
),
(
TYPE_ACTIVITY_UPDATE_REJECT_REASON,
_('updated post reject reason')
),
(
TYPE_ACTIVITY_VALIDATION_EMAIL_SENT,
'sent email address validation message'#don't translate, internal
),
)
#MENTION activity is added implicitly, unfortunately
RESPONSE_ACTIVITY_TYPES_FOR_INSTANT_NOTIFICATIONS = (
TYPE_ACTIVITY_COMMENT_QUESTION,
TYPE_ACTIVITY_COMMENT_ANSWER,
TYPE_ACTIVITY_UPDATE_ANSWER,
TYPE_ACTIVITY_UPDATE_QUESTION,
TYPE_ACTIVITY_ANSWER,
TYPE_ACTIVITY_ASK_QUESTION,
TYPE_ACTIVITY_POST_SHARED
)
#the same as for instant notifications for now
#MENTION activity is added implicitly, unfortunately
RESPONSE_ACTIVITY_TYPES_FOR_DISPLAY = (
TYPE_ACTIVITY_ANSWER,
TYPE_ACTIVITY_ASK_QUESTION,
TYPE_ACTIVITY_COMMENT_QUESTION,
TYPE_ACTIVITY_COMMENT_ANSWER,
TYPE_ACTIVITY_UPDATE_ANSWER,
TYPE_ACTIVITY_UPDATE_QUESTION,
TYPE_ACTIVITY_POST_SHARED,
# TYPE_ACTIVITY_PRIZE,
# TYPE_ACTIVITY_MARK_ANSWER,
# TYPE_ACTIVITY_VOTE_UP,
# TYPE_ACTIVITY_VOTE_DOWN,
# TYPE_ACTIVITY_CANCEL_VOTE,
# TYPE_ACTIVITY_DELETE_QUESTION,
# TYPE_ACTIVITY_DELETE_ANSWER,
# TYPE_ACTIVITY_MARK_OFFENSIVE,
# TYPE_ACTIVITY_FAVORITE,
)
RESPONSE_ACTIVITY_TYPE_MAP_FOR_TEMPLATES = {
TYPE_ACTIVITY_COMMENT_QUESTION: 'question_comment',
TYPE_ACTIVITY_COMMENT_ANSWER: 'answer_comment',
TYPE_ACTIVITY_UPDATE_ANSWER: 'answer_update',
TYPE_ACTIVITY_UPDATE_QUESTION: 'question_update',
TYPE_ACTIVITY_ANSWER: 'new_answer',
TYPE_ACTIVITY_ASK_QUESTION: 'new_question',
TYPE_ACTIVITY_POST_SHARED: 'post_shared'
}
assert(
set(RESPONSE_ACTIVITY_TYPES_FOR_INSTANT_NOTIFICATIONS) \
== set(RESPONSE_ACTIVITY_TYPE_MAP_FOR_TEMPLATES.keys())
)
TYPE_RESPONSE = {
'QUESTION_ANSWERED' : _('answered question'),
'QUESTION_COMMENTED': _('commented question'),
'ANSWER_COMMENTED' : _('commented answer'),
'ANSWER_ACCEPTED' : _('accepted answer'),
}
POST_STATUS = {
'closed': _('[closed]'),
'deleted': _('[deleted]'),
'default_version': _('initial version'),
'retagged': _('retagged'),
'private': _('[private]')
}
#choices used in email and display filters
INCLUDE_ALL = 0
EXCLUDE_IGNORED = 1
INCLUDE_INTERESTING = 2
INCLUDE_SUBSCRIBED = 3
TAG_DISPLAY_FILTER_STRATEGY_MINIMAL_CHOICES = (
(INCLUDE_ALL, _('show all tags')),
(EXCLUDE_IGNORED, _('exclude ignored tags')),
(INCLUDE_INTERESTING, _('only interesting tags'))
)
TAG_DISPLAY_FILTER_STRATEGY_CHOICES = \
TAG_DISPLAY_FILTER_STRATEGY_MINIMAL_CHOICES + \
((INCLUDE_SUBSCRIBED, _('only subscribed tags')),)
TAG_EMAIL_FILTER_SIMPLE_STRATEGY_CHOICES = (
(INCLUDE_ALL, _('email for all tags')),
(EXCLUDE_IGNORED, _('exclude ignored tags')),
(INCLUDE_INTERESTING, _('only interesting tags')),
)
TAG_EMAIL_FILTER_ADVANCED_STRATEGY_CHOICES = (
(INCLUDE_ALL, _('email for all tags')),
(EXCLUDE_IGNORED, _('exclude ignored tags')),
(INCLUDE_SUBSCRIBED, _('only subscribed tags')),
)
TAG_EMAIL_FILTER_FULL_STRATEGY_CHOICES = (
(INCLUDE_ALL, _('email for all tags')),
(EXCLUDE_IGNORED, _('exclude ignored tags')),
(INCLUDE_INTERESTING, _('only interesting tags')),
(INCLUDE_SUBSCRIBED, _('only subscribed tags')),
)
NOTIFICATION_DELIVERY_SCHEDULE_CHOICES = (
('i',_('instantly')),
('d',_('daily')),
('w',_('weekly')),
('n',_('no email')),
)
USERS_PAGE_SIZE = 28#todo: move it to settings?
USERNAME_REGEX_STRING = r'^[\w \-.@+\']+$'
GRAVATAR_TYPE_CHOICES = (
('identicon',_('identicon')),
('mm',_('mystery-man')),
('monsterid',_('monsterid')),
('wavatar',_('wavatar')),
('retro',_('retro')),
)
#chars that can go before or after @mention
TWITTER_STYLE_MENTION_TERMINATION_CHARS = '\n ;:,.!?<>"\''
COMMENT_HARD_MAX_LENGTH = 2048
#user status choices
USER_STATUS_CHOICES = (
#in addition to these there is administrator
#admin status is determined by the User.is_superuser() call
('m', _('moderator')), #user with moderation privilege
('a', _('approved')), #regular user
('w', _('watched')), #regular user placed on the moderation watch
('s', _('suspended')), #suspended user who cannot post new stuff
('b', _('blocked')), #blocked
)
DEFAULT_USER_STATUS = 'w'
#number of items to show in user views
USER_VIEW_DATA_SIZE = 50
#not really dependency, but external links, which it would
#be nice to test for correctness from time to time
DEPENDENCY_URLS = {
'akismet': 'https://akismet.com/signup/',
'cc-by-sa': 'http://creativecommons.org/licenses/by-sa/3.0/legalcode',
'embedding-video': \
'http://askbot.org/doc/optional-modules.html#embedding-video',
'favicon': 'http://en.wikipedia.org/wiki/Favicon',
'facebook-apps': 'http://www.facebook.com/developers/createapp.php',
'google-webmaster-tools': 'https://www.google.com/webmasters/tools/home',
'identica-apps': 'http://identi.ca/settings/oauthapps',
'noscript': 'https://www.google.com/support/bin/answer.py?answer=23852',
'linkedin-apps': 'https://www.linkedin.com/secure/developer',
'mathjax': 'http://www.mathjax.org/resources/docs/?installation.html',
'recaptcha': 'http://google.com/recaptcha',
'twitter-apps': 'http://dev.twitter.com/apps/',
}
PASSWORD_MIN_LENGTH = 8
GOLD_BADGE = 1
SILVER_BADGE = 2
BRONZE_BADGE = 3
BADGE_TYPE_CHOICES = (
(GOLD_BADGE, _('gold')),
(SILVER_BADGE, _('silver')),
(BRONZE_BADGE, _('bronze')),
)
BADGE_CSS_CLASSES = {
GOLD_BADGE: 'badge1',
SILVER_BADGE: 'badge2',
BRONZE_BADGE: 'badge3',
}
BADGE_DISPLAY_SYMBOL = '●'
MIN_REPUTATION = 1
AVATAR_STATUS_CHOICE = (
('n', _('None')),
('g', _('Gravatar')),#only if user has real uploaded gravatar
('a', _('Uploaded Avatar')),#avatar uploaded locally - with django-avatar app
)
SEARCH_ORDER_BY = (
('-added_at', _('date descendant')),
('added_at', _('date ascendant')),
('-last_activity_at', _('activity descendant')),
('last_activity_at', _('activity ascendant')),
('-answer_count', _('answers descendant')),
('answer_count', _('answers ascendant')),
('-points', _('votes descendant')),
('points', _('votes ascendant')),
)
DEFAULT_QUESTION_WIDGET_STYLE = """
@import url('http://fonts.googleapis.com/css?family=Yanone+Kaffeesatz:300,400,700');
body {
overflow: hidden;
}
#container {
width: 200px;
height: 350px;
}
ul {
list-style: none;
padding: 5px;
margin: 5px;
}
li {
border-bottom: #CCC 1px solid;
padding-bottom: 5px;
padding-top: 5px;
}
li:last-child {
border: none;
}
a {
text-decoration: none;
color: #464646;
font-family: 'Yanone Kaffeesatz', sans-serif;
font-size: 15px;
}
"""
#an exception import * because that file has only strings
from askbot.const.message_keys import *
|
unknown
|
codeparrot/codeparrot-clean
| ||
"""
MPEG-2 Transport Stream parser.
Documentation:
- MPEG-2 Transmission
http://erg.abdn.ac.uk/research/future-net/digital-video/mpeg2-trans.html
Author: Victor Stinner
Creation date: 13 january 2007
"""
from hachoir_parser import Parser
from hachoir_core.field import (FieldSet, ParserError, MissingField,
UInt8, Enum, Bit, Bits, RawBytes)
from hachoir_core.endian import BIG_ENDIAN
from hachoir_core.text_handler import textHandler, hexadecimal
class Packet(FieldSet):
    """One MPEG-2 transport stream packet: 188 bytes, or 204 bytes when
    the packet carries 16 trailing bytes of error correction data."""
    def __init__(self, *args):
        FieldSet.__init__(self, *args)
        # NOTE(review): in the MPEG-2 spec the bit after the sync byte is the
        # transport_error_indicator; this parser uses it to select the
        # 204-byte packet variant with trailing error correction - confirm.
        if self["has_error"].value:
            self._size = 204*8
        else:
            self._size = 188*8
    # Well-known program identifiers for the Enum display of the "pid" field.
    PID = {
        0x0000: "Program Association Table (PAT)",
        0x0001: "Conditional Access Table (CAT)",
        # 0x0002..0x000f: reserved
        # 0x0010..0x1FFE: network PID, program map PID, elementary PID, etc.
        # TODO: Check above values
        #0x0044: "video",
        #0x0045: "audio",
        0x1FFF: "Null packet",
    }
    def createFields(self):
        """Yield header fields, the 184-byte payload and, for 204-byte
        packets, the 16 error correction bytes; raises ParserError when
        the sync byte is not 0x47."""
        yield textHandler(UInt8(self, "sync", 8), hexadecimal)
        if self["sync"].value != 0x47:
            raise ParserError("MPEG-2 TS: Invalid synchronization byte")
        yield Bit(self, "has_error")
        yield Bit(self, "payload_unit_start")
        yield Bit(self, "priority")
        yield Enum(textHandler(Bits(self, "pid", 13, "Program identifier"), hexadecimal), self.PID)
        yield Bits(self, "scrambling_control", 2)
        yield Bit(self, "has_adaptation")
        yield Bit(self, "has_payload")
        yield Bits(self, "counter", 4)
        yield RawBytes(self, "payload", 184)
        if self["has_error"].value:
            yield RawBytes(self, "error_correction", 16)
    def createDescription(self):
        """Short human-readable summary of the packet (PID, payload start)."""
        text = "Packet: PID %s" % self["pid"].display
        if self["payload_unit_start"].value:
            text += ", start of payload"
        return text
    def isValid(self):
        """Return an empty string when the packet looks valid, otherwise a
        unicode message describing the problem (used by MPEG_TS.validate)."""
        if not self["has_payload"].value and not self["has_adaptation"].value:
            return u"No payload and no adaptation"
        pid = self["pid"].value
        if (0x0002 <= pid <= 0x000f) or (0x2000 <= pid):
            return u"Invalid program identifier (%s)" % self["pid"].display
        return ""
class MPEG_TS(Parser):
    """MPEG-2 Transport Stream parser: a sequence of 188-byte (or 204-byte,
    with trailing error correction) packets, each starting with the 0x47
    synchronization byte."""
    PARSER_TAGS = {
        "id": "mpeg_ts",
        "category": "video",
        "file_ext": ("ts",),
        "min_size": 188*8,
        "description": u"MPEG-2 Transport Stream"
    }
    endian = BIG_ENDIAN
    def validate(self):
        """Check that the stream looks like a transport stream.

        Returns True on success, or an error message string. Validates up
        to the first five packets; hitting EOF after at least one parsed
        packet is accepted.
        """
        # A sync byte must occur within the first (largest) packet size.
        sync = self.stream.searchBytes("\x47", 0, 204*8)
        if sync is None:
            return "Unable to find synchronization byte"
        for index in xrange(5):
            try:
                packet = self["packet[%u]" % index]
            except (ParserError, MissingField):
                if index and self.eof:
                    return True
                else:
                    return "Unable to get packet #%u" % index
            err = packet.isValid()
            if err:
                return "Packet #%u is invalid: %s" % (index, err)
        return True
    def createFields(self):
        """Yield Packet fields until EOF, resynchronizing on 0x47 and
        emitting "incomplete_packet" raw fields for skipped garbage."""
        while not self.eof:
            sync = self.stream.searchBytes("\x47", self.current_size, self.current_size+204*8)
            if sync is None:
                raise ParserError("Unable to find synchronization byte")
            elif sync > self.current_size:
                # BUGFIX: the original condition was "elif sync:", which is
                # true for ANY non-zero bit offset - so a correctly aligned
                # packet at a non-zero position produced a zero-length
                # "incomplete_packet" field. Only emit the filler field when
                # bytes were actually skipped before the sync marker.
                yield RawBytes(self, "incomplete_packet[]", (sync-self.current_size)//8)
            yield Packet(self, "packet[]")
|
unknown
|
codeparrot/codeparrot-clean
| ||
[
{
"pk": 1,
"model": "composite_pk.tenant",
"fields": {
"id": 1,
"name": "Tenant 1"
}
},
{
"pk": 2,
"model": "composite_pk.tenant",
"fields": {
"id": 2,
"name": "Tenant 2"
}
},
{
"pk": 3,
"model": "composite_pk.tenant",
"fields": {
"id": 3,
"name": "Tenant 3"
}
},
{
"pk": [1, 1],
"model": "composite_pk.user",
"fields": {
"tenant_id": 1,
"id": 1,
"email": "user0001@example.com"
}
},
{
"pk": [1, 2],
"model": "composite_pk.user",
"fields": {
"tenant_id": 1,
"id": 2,
"email": "user0002@example.com"
}
},
{
"pk": [2, 3],
"model": "composite_pk.user",
"fields": {
"email": "user0003@example.com"
}
},
{
"model": "composite_pk.user",
"fields": {
"tenant_id": 2,
"id": 4,
"email": "user0004@example.com"
}
},
{
"pk": [2, "11111111-1111-1111-1111-111111111111"],
"model": "composite_pk.post",
"fields": {
"tenant_id": 2,
"id": "11111111-1111-1111-1111-111111111111"
}
},
{
"pk": [2, "ffffffff-ffff-ffff-ffff-ffffffffffff"],
"model": "composite_pk.post",
"fields": {
"tenant_id": 2,
"id": "ffffffff-ffff-ffff-ffff-ffffffffffff"
}
},
{
"pk": [1, "2022-01-12T05:55:14.956"],
"model": "composite_pk.timestamped",
"fields": {
"id": 1,
"created": "2022-01-12T05:55:14.956",
"text": ""
}
}
]
|
json
|
github
|
https://github.com/django/django
|
tests/composite_pk/fixtures/tenant.json
|
# MongoDB Development with Dev Containers
**⚠️ IMPORTANT:** This guide has been replaced with comprehensive documentation.
**👉 Please visit the new [Dev Container Documentation](./devcontainer/README.md) for:**
- 📖 [**Getting Started Guide**](./devcontainer/getting-started.md) - Step-by-step setup instructions
- 🏗️ [**Architecture & Technical Details**](./devcontainer/architecture.md) - How everything works under the hood
- 🔧 [**Troubleshooting Guide**](./devcontainer/troubleshooting.md) - Solutions to common issues
- 💡 [**Advanced Usage**](./devcontainer/advanced.md) - Customization and power user features
- ❓ [**FAQ**](./devcontainer/faq.md) - Frequently asked questions
|
unknown
|
github
|
https://github.com/mongodb/mongo
|
docs/devcontainer-setup.md
|
"""
==========================================
IsolationForest benchmark
==========================================
A test of IsolationForest on classical anomaly detection datasets.
"""
print(__doc__)
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import IsolationForest
from sklearn.metrics import roc_curve, auc
from sklearn.datasets import fetch_kddcup99, fetch_covtype, fetch_mldata
from sklearn.preprocessing import LabelBinarizer
from sklearn.utils import shuffle as sh
# Fix the RNG so dataset shuffles and the forest are reproducible.
np.random.seed(1)
datasets = ['http', 'smtp', 'SA', 'SF', 'shuttle', 'forestcover']
# One shared figure collects the ROC curve of every dataset.
fig_roc, ax_roc = plt.subplots(1, 1, figsize=(8, 5))
for dat in datasets:
    # loading and vectorization
    print('loading data')
    if dat in ['http', 'smtp', 'SA', 'SF']:
        dataset = fetch_kddcup99(subset=dat, shuffle=True, percent10=True)
        X = dataset.data
        y = dataset.target
    if dat == 'shuttle':
        # NOTE(review): fetch_mldata relies on the defunct mldata.org
        # mirror and is deprecated - consider fetch_openml('shuttle').
        dataset = fetch_mldata('shuttle')
        X = dataset.data
        y = dataset.target
        X, y = sh(X, y)
        # we remove data with label 4
        # normal data are then those of class 1
        s = (y != 4)
        X = X[s, :]
        y = y[s]
        y = (y != 1).astype(int)
    if dat == 'forestcover':
        dataset = fetch_covtype(shuffle=True)
        X = dataset.data
        y = dataset.target
        # normal data are those with attribute 2
        # abnormal those with attribute 4
        s = (y == 2) + (y == 4)
        X = X[s, :]
        y = y[s]
        y = (y != 2).astype(int)
    print('vectorizing data')
    if dat == 'SF':
        # one-hot encode the categorical protocol column (index 1)
        lb = LabelBinarizer()
        lb.fit(X[:, 1])
        x1 = lb.transform(X[:, 1])
        X = np.c_[X[:, :1], x1, X[:, 2:]]
        y = (y != 'normal.').astype(int)
    if dat == 'SA':
        # one-hot encode the three categorical columns (indices 1-3)
        lb = LabelBinarizer()
        lb.fit(X[:, 1])
        x1 = lb.transform(X[:, 1])
        lb.fit(X[:, 2])
        x2 = lb.transform(X[:, 2])
        lb.fit(X[:, 3])
        x3 = lb.transform(X[:, 3])
        X = np.c_[X[:, :1], x1, x2, x3, X[:, 4:]]
        y = (y != 'normal.').astype(int)
    if dat == 'http' or dat == 'smtp':
        y = (y != 'normal.').astype(int)
    # First half trains the (unsupervised) forest, second half is scored;
    # labels are only needed for the test half (ROC evaluation).
    n_samples = X.shape[0]
    n_samples_train = n_samples // 2
    X = X.astype(float)
    X_train = X[:n_samples_train, :]
    X_test = X[n_samples_train:, :]
    y_test = y[n_samples_train:]
    print('IsolationForest processing...')
    model = IsolationForest(n_jobs=-1)
    tstart = time()
    model.fit(X_train)
    fit_time = time() - tstart
    tstart = time()
    scoring = - model.decision_function(X_test)  # the lower, the more normal
    # BUGFIX: stop the prediction timer immediately after decision_function();
    # previously predict_time was computed only after the histograms were
    # drawn, so the reported test time included matplotlib rendering.
    predict_time = time() - tstart
    # Show score histograms
    fig, ax = plt.subplots(3, sharex=True, sharey=True)
    bins = np.linspace(-0.5, 0.5, 200)
    # (removed a dead ax[0].legend() call: this subplot has no labeled
    # artists, so it only emitted a "no handles" warning)
    ax[0].hist(scoring, bins, color='black')
    ax[0].set_title('decision function for %s dataset' % dat)
    ax[1].hist(scoring[y_test == 0], bins, color='b',
               label='normal data')
    ax[1].legend(loc="lower right")
    ax[2].hist(scoring[y_test == 1], bins, color='r',
               label='outliers')
    ax[2].legend(loc="lower right")
    # Show ROC Curves
    fpr, tpr, thresholds = roc_curve(y_test, scoring)
    AUC = auc(fpr, tpr)
    label = ('%s (area: %0.3f, train-time: %0.2fs, '
             'test-time: %0.2fs)' % (dat, AUC, fit_time, predict_time))
    ax_roc.plot(fpr, tpr, lw=1, label=label)
ax_roc.set_xlim([-0.05, 1.05])
ax_roc.set_ylim([-0.05, 1.05])
ax_roc.set_xlabel('False Positive Rate')
ax_roc.set_ylabel('True Positive Rate')
ax_roc.set_title('Receiver operating characteristic (ROC) curves')
ax_roc.legend(loc="lower right")
fig_roc.tight_layout()
plt.show()
|
unknown
|
codeparrot/codeparrot-clean
| ||
# frozen_string_literal: true
class Hardback < ActiveRecord::Base
  # Plain ActiveRecord model with no custom behavior; serves as the
  # single-table-inheritance base class for BestHardback.
end
class BestHardback < Hardback
  # STI subclass of Hardback; adds no behavior of its own.
end
|
ruby
|
github
|
https://github.com/rails/rails
|
activerecord/test/models/hardback.rb
|
from datetime import datetime
from django.core.exceptions import FieldError
from django.db import DEFAULT_DB_ALIAS, connection
from django.db.models import BooleanField, CharField, F, Q
from django.db.models.expressions import (
Col,
Exists,
ExpressionWrapper,
Func,
RawSQL,
Value,
)
from django.db.models.fields.related_lookups import RelatedIsNull
from django.db.models.functions import Lower
from django.db.models.lookups import Exact, GreaterThan, IsNull, LessThan
from django.db.models.sql.constants import SINGLE
from django.db.models.sql.query import JoinPromoter, Query, get_field_names_from_opts
from django.db.models.sql.where import AND, OR
from django.test import SimpleTestCase, TestCase, skipUnlessDBFeature
from django.test.utils import register_lookup
from .models import Author, Item, ObjectC, Ranking
class TestQuery(SimpleTestCase):
    """Unit tests for sql.Query.build_where(): the lookup/WhereNode trees
    it builds from Q objects, without touching a database."""
    def test_simple_query(self):
        """A single Q(num__gt=...) builds one GreaterThan lookup."""
        query = Query(Author)
        where = query.build_where(Q(num__gt=2))
        lookup = where.children[0]
        self.assertIsInstance(lookup, GreaterThan)
        self.assertEqual(lookup.rhs, 2)
        self.assertEqual(lookup.lhs.target, Author._meta.get_field("num"))
    def test_non_alias_cols_query(self):
        """With alias_cols=False, every Col in the tree has alias None."""
        query = Query(Author, alias_cols=False)
        where = query.build_where(Q(num__gt=2, name__isnull=False) | Q(num__lt=F("id")))
        name_isnull_lookup, num_gt_lookup = where.children[0].children
        self.assertIsInstance(num_gt_lookup, GreaterThan)
        self.assertIsInstance(num_gt_lookup.lhs, Col)
        self.assertIsNone(num_gt_lookup.lhs.alias)
        self.assertIsInstance(name_isnull_lookup, IsNull)
        self.assertIsInstance(name_isnull_lookup.lhs, Col)
        self.assertIsNone(name_isnull_lookup.lhs.alias)
        num_lt_lookup = where.children[1]
        self.assertIsInstance(num_lt_lookup, LessThan)
        self.assertIsInstance(num_lt_lookup.rhs, Col)
        self.assertIsNone(num_lt_lookup.rhs.alias)
        self.assertIsInstance(num_lt_lookup.lhs, Col)
        self.assertIsNone(num_lt_lookup.lhs.alias)
    def test_complex_query(self):
        """OR-ed Qs produce a WhereNode with OR connector and two lookups."""
        query = Query(Author)
        where = query.build_where(Q(num__gt=2) | Q(num__lt=0))
        self.assertEqual(where.connector, OR)
        lookup = where.children[0]
        self.assertIsInstance(lookup, GreaterThan)
        self.assertEqual(lookup.rhs, 2)
        self.assertEqual(lookup.lhs.target, Author._meta.get_field("num"))
        lookup = where.children[1]
        self.assertIsInstance(lookup, LessThan)
        self.assertEqual(lookup.rhs, 0)
        self.assertEqual(lookup.lhs.target, Author._meta.get_field("num"))
    def test_multiple_fields(self):
        """An F() reference on the rhs resolves to a Col, not a literal."""
        query = Query(Item, alias_cols=False)
        where = query.build_where(Q(modified__gt=F("created")))
        lookup = where.children[0]
        self.assertIsInstance(lookup, GreaterThan)
        self.assertIsInstance(lookup.rhs, Col)
        self.assertIsNone(lookup.rhs.alias)
        self.assertIsInstance(lookup.lhs, Col)
        self.assertIsNone(lookup.lhs.alias)
        self.assertEqual(lookup.rhs.target, Item._meta.get_field("created"))
        self.assertEqual(lookup.lhs.target, Item._meta.get_field("modified"))
    def test_transform(self):
        """A registered transform (Lower) wraps the Col on the lhs."""
        query = Query(Author, alias_cols=False)
        with register_lookup(CharField, Lower):
            where = query.build_where(~Q(name__lower="foo"))
        lookup = where.children[0]
        self.assertIsInstance(lookup, Exact)
        self.assertIsInstance(lookup.lhs, Lower)
        self.assertIsInstance(lookup.lhs.lhs, Col)
        self.assertIsNone(lookup.lhs.lhs.alias)
        self.assertEqual(lookup.lhs.lhs.target, Author._meta.get_field("name"))
    def test_negated_nullable(self):
        """Negating a lookup on a nullable field adds an IsNull check so
        NULL rows are included in the negation."""
        query = Query(Item)
        where = query.build_where(~Q(modified__lt=datetime(2017, 1, 1)))
        self.assertTrue(where.negated)
        lookup = where.children[0]
        self.assertIsInstance(lookup, LessThan)
        self.assertEqual(lookup.lhs.target, Item._meta.get_field("modified"))
        lookup = where.children[1]
        self.assertIsInstance(lookup, IsNull)
        self.assertEqual(lookup.lhs.target, Item._meta.get_field("modified"))
    def test_foreign_key(self):
        """build_where() rejects lookups that would require a join."""
        query = Query(Item)
        msg = "Joined field references are not permitted in this query"
        with self.assertRaisesMessage(FieldError, msg):
            query.build_where(Q(creator__num__gt=2))
    def test_foreign_key_f(self):
        """F() references across a relation are rejected as well."""
        query = Query(Ranking)
        with self.assertRaises(FieldError):
            query.build_where(Q(rank__gt=F("author__num")))
    def test_foreign_key_exclusive(self):
        """fk=None comparisons become RelatedIsNull on the local column."""
        query = Query(ObjectC, alias_cols=False)
        where = query.build_where(Q(objecta=None) | Q(objectb=None))
        a_isnull = where.children[0]
        self.assertIsInstance(a_isnull, RelatedIsNull)
        self.assertIsInstance(a_isnull.lhs, Col)
        self.assertIsNone(a_isnull.lhs.alias)
        self.assertEqual(a_isnull.lhs.target, ObjectC._meta.get_field("objecta"))
        b_isnull = where.children[1]
        self.assertIsInstance(b_isnull, RelatedIsNull)
        self.assertIsInstance(b_isnull.lhs, Col)
        self.assertIsNone(b_isnull.lhs.alias)
        self.assertEqual(b_isnull.lhs.target, ObjectC._meta.get_field("objectb"))
    def test_clone_select_related(self):
        """Mutating a clone's select_related must not leak into the original."""
        query = Query(Item)
        query.add_select_related(["creator"])
        clone = query.clone()
        clone.add_select_related(["note", "creator__extra"])
        self.assertEqual(query.select_related, {"creator": {}})
    def test_iterable_lookup_value(self):
        """A list value for an exact lookup is coerced to its string form."""
        query = Query(Item)
        where = query.build_where(Q(name=["a", "b"]))
        name_exact = where.children[0]
        self.assertIsInstance(name_exact, Exact)
        self.assertEqual(name_exact.rhs, "['a', 'b']")
    def test_filter_conditional(self):
        """A boolean-valued expression is wrapped as `expr = True`."""
        query = Query(Item)
        where = query.build_where(Func(output_field=BooleanField()))
        exact = where.children[0]
        self.assertIsInstance(exact, Exact)
        self.assertIsInstance(exact.lhs, Func)
        self.assertIs(exact.rhs, True)
    def test_filter_conditional_join(self):
        """Conditional expressions may not reference joined fields either."""
        query = Query(Item)
        filter_expr = Func("note__note", output_field=BooleanField())
        msg = "Joined field references are not permitted in this query"
        with self.assertRaisesMessage(FieldError, msg):
            query.build_where(filter_expr)
    def test_filter_non_conditional(self):
        """Non-boolean expressions cannot be used as filters."""
        query = Query(Item)
        msg = "Cannot filter against a non-conditional expression."
        with self.assertRaisesMessage(TypeError, msg):
            query.build_where(Func(output_field=CharField()))
class TestQueryNoModel(TestCase):
    """Tests for Query(None): queries not bound to any model, driven purely
    by annotations. Uses TestCase because these actually hit the database."""
    def test_rawsql_annotation(self):
        """A RawSQL boolean annotation executes and returns its value."""
        query = Query(None)
        sql = "%s = 1"
        # Wrap with a CASE WHEN expression if a database backend (e.g. Oracle)
        # doesn't support boolean expression in SELECT list.
        if not connection.features.supports_boolean_expr_in_select_clause:
            sql = f"CASE WHEN {sql} THEN 1 ELSE 0 END"
        query.add_annotation(RawSQL(sql, (1,), BooleanField()), "_check")
        result = query.get_compiler(using=DEFAULT_DB_ALIAS).execute_sql(SINGLE)
        self.assertEqual(result[0], 1)
    def test_subquery_annotation(self):
        """Exists() over an empty table annotates to a falsy value."""
        query = Query(None)
        query.add_annotation(Exists(Item.objects.all()), "_check")
        result = query.get_compiler(using=DEFAULT_DB_ALIAS).execute_sql(SINGLE)
        self.assertEqual(result[0], 0)
    @skipUnlessDBFeature("supports_boolean_expr_in_select_clause")
    def test_q_annotation(self):
        """Q objects combined with | can be annotated via ExpressionWrapper."""
        query = Query(None)
        check = ExpressionWrapper(
            Q(RawSQL("%s = 1", (1,), BooleanField())) | Q(Exists(Item.objects.all())),
            BooleanField(),
        )
        query.add_annotation(check, "_check")
        result = query.get_compiler(using=DEFAULT_DB_ALIAS).execute_sql(SINGLE)
        self.assertEqual(result[0], 1)
    def test_names_to_path_field(self):
        """names_to_path() resolves an annotation name with opts=None."""
        query = Query(None)
        query.add_annotation(Value(True), "value")
        path, final_field, targets, names = query.names_to_path(["value"], opts=None)
        self.assertEqual(path, [])
        self.assertIsInstance(final_field, BooleanField)
        self.assertEqual(len(targets), 1)
        self.assertIsInstance(targets[0], BooleanField)
        self.assertEqual(names, [])
    def test_names_to_path_field_error(self):
        """Unknown names raise FieldError even without a model."""
        query = Query(None)
        msg = "Cannot resolve keyword 'nonexistent' into field."
        with self.assertRaisesMessage(FieldError, msg):
            query.names_to_path(["nonexistent"], opts=None)
    def test_get_field_names_from_opts(self):
        """opts=None yields an empty field-name set."""
        self.assertEqual(get_field_names_from_opts(None), set())
class JoinPromoterTest(SimpleTestCase):
    """Sanity check for JoinPromoter's debugging repr()."""
    def test_repr(self):
        """repr() shows connector, num_children and negated."""
        self.assertEqual(
            repr(JoinPromoter(AND, 3, True)),
            "JoinPromoter(connector='AND', num_children=3, negated=True)",
        )
|
python
|
github
|
https://github.com/django/django
|
tests/queries/test_query.py
|
import unittest
try:
from unittest.mock import *
except ImportError:
from mock import *
from msgpack import *
from cvra_bootloader.read_config import main
from cvra_bootloader.commands import *
import sys
import json
class ReadConfigToolTestCase(unittest.TestCase):
    """Integration-style tests for the read_config CLI entry point, with all
    bus I/O helpers mocked out. NOTE: @patch decorators are applied bottom-up,
    so mock parameters arrive in reverse decorator order."""
    @patch('cvra_bootloader.utils.write_command_retry')
    @patch('cvra_bootloader.utils.write_command')
    @patch('cvra_bootloader.utils.open_connection')
    @patch('builtins.print')
    def test_integration(self, print_mock, open_conn, write_command,
                         write_command_retry):
        """Reading configs from explicit board IDs queries those boards and
        prints the merged configs as sorted, indented JSON."""
        sys.argv = "test.py -p /dev/ttyUSB0 0 1 2".split()
        configs = [{'id': i} for i in range(3)]
        # Boards answer with msgpack-encoded config dicts keyed by board id.
        write_command_retry.return_value = {
            i: packb(configs[i]) for i in range(3)
        }
        open_conn.return_value = object()
        main()
        write_command_retry.assert_any_call(open_conn.return_value,
                                            encode_read_config(), [0, 1, 2])
        all_configs = {i: configs[i] for i in range(3)}
        print_mock.assert_any_call(json.dumps(all_configs, indent=4,
                                              sort_keys=True))
    @patch('cvra_bootloader.utils.open_connection')
    @patch('cvra_bootloader.utils.write_command_retry')
    @patch('cvra_bootloader.utils.write_command')
    @patch('cvra_bootloader.utils.read_can_datagrams')
    @patch('builtins.print')
    def test_network_discovery(self, print_mock, read_can_datagram,
                               write_command, write_command_retry, open_conn):
        """
        Checks if we can perform a whole network discovery.
        """
        sys.argv = "test.py -p /dev/ttyUSB0 --all".split()
        # The first two boards answer the ping; the trailing None ends the
        # datagram stream.
        board_answers = [(b'', [0], i) for i in range(1, 3)] + [None]
        read_can_datagram.return_value = iter(board_answers)
        write_command_retry.return_value = {
            i: packb({'id': i}) for i in range(1, 3)
        }
        main()
        # Discovery must ping the full board-ID range 1..127.
        write_command.assert_any_call(open_conn.return_value,
                                      encode_ping(),
                                      list(range(1, 128)))
|
unknown
|
codeparrot/codeparrot-clean
| ||
# -*- coding: utf-8 -*-
import json
from ..base.decrypter import BaseDecrypter
class MediafireComFolder(BaseDecrypter):
    __name__ = "MediafireComFolder"
    __type__ = "decrypter"
    __version__ = "0.25"
    __status__ = "testing"
    # Matches folder URLs, ?sharekey= URLs and bare ?<key> URLs; the folder
    # key is captured in the "ID" group.
    __pattern__ = (
        r"https?://(?:www\.)?mediafire\.com/(?:folder/|\?sharekey=|\?)(?P<ID>\w+)"
    )
    __config__ = [
        ("enabled", "bool", "Activated", True),
        ("use_premium", "bool", "Use premium account if available", True),
        (
            "folder_per_package",
            "Default;Yes;No",
            "Create folder for each package",
            "Default",
        ),
    ]
    __description__ = """Mediafire.com folder decrypter plugin"""
    __license__ = "GPLv3"
    __authors__ = [
        ("zoidberg", "zoidberg@mujmail.cz"),
        ("GammaC0de", "nitzo2001[AT]yahoo[DOT]com"),
    ]
    # See http://www.mediafire.com/developers/core_api/
    API_URL = "http://www.mediafire.com/api/"
    def api_response(self, method, **kwargs):
        """Call a mediafire core API method and return the decoded JSON.

        Aborts via self.fail() (BaseDecrypter) with the API's own message
        when the response result is not "Success".
        """
        kwargs["response_format"] = "json"
        json_data = self.load(self.API_URL + method + ".php", get=kwargs)
        res = json.loads(json_data)
        if res["response"]["result"] != "Success":
            self.fail(res["response"]["message"])
        return res
    def decrypt(self, pyfile):
        """Resolve a folder URL into its contained file links.

        Fetches the folder name (falling back to the current package name)
        and the folder's file list, then appends one package with a direct
        link per contained file.
        """
        api_data = self.api_response(
            "folder/get_info", folder_key=self.info["pattern"]["ID"]
        )
        pack_name = (
            api_data["response"]["folder_info"].get("name")
            or self.pyfile.package().name
        )
        api_data = self.api_response(
            "folder/get_content",
            folder_key=self.info["pattern"]["ID"],
            content_type="files",
        )
        pack_links = [
            "http://www.mediafire.com/file/{}".format(f["quickkey"])
            for f in api_data["response"]["folder_content"]["files"]
        ]
        # Empty folders produce no package at all.
        if pack_links:
            self.packages.append((pack_name, pack_links, pack_name))
|
unknown
|
codeparrot/codeparrot-clean
| ||
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package dryrun
import (
"fmt"
"io"
"net"
"os"
"path/filepath"
"strconv"
"time"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
errorsutil "k8s.io/apimachinery/pkg/util/errors"
kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants"
"k8s.io/kubernetes/cmd/kubeadm/app/util/apiclient"
)
// FileToPrint represents a temporary file on disk that might want to be aliased when printing
// Useful for things like loading a file from /tmp/ but saying to the user "Would write file foo to /etc/kubernetes/..."
type FileToPrint struct {
	RealPath  string // where the file actually lives on disk
	PrintPath string // path shown to the user; falls back to RealPath if empty
}
// NewFileToPrint makes a new instance of FileToPrint with the specified arguments
func NewFileToPrint(realPath, printPath string) FileToPrint {
	// Plain constructor; no validation is performed on either path.
	f := FileToPrint{
		RealPath:  realPath,
		PrintPath: printPath,
	}
	return f
}
// PrintDryRunFile is a helper method around PrintDryRunFiles
func PrintDryRunFile(fileName, realDir, printDir string, w io.Writer) error {
return PrintDryRunFiles([]FileToPrint{
NewFileToPrint(filepath.Join(realDir, fileName), filepath.Join(printDir, fileName)),
}, w)
}
// PrintDryRunFiles prints the contents of the FileToPrints given to it to the writer w
func PrintDryRunFiles(files []FileToPrint, w io.Writer) error {
	// Collect per-file read errors and report them all at the end.
	var errs []error
	for _, f := range files {
		if f.RealPath == "" {
			continue
		}
		content, readErr := os.ReadFile(f.RealPath)
		if readErr != nil {
			errs = append(errs, readErr)
			continue
		}
		// Show the aliased path when one was provided (e.g. the file was
		// written under /tmp but would really go to /etc/kubernetes/...);
		// otherwise fall back to the real path.
		shownPath := f.PrintPath
		if shownPath == "" {
			shownPath = f.RealPath
		}
		shownPath = filepath.ToSlash(shownPath)
		fmt.Fprintf(w, "[dryrun] Would write file %q with content:\n", shownPath)
		fmt.Fprintf(w, "%s", content)
	}
	return errorsutil.NewAggregate(errs)
}
// Waiter is an implementation of apiclient.Waiter that should be used for dry-running
// Every wait method either prints what a real run would block on or returns immediately.
type Waiter struct{}
// NewWaiter returns a new Waiter object that talks to the given Kubernetes cluster
func NewWaiter() apiclient.Waiter {
	w := &Waiter{}
	return w
}
// WaitForControlPlaneComponents just returns a dummy nil, to indicate that the program should just proceed
func (w *Waiter) WaitForControlPlaneComponents(podsMap map[string]*v1.Pod, apiServerAddress string) error {
	// Dry-run: there is no live cluster to poll.
	return nil
}
// WaitForPodsWithLabel just returns a dummy nil, to indicate that the program should just proceed
func (w *Waiter) WaitForPodsWithLabel(kvLabel string) error {
	// Announce what a real run would block on, then proceed immediately.
	fmt.Printf("[dryrun] Would wait for the Pods with the label %q in the %s namespace to become Running\n", kvLabel, metav1.NamespaceSystem)
	return nil
}
// WaitForKubelet blocks until the kubelet /healthz endpoint returns 'ok'
func (w *Waiter) WaitForKubelet(healthzAddress string, healthzPort int32) error {
	// Build the endpoint a real run would probe, but only announce it.
	addrPort := net.JoinHostPort(healthzAddress, strconv.Itoa(int(healthzPort)))
	healthzEndpoint := fmt.Sprintf("http://%s/healthz", addrPort)
	fmt.Printf("[dryrun] Would make sure the kubelet returns 'ok' at %s\n", healthzEndpoint)
	return nil
}
// SetTimeout is a no-op; we don't wait in this implementation
func (w *Waiter) SetTimeout(_ time.Duration) {}
// WaitForStaticPodControlPlaneHashes returns an empty hash for all control plane images;
func (w *Waiter) WaitForStaticPodControlPlaneHashes(_ string) (map[string]string, error) {
	// Dry-run: no pods exist, so every component maps to the empty hash.
	return map[string]string{
		kubeadmconstants.KubeAPIServer:         "",
		kubeadmconstants.KubeControllerManager: "",
		kubeadmconstants.KubeScheduler:         "",
	}, nil
}
// WaitForStaticPodSingleHash returns an empty hash
// but the empty strings there are needed
func (w *Waiter) WaitForStaticPodSingleHash(_ string, _ string) (string, error) {
	// Callers compare hashes; an empty string keeps the dry-run flow moving.
	return "", nil
}
// WaitForStaticPodHashChange returns a dummy nil error in order for the flow to just continue as we're dryrunning
func (w *Waiter) WaitForStaticPodHashChange(_, _, _ string) error {
	// Dry-run: pretend the hash already changed.
	return nil
}
// PrintFilesIfDryRunning prints the static pod manifests to stdout and informs about the temporary directory to go and lookup when dry running
func PrintFilesIfDryRunning(needPrintManifest bool, manifestDir string, outputWriter io.Writer) error {
	var files []FileToPrint

	// Print static pod manifests if it is a control plane
	if needPrintManifest {
		fmt.Printf("[dryrun] Wrote certificates, kubeconfig files and control plane manifests to the %q directory\n", manifestDir)
		for _, component := range kubeadmconstants.ControlPlaneComponents {
			files = append(files, NewFileToPrint(
				kubeadmconstants.GetStaticPodFilepath(component, manifestDir),
				kubeadmconstants.GetStaticPodFilepath(component, kubeadmconstants.GetStaticPodDirectory()),
			))
		}
	} else {
		fmt.Printf("[dryrun] Wrote certificates and kubeconfig files to the %q directory\n", manifestDir)
	}

	fmt.Println("[dryrun] The certificates or kubeconfig files would not be printed due to their sensitive nature")
	fmt.Printf("[dryrun] Please examine the %q directory for details about what would be written\n", manifestDir)

	// Print kubelet config manifests
	for _, filename := range []string{kubeadmconstants.KubeletConfigurationFileName, kubeadmconstants.KubeletEnvFileName} {
		files = append(files, NewFileToPrint(
			filepath.Join(manifestDir, filename),
			filepath.Join(kubeadmconstants.KubeletRunDirectory, filename),
		))
	}

	return PrintDryRunFiles(files, outputWriter)
}
|
go
|
github
|
https://github.com/kubernetes/kubernetes
|
cmd/kubeadm/app/util/dryrun/dryrun.go
|
# Module metadata for the pylearn2 project.
__authors__ = "Ian Goodfellow"
__copyright__ = "Copyright 2010-2012, Universite de Montreal"
__credits__ = ["Ian Goodfellow"]
__license__ = "3-clause BSD"
__maintainer__ = "LISA Lab"
__email__ = "pylearn-dev@googlegroups"
# This benchmark only makes sense on a GPU; skip the whole module otherwise.
from pylearn2.testing.skip import skip_if_no_gpu
skip_if_no_gpu()
import numpy as np
from theano import shared
from pylearn2.sandbox.cuda_convnet.filter_acts import FilterActs
from theano.tensor.nnet.conv import conv2d
from theano import function
import time
import matplotlib.pyplot as plt
def make_funcs(batch_size, rows, cols, channels, filter_rows,
               num_filters):
    """Build the two benchmark callables: a cuda-convnet path and a
    theano conv2d baseline.

    Returns (cuda_convnet, baseline) -- two compiled theano functions that
    each recompute and store their convolution output when called.
    """
    rng = np.random.RandomState([2012, 10, 9])
    filter_cols = filter_rows
    base_image_value = rng.uniform(-1., 1., (channels, rows, cols,
                                             batch_size)).astype('float32')
    base_filters_value = rng.uniform(-1., 1., (channels, filter_rows,
                                               filter_cols, num_filters)).astype('float32')
    images = shared(base_image_value)
    filters = shared(base_filters_value, name='filters')
    # bench.py should always be run in gpu mode so we should not need a gpu_from_host here
    layer_1_detector = FilterActs()(images, filters)
    # Fake 2x2 pooling by striding over the first two spatial-ish axes.
    layer_1_pooled_fake = layer_1_detector[:, 0:layer_1_detector.shape[0]:2,
                                           0:layer_1_detector.shape[1]:2, :]
    base_filters2_value = rng.uniform(-1., 1., (num_filters, filter_rows,
                                                filter_cols, num_filters)).astype('float32')
    # BUG FIX: filters2 previously wrapped base_filters_value, leaving
    # base_filters2_value unused, and layer 2 consumed `images` again --
    # so the "two-layer" benchmark just ran layer 1 twice with identical
    # weights. Use the second weight tensor and feed the pooled layer-1
    # output, which matches filters2's num_filters input channels.
    filters2 = shared(base_filters2_value, name='filters2')
    layer_2_detector = FilterActs()(layer_1_pooled_fake, filters2)
    output = layer_2_detector
    output_shared = shared(output.eval())
    cuda_convnet = function([], updates={output_shared: output})
    cuda_convnet.name = 'cuda_convnet'
    # Baseline: plain theano conv2d in bc01 layout. NOTE(review): the
    # baseline runs only the first layer -- comparison is indicative only.
    images_bc01 = base_image_value.transpose(3, 0, 1, 2)
    filters_bc01 = base_filters_value.transpose(3, 0, 1, 2)
    # conv2d performs true convolution; flip the kernels so it matches
    # FilterActs' correlation.
    filters_bc01 = filters_bc01[:, :, ::-1, ::-1]
    images_bc01 = shared(images_bc01)
    filters_bc01 = shared(filters_bc01)
    output_conv2d = conv2d(images_bc01, filters_bc01,
                           border_mode='valid')
    output_conv2d_shared = shared(output_conv2d.eval())
    baseline = function([], updates={output_conv2d_shared: output_conv2d})
    baseline.name = 'baseline'
    return cuda_convnet, baseline
def bench(f):
    """Return the average wall-clock seconds of one call to ``f``.

    Runs three untimed warm-up calls (theano compilation, caches), then
    averages over 10 timed calls.
    """
    # FIX: `xrange` is Python-2-only; `range` behaves identically here
    # and works on both Python 2 and 3.
    for _ in range(3):
        f()
    trials = 10
    t1 = time.time()
    for _ in range(trials):
        f()
    t2 = time.time()
    return (t2 - t1) / float(trials)
def get_speedup(*args, **kwargs):
    """Ratio of baseline runtime to cuda-convnet runtime (>1 means cuda-convnet is faster)."""
    cuda_convnet, baseline = make_funcs(*args, **kwargs)
    baseline_time = bench(baseline)
    cuda_time = bench(cuda_convnet)
    return baseline_time / cuda_time
def get_time_per_10k_ex(*args, **kwargs):
    """Extrapolate the cuda-convnet runtime to 10,000 examples."""
    cuda_convnet, _baseline = make_funcs(*args, **kwargs)
    per_example = bench(cuda_convnet) / float(kwargs['batch_size'])
    return 10000 * per_example
def make_batch_size_plot(yfunc, yname, batch_sizes, rows, cols, channels, filter_rows, num_filters):
    """Evaluate ``yfunc`` at every batch size and plot the measurements."""
    measurements = [
        yfunc(batch_size=bs,
              rows=rows,
              cols=cols,
              channels=channels,
              filter_rows=filter_rows,
              num_filters=num_filters)
        for bs in batch_sizes
    ]
    plt.plot(batch_sizes, measurements)
    plt.title("cuda-convnet benchmark")
    plt.xlabel("Batch size")
    plt.ylabel(yname)
    plt.show()
"""
make_batch_size_plot(get_speedup, "Speedup factor", batch_sizes = [1,2,5,25,32,50,63,64,65,96,100,127,128,129,159,160,161,191,192,193,200,255,256,257],
rows = 32,
cols = 32,
channels = 3,
filter_rows = 7,
num_filters = 64)
"""
make_batch_size_plot(get_time_per_10k_ex, "Time per 10k examples", batch_sizes = [1,2,5,25,32,50,63,64,65,96,100,127,128,129,159,160,161,191,192,193,200,255,256,257],
rows = 32,
cols = 32,
channels = 3,
filter_rows = 5,
num_filters = 64)
|
unknown
|
codeparrot/codeparrot-clean
| ||
#!/usr/bin/python
# (c) 2016, NetApp, Inc
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: netapp_e_auth
short_description: Sets or updates the password for a storage array.
description:
- Sets or updates the password for a storage array. When the password is updated on the storage array, it must be updated on the SANtricity Web
Services proxy. Note, all storage arrays do not have a Monitor or RO role.
version_added: "2.2"
author: Kevin Hulquest (@hulquest)
options:
validate_certs:
required: false
default: true
description:
- Should https certificates be validated?
name:
description:
- The name of the storage array. Note that if more than one storage array with this name is detected, the task will fail and you'll have to use
the ID instead.
required: False
ssid:
description:
- the identifier of the storage array in the Web Services Proxy.
required: False
set_admin:
description:
- Boolean value on whether to update the admin password. If set to false then the RO account is updated.
default: False
current_password:
description:
- The current admin password. This is not required if the password hasn't been set before.
required: False
new_password:
description:
- The password you would like to set. Cannot be more than 30 characters.
required: True
api_url:
description:
- The full API url.
- "Example: http://ENDPOINT:8080/devmgr/v2"
- This can optionally be set via an environment variable, API_URL
required: False
api_username:
description:
- The username used to authenticate against the API
- This can optionally be set via an environment variable, API_USERNAME
required: False
api_password:
description:
- The password used to authenticate against the API
- This can optionally be set via an environment variable, API_PASSWORD
required: False
'''
EXAMPLES = '''
- name: Test module
netapp_e_auth:
name: trex
current_password: OldPasswd
new_password: NewPasswd
set_admin: yes
api_url: '{{ netapp_api_url }}'
api_username: '{{ netapp_api_username }}'
api_password: '{{ netapp_api_password }}'
'''
RETURN = '''
msg:
description: Success message
returned: success
type: string
sample: "Password Updated Successfully"
'''
import json
from ansible.module_utils.api import basic_auth_argument_spec
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.pycompat24 import get_exception
from ansible.module_utils.urls import open_url
from ansible.module_utils.six.moves.urllib.error import HTTPError
# Common JSON request/response headers for every Web Services Proxy call.
HEADERS = {
    "Content-Type": "application/json",
    "Accept": "application/json"
}
def request(url, data=None, headers=None, method='GET', use_proxy=True,
            force=False, last_mod_time=None, timeout=10, validate_certs=True,
            url_username=None, url_password=None, http_agent=None, force_basic_auth=True, ignore_errors=False):
    """Issue an HTTP request against the Web Services Proxy.

    Returns a ``(status_code, parsed_json_or_None)`` tuple. HTTP error
    responses are still read and parsed (their body carries details).
    Raises ``Exception`` on an unreadable/unparsable body or on a status
    >= 400, unless ``ignore_errors`` is set.
    """
    try:
        r = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy,
                     force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs,
                     url_username=url_username, url_password=url_password, http_agent=http_agent,
                     force_basic_auth=force_basic_auth)
    except HTTPError:
        err = get_exception()
        # The error response still has a readable body; fall through and parse it.
        r = err.fp

    raw_data = None
    try:
        raw_data = r.read()
        if raw_data:
            data = json.loads(raw_data)
        else:
            # BUG FIX: previously `raw_data` was reset to None here instead of
            # `data`, so an empty response body returned the *request* payload
            # back to the caller as if it were response data.
            data = None
    except Exception:
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt are
        # not swallowed; best-effort behavior is preserved via ignore_errors.
        if not ignore_errors:
            raise Exception(raw_data)

    resp_code = r.getcode()
    if resp_code >= 400 and not ignore_errors:
        raise Exception(resp_code, data)
    return resp_code, data
def get_ssid(module, name, api_url, user, pwd):
    """Resolve a storage-array name to its Web Services Proxy id.

    Fails the module when the name is ambiguous (more than one match) or
    when no array carries the name.
    """
    rc, systems = request(api_url + 'storage-systems', headers=HEADERS,
                          url_username=user, url_password=pwd)
    matches = 0
    ssid = None
    for system in systems:
        if system['name'] != name:
            continue
        matches += 1
        if matches > 1:
            module.fail_json(
                msg="You supplied a name for the Storage Array but more than 1 array was found with that name. " +
                "Use the id instead")
        else:
            ssid = system['id']
    if matches == 0:
        module.fail_json(msg="No storage array with the name %s was found" % name)
    return ssid
def get_pwd_status(module, ssid, api_url, user, pwd):
    """Return ``(read_only_password_set, admin_password_set)`` for the array."""
    url = api_url + "storage-systems/%s/passwords" % ssid
    try:
        rc, data = request(url, headers=HEADERS, url_username=user, url_password=pwd)
    except HTTPError:
        error = get_exception()
        module.fail_json(msg="There was an issue with connecting, please check that your "
                             "endpoint is properly defined and your credentials are correct: %s" % str(error))
    return data['readOnlyPasswordSet'], data['adminPasswordSet']
def update_storage_system_pwd(module, ssid, pwd, api_url, api_usr, api_pwd):
    """Store the array's (new) password in the Web Services Proxy so the
    proxy can keep talking to the array.

    Fails the module on any request error; returns the proxy's response
    payload on success.
    """
    url = api_url + 'storage-systems/%s' % ssid
    post_body = json.dumps(dict(storedPassword=pwd))
    try:
        rc, data = request(url, data=post_body, method='POST', headers=HEADERS, url_username=api_usr,
                           url_password=api_pwd)
    except Exception:
        # FIX: narrowed from a bare `except:`, which also swallowed
        # SystemExit/KeyboardInterrupt.
        err = get_exception()
        module.fail_json(msg="Failed to update system password. Id [%s]. Error [%s]" % (ssid, str(err)))
    return data
def set_password(module, ssid, api_url, user, pwd, current_password=None, new_password=None, set_admin=False):
    """Set the admin (or RO) password on the array, then sync it to the proxy.

    On HTTP 422 the call is retried once with an empty current password
    (presumably: no admin password was set yet -- NOTE(review): confirm
    against the SANtricity API). Fails the module unless the final status
    is 204; on success returns the proxy-update response payload.
    """
    set_pass = "storage-systems/%s/passwords" % ssid
    url = api_url + set_pass

    if not current_password:
        current_password = ""

    post_body = json.dumps(
        dict(currentAdminPassword=current_password, adminPassword=set_admin, newPassword=new_password))

    try:
        rc, data = request(url, method='POST', data=post_body, headers=HEADERS, url_username=user, url_password=pwd,
                           ignore_errors=True)
    # NOTE(review): bare except also catches SystemExit/KeyboardInterrupt;
    # left untouched because the 422-retry flow below depends on rc/data state.
    except:
        err = get_exception()
        module.fail_json(msg="Failed to set system password. Id [%s]. Error [%s]" % (ssid, str(err)))

    if rc == 422:
        # Retry once with an empty current admin password.
        post_body = json.dumps(dict(currentAdminPassword='', adminPassword=set_admin, newPassword=new_password))
        try:
            rc, data = request(url, method='POST', data=post_body, headers=HEADERS, url_username=user, url_password=pwd)
        except Exception:
            module.fail_json(msg="Wrong or no admin password supplied. Please update your playbook and try again")

    # Keep the proxy's stored password in sync with the array.
    update_data = update_storage_system_pwd(module, ssid, new_password, api_url, user, pwd)

    if int(rc) == 204:
        return update_data
    else:
        module.fail_json(msg="%s:%s" % (rc, data))
def main():
    """Module entry point: parse arguments, resolve the array, set passwords."""
    argument_spec = basic_auth_argument_spec()
    argument_spec.update(dict(
        name=dict(required=False, type='str'),
        ssid=dict(required=False, type='str'),
        current_password=dict(required=False, no_log=True),
        new_password=dict(required=True, no_log=True),
        set_admin=dict(required=True, type='bool'),
        api_url=dict(required=True),
        api_username=dict(required=False),
        api_password=dict(required=False, no_log=True)
    )
    )
    # name and ssid are alternative ways to identify the array; exactly one
    # must be supplied.
    module = AnsibleModule(argument_spec=argument_spec, mutually_exclusive=[['name', 'ssid']],
                           required_one_of=[['name', 'ssid']])

    name = module.params['name']
    ssid = module.params['ssid']
    current_password = module.params['current_password']
    new_password = module.params['new_password']
    set_admin = module.params['set_admin']
    user = module.params['api_username']
    pwd = module.params['api_password']
    api_url = module.params['api_url']

    # Normalize the endpoint so resource paths can be appended directly.
    if not api_url.endswith('/'):
        api_url += '/'

    if name:
        ssid = get_ssid(module, name, api_url, user, pwd)
    ro_pwd, admin_pwd = get_pwd_status(module, ssid, api_url, user, pwd)

    # An existing admin password must be supplied to change anything.
    if admin_pwd and not current_password:
        module.fail_json(
            msg="Admin account has a password set. " +
                "You must supply current_password in order to update the RO or Admin passwords")

    if len(new_password) > 30:
        module.fail_json(msg="Passwords must not be greater than 30 characters in length")

    success = set_password(module, ssid, api_url, user, pwd, current_password=current_password,
                           new_password=new_password,
                           set_admin=set_admin)

    module.exit_json(changed=True, msg="Password Updated Successfully", **success)


if __name__ == '__main__':
    main()
|
unknown
|
codeparrot/codeparrot-clean
| ||
# orm/collections.py
# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Support for collections of mapped entities.
The collections package supplies the machinery used to inform the ORM of
collection membership changes. An instrumentation via decoration approach is
used, allowing arbitrary types (including built-ins) to be used as entity
collections without requiring inheritance from a base class.
Instrumentation decoration relays membership change events to the
:class:`.CollectionAttributeImpl` that is currently managing the collection.
The decorators observe function call arguments and return values, tracking
entities entering or leaving the collection. Two decorator approaches are
provided. One is a bundle of generic decorators that map function arguments
and return values to events::
from sqlalchemy.orm.collections import collection
class MyClass(object):
# ...
@collection.adds(1)
def store(self, item):
self.data.append(item)
@collection.removes_return()
def pop(self):
return self.data.pop()
The second approach is a bundle of targeted decorators that wrap appropriate
append and remove notifiers around the mutation methods present in the
standard Python ``list``, ``set`` and ``dict`` interfaces. These could be
specified in terms of generic decorator recipes, but are instead hand-tooled
for increased efficiency. The targeted decorators occasionally implement
adapter-like behavior, such as mapping bulk-set methods (``extend``,
``update``, ``__setslice__``, etc.) into the series of atomic mutation events
that the ORM requires.
The targeted decorators are used internally for automatic instrumentation of
entity collection classes. Every collection class goes through a
transformation process roughly like so:
1. If the class is a built-in, substitute a trivial sub-class
2. Is this class already instrumented?
3. Add in generic decorators
4. Sniff out the collection interface through duck-typing
5. Add targeted decoration to any undecorated interface method
This process modifies the class at runtime, decorating methods and adding some
bookkeeping properties. This isn't possible (or desirable) for built-in
classes like ``list``, so trivial sub-classes are substituted to hold
decoration::
class InstrumentedList(list):
pass
Collection classes can be specified in ``relationship(collection_class=)`` as
types or a function that returns an instance. Collection classes are
inspected and instrumented during the mapper compilation phase. The
collection_class callable will be executed once to produce a specimen
instance, and the type of that specimen will be instrumented. Functions that
return built-in types like ``lists`` will be adapted to produce instrumented
instances.
When extending a known type like ``list``, additional decorations are
generally not needed. Odds are, the extension method will delegate to a
method that's already instrumented. For example::
class QueueIsh(list):
def push(self, item):
self.append(item)
def shift(self):
return self.pop(0)
There's no need to decorate these methods. ``append`` and ``pop`` are already
instrumented as part of the ``list`` interface. Decorating them would fire
duplicate events, which should be avoided.
The targeted decoration tries not to rely on other methods in the underlying
collection class, but some are unavoidable. Many depend on 'read' methods
being present to properly instrument a 'write', for example, ``__setitem__``
needs ``__getitem__``. "Bulk" methods like ``update`` and ``extend`` may also
be reimplemented in terms of atomic appends and removes, so the ``extend``
decoration will actually perform many ``append`` operations and not call the
underlying method at all.
Tight control over bulk operation and the firing of events is also possible by
implementing the instrumentation internally in your methods. The basic
instrumentation package works under the general assumption that collection
mutation will not raise unusual exceptions. If you want to closely
orchestrate append and remove events with exception management, internal
instrumentation may be the answer. Within your method,
``collection_adapter(self)`` will retrieve an object that you can use for
explicit control over triggering append and remove events.
The owning object and :class:`.CollectionAttributeImpl` are also reachable
through the adapter, allowing for some very sophisticated behavior.
"""
import inspect
import operator
import weakref
from ..sql import expression
from .. import util, exc as sa_exc
from . import base
__all__ = ['collection', 'collection_adapter',
           'mapped_collection', 'column_mapped_collection',
           'attribute_mapped_collection']

# Serializes one-time instrumentation of collection classes across threads.
__instrumentation_mutex = util.threading.Lock()
class _PlainColumnGetter(object):
"""Plain column getter, stores collection of Column objects
directly.
Serializes to a :class:`._SerializableColumnGetterV2`
which has more expensive __call__() performance
and some rare caveats.
"""
def __init__(self, cols):
self.cols = cols
self.composite = len(cols) > 1
def __reduce__(self):
return _SerializableColumnGetterV2._reduce_from_cols(self.cols)
def _cols(self, mapper):
return self.cols
def __call__(self, value):
state = base.instance_state(value)
m = base._state_mapper(state)
key = [
m._get_state_attr_by_column(state, state.dict, col)
for col in self._cols(m)
]
if self.composite:
return tuple(key)
else:
return key[0]
class _SerializableColumnGetter(object):
"""Column-based getter used in version 0.7.6 only.
Remains here for pickle compatibility with 0.7.6.
"""
def __init__(self, colkeys):
self.colkeys = colkeys
self.composite = len(colkeys) > 1
def __reduce__(self):
return _SerializableColumnGetter, (self.colkeys,)
def __call__(self, value):
state = base.instance_state(value)
m = base._state_mapper(state)
key = [m._get_state_attr_by_column(
state, state.dict,
m.mapped_table.columns[k])
for k in self.colkeys]
if self.composite:
return tuple(key)
else:
return key[0]
class _SerializableColumnGetterV2(_PlainColumnGetter):
    """Updated serializable getter which deals with
    multi-table mapped classes.

    Two extremely unusual cases are not supported.
    Mappings which have tables across multiple metadata
    objects, or which are mapped to non-Table selectables
    linked across inheriting mappers may fail to function
    here.

    """

    def __init__(self, colkeys):
        self.colkeys = colkeys
        self.composite = len(colkeys) > 1

    def __reduce__(self):
        return self.__class__, (self.colkeys,)

    @classmethod
    def _reduce_from_cols(cls, cols):
        # Store (column key, table key) pairs; non-Table selectables get None
        # and are resolved against the mapper's local table on reload.
        def _table_key(c):
            if isinstance(c.table, expression.TableClause):
                return c.table.key
            return None
        return _SerializableColumnGetterV2, (
            [(c.key, _table_key(c)) for c in cols],)

    def _cols(self, mapper):
        metadata = getattr(mapper.local_table, 'metadata', None)
        cols = []
        for ckey, tkey in self.colkeys:
            if tkey is not None and metadata is not None and tkey in metadata:
                cols.append(metadata.tables[tkey].c[ckey])
            else:
                cols.append(mapper.local_table.c[ckey])
        return cols
def column_mapped_collection(mapping_spec):
    """A dictionary-based collection type with column-based keying.

    Returns a :class:`.MappedCollection` factory with a keying function
    generated from mapping_spec, which may be a Column or a sequence
    of Columns.

    The key value must be immutable for the lifetime of the object. You
    can not, for example, map on foreign key values if those key values will
    change during the session, i.e. from None to a database-assigned integer
    after a session flush.

    """
    columns = [
        expression._only_column_elements(q, "mapping_spec")
        for q in util.to_list(mapping_spec)
    ]
    keyfunc = _PlainColumnGetter(columns)

    def factory():
        return MappedCollection(keyfunc)
    return factory
class _SerializableAttrGetter(object):
def __init__(self, name):
self.name = name
self.getter = operator.attrgetter(name)
def __call__(self, target):
return self.getter(target)
def __reduce__(self):
return _SerializableAttrGetter, (self.name, )
def attribute_mapped_collection(attr_name):
    """A dictionary-based collection type with attribute-based keying.

    Returns a :class:`.MappedCollection` factory with a keying based on the
    'attr_name' attribute of entities in the collection, where ``attr_name``
    is the string name of the attribute.

    The key value must be immutable for the lifetime of the object. You
    can not, for example, map on foreign key values if those key values will
    change during the session, i.e. from None to a database-assigned integer
    after a session flush.

    """
    getter = _SerializableAttrGetter(attr_name)

    def factory():
        return MappedCollection(getter)
    return factory
def mapped_collection(keyfunc):
    """A dictionary-based collection type with arbitrary keying.

    Returns a :class:`.MappedCollection` factory with a keying function
    generated from keyfunc, a callable that takes an entity and returns a
    key value.

    The key value must be immutable for the lifetime of the object. You
    can not, for example, map on foreign key values if those key values will
    change during the session, i.e. from None to a database-assigned integer
    after a session flush.

    """
    def factory():
        return MappedCollection(keyfunc)
    return factory
class collection(object):
"""Decorators for entity collection classes.
The decorators fall into two groups: annotations and interception recipes.
The annotating decorators (appender, remover, iterator, linker, converter,
internally_instrumented) indicate the method's purpose and take no
arguments. They are not written with parens::
@collection.appender
def append(self, append): ...
The recipe decorators all require parens, even those that take no
arguments::
@collection.adds('entity')
def insert(self, position, entity): ...
@collection.removes_return()
def popitem(self): ...
"""
# Bundled as a class solely for ease of use: packaging, doc strings,
# importability.
@staticmethod
def appender(fn):
"""Tag the method as the collection appender.
The appender method is called with one positional argument: the value
to append. The method will be automatically decorated with 'adds(1)'
if not already decorated::
@collection.appender
def add(self, append): ...
# or, equivalently
@collection.appender
@collection.adds(1)
def add(self, append): ...
# for mapping type, an 'append' may kick out a previous value
# that occupies that slot. consider d['a'] = 'foo'- any previous
# value in d['a'] is discarded.
@collection.appender
@collection.replaces(1)
def add(self, entity):
key = some_key_func(entity)
previous = None
if key in self:
previous = self[key]
self[key] = entity
return previous
If the value to append is not allowed in the collection, you may
raise an exception. Something to remember is that the appender
will be called for each object mapped by a database query. If the
database contains rows that violate your collection semantics, you
will need to get creative to fix the problem, as access via the
collection will not work.
If the appender method is internally instrumented, you must also
receive the keyword argument '_sa_initiator' and ensure its
promulgation to collection events.
"""
fn._sa_instrument_role = 'appender'
return fn
@staticmethod
def remover(fn):
"""Tag the method as the collection remover.
The remover method is called with one positional argument: the value
to remove. The method will be automatically decorated with
:meth:`removes_return` if not already decorated::
@collection.remover
def zap(self, entity): ...
# or, equivalently
@collection.remover
@collection.removes_return()
def zap(self, ): ...
If the value to remove is not present in the collection, you may
raise an exception or return None to ignore the error.
If the remove method is internally instrumented, you must also
receive the keyword argument '_sa_initiator' and ensure its
promulgation to collection events.
"""
fn._sa_instrument_role = 'remover'
return fn
@staticmethod
def iterator(fn):
"""Tag the method as the collection remover.
The iterator method is called with no arguments. It is expected to
return an iterator over all collection members::
@collection.iterator
def __iter__(self): ...
"""
fn._sa_instrument_role = 'iterator'
return fn
@staticmethod
def internally_instrumented(fn):
"""Tag the method as instrumented.
This tag will prevent any decoration from being applied to the
method. Use this if you are orchestrating your own calls to
:func:`.collection_adapter` in one of the basic SQLAlchemy
interface methods, or to prevent an automatic ABC method
decoration from wrapping your implementation::
# normally an 'extend' method on a list-like class would be
# automatically intercepted and re-implemented in terms of
# SQLAlchemy events and append(). your implementation will
# never be called, unless:
@collection.internally_instrumented
def extend(self, items): ...
"""
fn._sa_instrumented = True
return fn
@staticmethod
def linker(fn):
"""Tag the method as a "linked to attribute" event handler.
This optional event handler will be called when the collection class
is linked to or unlinked from the InstrumentedAttribute. It is
invoked immediately after the '_sa_adapter' property is set on
the instance. A single argument is passed: the collection adapter
that has been linked, or None if unlinking.
.. deprecated:: 1.0.0 - the :meth:`.collection.linker` handler
is superseded by the :meth:`.AttributeEvents.init_collection`
and :meth:`.AttributeEvents.dispose_collection` handlers.
"""
fn._sa_instrument_role = 'linker'
return fn
link = linker
"""deprecated; synonym for :meth:`.collection.linker`."""
@staticmethod
def converter(fn):
"""Tag the method as the collection converter.
This optional method will be called when a collection is being
replaced entirely, as in::
myobj.acollection = [newvalue1, newvalue2]
The converter method will receive the object being assigned and should
return an iterable of values suitable for use by the ``appender``
method. A converter must not assign values or mutate the collection,
its sole job is to adapt the value the user provides into an iterable
of values for the ORM's use.
The default converter implementation will use duck-typing to do the
conversion. A dict-like collection will be convert into an iterable
of dictionary values, and other types will simply be iterated::
@collection.converter
def convert(self, other): ...
If the duck-typing of the object does not match the type of this
collection, a TypeError is raised.
Supply an implementation of this method if you want to expand the
range of possible types that can be assigned in bulk or perform
validation on the values about to be assigned.
"""
fn._sa_instrument_role = 'converter'
return fn
@staticmethod
def adds(arg):
"""Mark the method as adding an entity to the collection.
Adds "add to collection" handling to the method. The decorator
argument indicates which method argument holds the SQLAlchemy-relevant
value. Arguments can be specified positionally (i.e. integer) or by
name::
@collection.adds(1)
def push(self, item): ...
@collection.adds('entity')
def do_stuff(self, thing, entity=None): ...
"""
def decorator(fn):
fn._sa_instrument_before = ('fire_append_event', arg)
return fn
return decorator
@staticmethod
def replaces(arg):
"""Mark the method as replacing an entity in the collection.
Adds "add to collection" and "remove from collection" handling to
the method. The decorator argument indicates which method argument
holds the SQLAlchemy-relevant value to be added, and return value, if
any will be considered the value to remove.
Arguments can be specified positionally (i.e. integer) or by name::
@collection.replaces(2)
def __setitem__(self, index, item): ...
"""
def decorator(fn):
fn._sa_instrument_before = ('fire_append_event', arg)
fn._sa_instrument_after = 'fire_remove_event'
return fn
return decorator
@staticmethod
def removes(arg):
    """Mark the decorated method as one that removes an entity from the
    collection.

    Installs "remove from collection" handling.  The decorator argument
    names which method argument carries the SQLAlchemy-relevant value to
    be removed, positionally (an integer) or by name::

        @collection.removes(1)
        def zap(self, item): ...

    For methods where the value to remove is not known at call-time, use
    collection.removes_return.
    """
    def mark(fn):
        # Queue a fire_remove_event on the named argument.
        fn._sa_instrument_before = ('fire_remove_event', arg)
        return fn
    return mark
@staticmethod
def removes_return():
    """Mark the decorated method as one that removes an entity from the
    collection via its return value.

    Installs "remove from collection" handling.  The method's return
    value, if any, is considered the value to remove; arguments are not
    inspected::

        @collection.removes_return()
        def pop(self): ...

    For methods where the value to remove is known at call-time, use
    collection.remove.
    """
    def mark(fn):
        # Fire a remove event on whatever the wrapped method returns.
        fn._sa_instrument_after = 'fire_remove_event'
        return fn
    return mark
# Convenience accessor: returns the CollectionAdapter previously installed
# on a collection instance (set in CollectionAdapter.__init__).
collection_adapter = operator.attrgetter('_sa_adapter')
"""Fetch the :class:`.CollectionAdapter` for a collection."""
class CollectionAdapter(object):
    """Bridges between the ORM and arbitrary Python collections.

    Proxies base-level collection operations (append, remove, iterate)
    to the underlying Python collection, and emits add/remove events for
    entities entering or leaving the collection.

    The ORM uses :class:`.CollectionAdapter` exclusively for interaction with
    entity collections.
    """

    # Set True by the attribute system when this collection has been
    # superseded; mutations afterwards emit a warning.
    invalidated = False

    def __init__(self, attr, owner_state, data):
        self._key = attr.key
        # Weak reference: the adapter must not keep the collection alive.
        self._data = weakref.ref(data)
        self.owner_state = owner_state
        # Install the back-reference used by collection_adapter().
        data._sa_adapter = self

    def _warn_invalidated(self):
        util.warn("This collection has been invalidated.")

    @property
    def data(self):
        "The entity collection being adapted."
        return self._data()

    @property
    def _referenced_by_owner(self):
        """return True if the owner state still refers to this collection.

        This will return False within a bulk replace operation,
        where this collection is the one being replaced.
        """
        return self.owner_state.dict[self._key] is self._data()

    @util.memoized_property
    def attr(self):
        # The attribute implementation object managing this key.
        return self.owner_state.manager[self._key].impl

    def adapt_like_to_iterable(self, obj):
        """Converts collection-compatible objects to an iterable of values.

        Can be passed any type of object, and if the underlying collection
        determines that it can be adapted into a stream of values it can
        use, returns an iterable of values suitable for append()ing.

        This method may raise TypeError or any other suitable exception
        if adaptation fails.

        If a converter implementation is not supplied on the collection,
        a default duck-typing-based implementation is used.
        """
        converter = self._data()._sa_converter
        if converter is not None:
            return converter(obj)
        setting_type = util.duck_type_collection(obj)
        receiving_type = util.duck_type_collection(self._data())
        if obj is None or setting_type != receiving_type:
            # and/or idiom: 'None' when obj is None, else its class name.
            given = obj is None and 'None' or obj.__class__.__name__
            if receiving_type is None:
                wanted = self._data().__class__.__name__
            else:
                wanted = receiving_type.__name__
            raise TypeError(
                "Incompatible collection type: %s is not %s-like" % (
                    given, wanted))
        # If the object is an adapted collection, return the (iterable)
        # adapter.
        if getattr(obj, '_sa_adapter', None) is not None:
            return obj._sa_adapter
        elif setting_type == dict:
            if util.py3k:
                return obj.values()
            else:
                return getattr(obj, 'itervalues', obj.values)()
        else:
            return iter(obj)

    def append_with_event(self, item, initiator=None):
        """Add an entity to the collection, firing mutation events."""
        self._data()._sa_appender(item, _sa_initiator=initiator)

    def append_without_event(self, item):
        """Add or restore an entity to the collection, firing no events."""
        self._data()._sa_appender(item, _sa_initiator=False)

    def append_multiple_without_event(self, items):
        """Add or restore an entity to the collection, firing no events."""
        appender = self._data()._sa_appender
        for item in items:
            appender(item, _sa_initiator=False)

    def remove_with_event(self, item, initiator=None):
        """Remove an entity from the collection, firing mutation events."""
        self._data()._sa_remover(item, _sa_initiator=initiator)

    def remove_without_event(self, item):
        """Remove an entity from the collection, firing no events."""
        self._data()._sa_remover(item, _sa_initiator=False)

    def clear_with_event(self, initiator=None):
        """Empty the collection, firing a mutation event for each entity."""
        remover = self._data()._sa_remover
        for item in list(self):
            remover(item, _sa_initiator=initiator)

    def clear_without_event(self):
        """Empty the collection, firing no events."""
        remover = self._data()._sa_remover
        for item in list(self):
            remover(item, _sa_initiator=False)

    def __iter__(self):
        """Iterate over entities in the collection."""
        return iter(self._data()._sa_iterator())

    def __len__(self):
        """Count entities in the collection."""
        return len(list(self._data()._sa_iterator()))

    def __bool__(self):
        return True

    __nonzero__ = __bool__

    def fire_append_event(self, item, initiator=None):
        """Notify that a entity has entered the collection.

        Initiator is a token owned by the InstrumentedAttribute that
        initiated the membership mutation, and should be left as None
        unless you are passing along an initiator value from a chained
        operation.
        """
        # initiator False means "suppress events"; the item is returned
        # unchanged in that case.
        if initiator is not False:
            if self.invalidated:
                self._warn_invalidated()
            return self.attr.fire_append_event(
                self.owner_state,
                self.owner_state.dict,
                item, initiator)
        else:
            return item

    def fire_remove_event(self, item, initiator=None):
        """Notify that a entity has been removed from the collection.

        Initiator is the InstrumentedAttribute that initiated the membership
        mutation, and should be left as None unless you are passing along
        an initiator value from a chained operation.
        """
        if initiator is not False:
            if self.invalidated:
                self._warn_invalidated()
            self.attr.fire_remove_event(
                self.owner_state,
                self.owner_state.dict,
                item, initiator)

    def fire_pre_remove_event(self, initiator=None):
        """Notify that an entity is about to be removed from the collection.

        Only called if the entity cannot be removed after calling
        fire_remove_event().
        """
        if self.invalidated:
            self._warn_invalidated()
        self.attr.fire_pre_remove_event(
            self.owner_state,
            self.owner_state.dict,
            initiator=initiator)

    def __getstate__(self):
        # The weakref is not picklable; persist the referent itself.
        return {'key': self._key,
                'owner_state': self.owner_state,
                'data': self.data}

    def __setstate__(self, d):
        self._key = d['key']
        self.owner_state = d['owner_state']
        self._data = weakref.ref(d['data'])
def bulk_replace(values, existing_adapter, new_adapter):
    """Load a new collection, firing events based on prior like membership.

    Instances in ``values`` are appended onto ``new_adapter``; append
    events fire only for instances that were not already present in
    ``existing_adapter``.  Members of ``existing_adapter`` absent from
    ``values`` receive remove events.

    :param values: An iterable of collection member instances

    :param existing_adapter: A :class:`.CollectionAdapter` of
     instances to be replaced

    :param new_adapter: An empty :class:`.CollectionAdapter`
     to load with ``values``
    """
    if not isinstance(values, list):
        values = list(values)

    identity_set = util.IdentitySet
    # Membership is decided by identity: partition the incoming values
    # into those carried over from the prior collection vs. new arrivals.
    before = identity_set(existing_adapter or ())
    kept = before.intersection(values or ())
    arriving = identity_set(values or ()).difference(kept)
    departing = before.difference(kept)

    for member in values or ():
        if member in arriving:
            new_adapter.append_with_event(member)
        elif member in kept:
            new_adapter.append_without_event(member)

    if existing_adapter:
        for member in departing:
            existing_adapter.remove_with_event(member)
def prepare_instrumentation(factory):
    """Prepare a callable for future use as a collection class factory.

    Given a collection class factory (either a type or no-arg callable),
    return another factory that will produce compatible instances when
    called.

    This function is responsible for converting collection_class=list
    into the run-time behavior of collection_class=InstrumentedList.
    """
    # Convert a builtin to 'Instrumented*'
    if factory in __canned_instrumentation:
        factory = __canned_instrumentation[factory]

    # Create a specimen
    cls = type(factory())

    # Did factory callable return a builtin?
    if cls in __canned_instrumentation:
        # Wrap it so that it returns our 'Instrumented*'
        factory = __converting_factory(cls, factory)
        # BUG FIX: the wrapped factory returns an *instance*; instrument
        # its class.  Previously ``cls = factory()`` bound the instance
        # itself, which breaks _instrument_class (instances have no
        # __mro__) and defeats the '_sa_instrumented == id(cls)' check.
        cls = factory().__class__

    # Instrument the class if needed.
    if __instrumentation_mutex.acquire():
        try:
            if getattr(cls, '_sa_instrumented', None) != id(cls):
                _instrument_class(cls)
        finally:
            __instrumentation_mutex.release()

    return factory
def __converting_factory(specimen_cls, original_factory):
    """Return a wrapper that converts a "canned" collection like
    set, dict, list into the Instrumented* version.
    """
    target_cls = __canned_instrumentation[specimen_cls]

    def wrapper():
        # Build via the user's factory, then re-wrap the result in the
        # instrumented subclass.
        produced = original_factory()
        return target_cls(produced)

    # often flawed but better than nothing
    wrapper.__name__ = "%sWrapper" % original_factory.__name__
    wrapper.__doc__ = original_factory.__doc__
    return wrapper
def _instrument_class(cls):
    """Modify methods in a class and install instrumentation."""
    # In the normal call flow, requests for the 3 basic collection types
    # are transformed into trivial subclasses (e.g. InstrumentedList);
    # a genuine builtin reaching this point cannot be modified in place.
    if cls.__module__ == '__builtin__':
        raise sa_exc.ArgumentError(
            "Can not instrument a built-in type. Use a "
            "subclass, even a trivial one.")

    roles, methods = _locate_roles_and_methods(cls)
    # Instrumentation pipeline: canned roles for recognized duck types,
    # validation of the mandatory roles, then attribute installation.
    for stage in (_setup_canned_roles,
                  _assert_required_roles,
                  _set_collection_attributes):
        stage(cls, roles, methods)
def _locate_roles_and_methods(cls):
    """Search the MRO for ``_sa_instrument_role``-decorated methods,
    collecting role assignments and queued event instrumentation.
    """
    roles = {}
    methods = {}
    valid_events = ('fire_append_event', 'fire_remove_event')

    for klass in cls.__mro__:
        for name, member in vars(klass).items():
            if not util.callable(member):
                continue

            # Record role declarations; the most-derived class wins.
            if hasattr(member, '_sa_instrument_role'):
                role = member._sa_instrument_role
                assert role in ('appender', 'remover', 'iterator',
                                'linker', 'converter')
                roles.setdefault(role, name)

            # Transfer instrumentation requests from the decorated
            # function into the combined (before, argument, after) queue.
            before = after = None
            if hasattr(member, '_sa_instrument_before'):
                op, argument = member._sa_instrument_before
                assert op in valid_events
                before = op, argument
            if hasattr(member, '_sa_instrument_after'):
                op = member._sa_instrument_after
                assert op in valid_events
                after = op
            if before:
                methods[name] = before + (after,)
            elif after:
                methods[name] = (None, None, after)
    return roles, methods
def _setup_canned_roles(cls, roles, methods):
    """Apply "canned" role defaults for known collection duck types
    (dict, set, list), and auto-decorate ABC-provided methods.
    """
    duck_type = util.duck_type_collection(cls)
    if duck_type not in __interfaces:
        return
    default_roles, decorators = __interfaces[duck_type]
    for role, method_name in default_roles.items():
        roles.setdefault(role, method_name)

    # Auto-decorate methods that exist, were not explicitly queued for
    # instrumentation, and are not already instrumented.
    for method_name, decorator in decorators.items():
        fn = getattr(cls, method_name, None)
        if (fn and method_name not in methods and
                not hasattr(fn, '_sa_instrumented')):
            setattr(cls, method_name, decorator(fn))
def _assert_required_roles(cls, roles, methods):
    """Ensure the appender, remover and iterator roles are all present,
    queueing implicit instrumentation for appender/remover when needed.
    """
    # appender and remover follow the same pattern: the role must exist
    # and resolve to a real method; un-instrumented ones get a default
    # positional-argument event queued.
    for role, phrase, event in (
            ('appender', 'an appender', 'fire_append_event'),
            ('remover', 'a remover', 'fire_remove_event')):
        if role not in roles or not hasattr(cls, roles[role]):
            raise sa_exc.ArgumentError(
                "Type %s must elect %s method to be "
                "a collection class" % (cls.__name__, phrase))
        elif (roles[role] not in methods and
                not hasattr(getattr(cls, roles[role]), '_sa_instrumented')):
            methods[roles[role]] = (event, 1, None)

    if 'iterator' not in roles or not hasattr(cls, roles['iterator']):
        raise sa_exc.ArgumentError(
            "Type %s must elect an iterator method to be "
            "a collection class" % cls.__name__)
def _set_collection_attributes(cls, roles, methods):
    """Install ad-hoc instrumentation from decorators, class-level
    defaults and implicit role declarations onto ``cls``.
    """
    # Wrap each queued method so membership mutations route through the
    # collection adapter.
    for method_name, (before, argument, after) in methods.items():
        wrapped = _instrument_membership_mutator(
            getattr(cls, method_name), before, argument, after)
        setattr(cls, method_name, wrapped)

    # Intern the role map as _sa_<role> aliases of the elected methods.
    for role, method_name in roles.items():
        setattr(cls, '_sa_%s' % role, getattr(cls, method_name))

    cls._sa_adapter = None
    if not hasattr(cls, '_sa_converter'):
        cls._sa_converter = None
    # Marker recording that instrumentation was applied to this exact
    # class object (compared against id(cls) by prepare_instrumentation).
    cls._sa_instrumented = id(cls)
def _instrument_membership_mutator(method, before, argument, after):
    """Route method args and/or return value through the collection
    adapter.

    :param method: the collection method being wrapped.
    :param before: adapter event name to fire before calling the method
     ('fire_append_event' / 'fire_remove_event'), or None.
    :param argument: positional index or name of the method argument
     carrying the relevant value; used only when ``before`` is set.
    :param after: adapter event name to fire on the method's non-None
     return value, or None.
    """
    # This isn't smart enough to handle @adds(1) for 'def fn(self, (a, b))'
    if before:
        # BUG FIX: inspect.getargspec() was removed in Python 3.11; use
        # getfullargspec() when available (Python 3), keeping the
        # getargspec() fallback for Python 2 (this file still carries
        # util.py2k branches).
        getargspec = getattr(
            inspect, 'getfullargspec', None) or inspect.getargspec
        fn_args = list(util.flatten_iterator(getargspec(method)[0]))
        if isinstance(argument, int):
            pos_arg = argument
            named_arg = len(fn_args) > argument and fn_args[argument] or None
        else:
            if argument in fn_args:
                pos_arg = fn_args.index(argument)
            else:
                pos_arg = None
            named_arg = argument
        del fn_args

    def wrapper(*args, **kw):
        if before:
            # Locate the relevant value among positional and keyword args.
            if pos_arg is None:
                if named_arg not in kw:
                    raise sa_exc.ArgumentError(
                        "Missing argument %s" % argument)
                value = kw[named_arg]
            else:
                if len(args) > pos_arg:
                    value = args[pos_arg]
                elif named_arg in kw:
                    value = kw[named_arg]
                else:
                    raise sa_exc.ArgumentError(
                        "Missing argument %s" % argument)

        # _sa_initiator=False disables event firing entirely.
        initiator = kw.pop('_sa_initiator', None)
        if initiator is False:
            executor = None
        else:
            executor = args[0]._sa_adapter

        if before and executor:
            getattr(executor, before)(value, initiator)

        if not after or not executor:
            return method(*args, **kw)
        else:
            res = method(*args, **kw)
            if res is not None:
                getattr(executor, after)(res, initiator)
            return res

    wrapper._sa_instrumented = True
    if hasattr(method, "_sa_instrument_role"):
        wrapper._sa_instrument_role = method._sa_instrument_role
    wrapper.__name__ = method.__name__
    wrapper.__doc__ = method.__doc__
    return wrapper
def __set(collection, item, _sa_initiator=None):
    """Run set events, may eventually be inlined into decorators."""
    # An initiator of False means "suppress events entirely".
    if _sa_initiator is not False:
        adapter = collection._sa_adapter
        if adapter:
            # The append event may substitute the item (e.g. validators).
            item = adapter.fire_append_event(item, _sa_initiator)
    return item
def __del(collection, item, _sa_initiator=None):
    """Run del events, may eventually be inlined into decorators."""
    # An initiator of False means "suppress events entirely".
    if _sa_initiator is not False:
        adapter = collection._sa_adapter
        if adapter:
            adapter.fire_remove_event(item, _sa_initiator)
def __before_delete(collection, _sa_initiator=None):
    """Special method to run 'commit existing value' methods"""
    adapter = collection._sa_adapter
    if adapter:
        adapter.fire_pre_remove_event(_sa_initiator)
def _list_decorators():
    """Tailored instrumentation wrappers for any list-like class.

    Returns a dict of method name -> decorator; each decorator wraps the
    corresponding list method so that membership mutations fire
    append/remove events through the collection adapter.
    """

    def _tidy(fn):
        # Mark the wrapper as instrumented and borrow list's docstring.
        fn._sa_instrumented = True
        fn.__doc__ = getattr(list, fn.__name__).__doc__

    def append(fn):
        def append(self, item, _sa_initiator=None):
            # Fire the append event first; it may substitute the item.
            item = __set(self, item, _sa_initiator)
            fn(self, item)
        _tidy(append)
        return append

    def remove(fn):
        def remove(self, value, _sa_initiator=None):
            __before_delete(self, _sa_initiator)
            # testlib.pragma exempt:__eq__
            fn(self, value)
            __del(self, value, _sa_initiator)
        _tidy(remove)
        return remove

    def insert(fn):
        def insert(self, index, value):
            value = __set(self, value)
            fn(self, index, value)
        _tidy(insert)
        return insert

    def __setitem__(fn):
        def __setitem__(self, index, value):
            if not isinstance(index, slice):
                existing = self[index]
                if existing is not None:
                    __del(self, existing)
                value = __set(self, value)
                fn(self, index, value)
            else:
                # slice assignment requires __delitem__, insert, __len__
                step = index.step or 1
                start = index.start or 0
                if start < 0:
                    start += len(self)
                if index.stop is not None:
                    stop = index.stop
                else:
                    stop = len(self)
                if stop < 0:
                    stop += len(self)

                if step == 1:
                    # Contiguous slice: delete the old run, then insert
                    # the replacement items in order.
                    for i in range(start, stop, step):
                        if len(self) > start:
                            del self[start]

                    for i, item in enumerate(value):
                        self.insert(i + start, item)
                else:
                    # Extended slice: lengths must match exactly,
                    # mirroring built-in list semantics.
                    rng = list(range(start, stop, step))
                    if len(value) != len(rng):
                        raise ValueError(
                            "attempt to assign sequence of size %s to "
                            "extended slice of size %s" % (len(value),
                                                           len(rng)))
                    for i, item in zip(rng, value):
                        self.__setitem__(i, item)
        _tidy(__setitem__)
        return __setitem__

    def __delitem__(fn):
        def __delitem__(self, index):
            if not isinstance(index, slice):
                item = self[index]
                __del(self, item)
                fn(self, index)
            else:
                # slice deletion requires __getslice__ and a slice-groking
                # __getitem__ for stepped deletion
                # note: not breaking this into atomic dels
                for item in self[index]:
                    __del(self, item)
                fn(self, index)
        _tidy(__delitem__)
        return __delitem__

    if util.py2k:
        def __setslice__(fn):
            def __setslice__(self, start, end, values):
                for value in self[start:end]:
                    __del(self, value)
                values = [__set(self, value) for value in values]
                fn(self, start, end, values)
            _tidy(__setslice__)
            return __setslice__

        def __delslice__(fn):
            def __delslice__(self, start, end):
                for value in self[start:end]:
                    __del(self, value)
                fn(self, start, end)
            _tidy(__delslice__)
            return __delslice__

    def extend(fn):
        def extend(self, iterable):
            for value in iterable:
                self.append(value)
        _tidy(extend)
        return extend

    def __iadd__(fn):
        def __iadd__(self, iterable):
            # list.__iadd__ takes any iterable and seems to let TypeError
            # raise as-is instead of returning NotImplemented
            for value in iterable:
                self.append(value)
            return self
        _tidy(__iadd__)
        return __iadd__

    def pop(fn):
        def pop(self, index=-1):
            __before_delete(self)
            item = fn(self, index)
            __del(self, item)
            return item
        _tidy(pop)
        return pop

    if not util.py2k:
        def clear(fn):
            def clear(self, index=-1):
                for item in self:
                    __del(self, item)
                fn(self)
            _tidy(clear)
            return clear

    # __imul__ : not wrapping this. all members of the collection are already
    # present, so no need to fire appends... wrapping it with an explicit
    # decorator is still possible, so events on *= can be had if they're
    # desired. hard to imagine a use case for __imul__, though.

    # Every local function defined above becomes an entry in the returned
    # map; only the _tidy helper is excluded.
    l = locals().copy()
    l.pop('_tidy')
    return l
def _dict_decorators():
    """Tailored instrumentation wrappers for any dict-like mapping class.

    Returns a dict of method name -> decorator; each decorator wraps the
    corresponding dict method so that value mutations fire append/remove
    events through the collection adapter.
    """

    def _tidy(fn):
        # Mark the wrapper as instrumented and borrow dict's docstring.
        fn._sa_instrumented = True
        fn.__doc__ = getattr(dict, fn.__name__).__doc__

    # Sentinel distinguishing "no default supplied" from default=None.
    Unspecified = util.symbol('Unspecified')

    def __setitem__(fn):
        def __setitem__(self, key, value, _sa_initiator=None):
            # Replacing an existing key fires a remove for the old value.
            if key in self:
                __del(self, self[key], _sa_initiator)
            value = __set(self, value, _sa_initiator)
            fn(self, key, value)
        _tidy(__setitem__)
        return __setitem__

    def __delitem__(fn):
        def __delitem__(self, key, _sa_initiator=None):
            if key in self:
                __del(self, self[key], _sa_initiator)
            fn(self, key)
        _tidy(__delitem__)
        return __delitem__

    def clear(fn):
        def clear(self):
            for key in self:
                __del(self, self[key])
            fn(self)
        _tidy(clear)
        return clear

    def pop(fn):
        def pop(self, key, default=Unspecified):
            if key in self:
                __del(self, self[key])
            if default is Unspecified:
                return fn(self, key)
            else:
                return fn(self, key, default)
        _tidy(pop)
        return pop

    def popitem(fn):
        def popitem(self):
            __before_delete(self)
            item = fn(self)
            # item is a (key, value) pair; the value is what leaves.
            __del(self, item[1])
            return item
        _tidy(popitem)
        return popitem

    def setdefault(fn):
        def setdefault(self, key, default=None):
            if key not in self:
                self.__setitem__(key, default)
                return default
            else:
                return self.__getitem__(key)
        _tidy(setdefault)
        return setdefault

    def update(fn):
        def update(self, __other=Unspecified, **kw):
            if __other is not Unspecified:
                if hasattr(__other, 'keys'):
                    # mapping-like source; only assign changed identities
                    for key in list(__other):
                        if (key not in self or
                                self[key] is not __other[key]):
                            self[key] = __other[key]
                else:
                    # iterable of (key, value) pairs
                    for key, value in __other:
                        if key not in self or self[key] is not value:
                            self[key] = value
            for key in kw:
                if key not in self or self[key] is not kw[key]:
                    self[key] = kw[key]
        _tidy(update)
        return update

    # Every local function defined above becomes an entry in the returned
    # map; the _tidy helper and the sentinel are excluded.
    l = locals().copy()
    l.pop('_tidy')
    l.pop('Unspecified')
    return l
# Types always accepted on the "other" side of set binary operators.
_set_binop_bases = (set, frozenset)


def _set_binops_check_strict(self, obj):
    """Allow only set, frozenset and self.__class__-derived
    objects in binops."""
    return isinstance(obj, _set_binop_bases + (self.__class__,))


def _set_binops_check_loose(self, obj):
    """Allow anything set-like to participate in set binops."""
    return (isinstance(obj, _set_binop_bases + (self.__class__,)) or
            util.duck_type_collection(obj) == set)
def _set_decorators():
    """Tailored instrumentation wrappers for any set-like class.

    Returns a dict of method name -> decorator; each decorator wraps the
    corresponding set method so that membership mutations fire
    append/remove events through the collection adapter.
    """

    def _tidy(fn):
        # Mark the wrapper as instrumented and borrow set's docstring.
        fn._sa_instrumented = True
        fn.__doc__ = getattr(set, fn.__name__).__doc__

    # Sentinel kept for symmetry with _dict_decorators; unused here and
    # popped from the returned map below.
    Unspecified = util.symbol('Unspecified')

    def add(fn):
        def add(self, value, _sa_initiator=None):
            # Only fire an append when the value is genuinely new.
            if value not in self:
                value = __set(self, value, _sa_initiator)
            # testlib.pragma exempt:__hash__
            fn(self, value)
        _tidy(add)
        return add

    def discard(fn):
        def discard(self, value, _sa_initiator=None):
            # testlib.pragma exempt:__hash__
            if value in self:
                __del(self, value, _sa_initiator)
                # testlib.pragma exempt:__hash__
            fn(self, value)
        _tidy(discard)
        return discard

    def remove(fn):
        def remove(self, value, _sa_initiator=None):
            # testlib.pragma exempt:__hash__
            if value in self:
                __del(self, value, _sa_initiator)
            # testlib.pragma exempt:__hash__
            fn(self, value)
        _tidy(remove)
        return remove

    def pop(fn):
        def pop(self):
            __before_delete(self)
            item = fn(self)
            __del(self, item)
            return item
        _tidy(pop)
        return pop

    def clear(fn):
        def clear(self):
            # Delegates to the instrumented remove() so each member
            # fires its own remove event.
            for item in list(self):
                self.remove(item)
        _tidy(clear)
        return clear

    def update(fn):
        def update(self, value):
            for item in value:
                self.add(item)
        _tidy(update)
        return update

    def __ior__(fn):
        def __ior__(self, value):
            if not _set_binops_check_strict(self, value):
                return NotImplemented
            for item in value:
                self.add(item)
            return self
        _tidy(__ior__)
        return __ior__

    def difference_update(fn):
        def difference_update(self, value):
            for item in value:
                self.discard(item)
        _tidy(difference_update)
        return difference_update

    def __isub__(fn):
        def __isub__(self, value):
            if not _set_binops_check_strict(self, value):
                return NotImplemented
            for item in value:
                self.discard(item)
            return self
        _tidy(__isub__)
        return __isub__

    def intersection_update(fn):
        def intersection_update(self, other):
            # Compute the delta first, then apply removals and additions
            # through the instrumented methods.
            want, have = self.intersection(other), set(self)
            remove, add = have - want, want - have

            for item in remove:
                self.remove(item)
            for item in add:
                self.add(item)
        _tidy(intersection_update)
        return intersection_update

    def __iand__(fn):
        def __iand__(self, other):
            if not _set_binops_check_strict(self, other):
                return NotImplemented
            want, have = self.intersection(other), set(self)
            remove, add = have - want, want - have

            for item in remove:
                self.remove(item)
            for item in add:
                self.add(item)
            return self
        _tidy(__iand__)
        return __iand__

    def symmetric_difference_update(fn):
        def symmetric_difference_update(self, other):
            want, have = self.symmetric_difference(other), set(self)
            remove, add = have - want, want - have

            for item in remove:
                self.remove(item)
            for item in add:
                self.add(item)
        _tidy(symmetric_difference_update)
        return symmetric_difference_update

    def __ixor__(fn):
        def __ixor__(self, other):
            if not _set_binops_check_strict(self, other):
                return NotImplemented
            want, have = self.symmetric_difference(other), set(self)
            remove, add = have - want, want - have

            for item in remove:
                self.remove(item)
            for item in add:
                self.add(item)
            return self
        _tidy(__ixor__)
        return __ixor__

    # Every local function defined above becomes an entry in the returned
    # map; the _tidy helper and the sentinel are excluded.
    l = locals().copy()
    l.pop('_tidy')
    l.pop('Unspecified')
    return l
class InstrumentedList(list):
    """An instrumented version of the built-in list."""


class InstrumentedSet(set):
    """An instrumented version of the built-in set."""


class InstrumentedDict(dict):
    """An instrumented version of the built-in dict."""


# Map of builtin collection type -> trivial instrumented subclass,
# consulted by prepare_instrumentation().
__canned_instrumentation = {
    list: InstrumentedList,
    set: InstrumentedSet,
    dict: InstrumentedDict,
}

# Map of builtin collection duck-type -> (default role methods,
# decorator map), consumed by _setup_canned_roles().
__interfaces = {
    list: (
        {'appender': 'append', 'remover': 'remove',
         'iterator': '__iter__'}, _list_decorators()
    ),

    set: ({'appender': 'add',
           'remover': 'remove',
           'iterator': '__iter__'}, _set_decorators()
          ),

    # decorators are required for dicts and object collections.
    dict: ({'iterator': 'values'}, _dict_decorators()) if util.py3k
    else ({'iterator': 'itervalues'}, _dict_decorators()),
}
class MappedCollection(dict):
    """A basic dictionary-based collection class.

    Extends dict with the minimal bag semantics that collection
    classes require. ``set`` and ``remove`` are implemented in terms
    of a keying function: any callable that takes an object and
    returns an object for use as a dictionary key.
    """

    def __init__(self, keyfunc):
        """Create a new collection with keying provided by keyfunc.

        keyfunc may be any callable that takes an object and returns an object
        for use as a dictionary key.

        The keyfunc will be called every time the ORM needs to add a member by
        value-only (such as when loading instances from the database) or
        remove a member. The usual cautions about dictionary keying apply-
        ``keyfunc(object)`` should return the same output for the life of the
        collection. Keying based on mutable properties can result in
        unreachable instances "lost" in the collection.
        """
        self.keyfunc = keyfunc

    @collection.appender
    @collection.internally_instrumented
    def set(self, value, _sa_initiator=None):
        """Add an item by value, consulting the keyfunc for the key."""
        key = self.keyfunc(value)
        self.__setitem__(key, value, _sa_initiator)

    @collection.remover
    @collection.internally_instrumented
    def remove(self, value, _sa_initiator=None):
        """Remove an item by value, consulting the keyfunc for the key."""
        key = self.keyfunc(value)
        # Let self[key] raise if key is not in this collection
        # testlib.pragma exempt:__ne__
        if self[key] != value:
            raise sa_exc.InvalidRequestError(
                "Can not remove '%s': collection holds '%s' for key '%s'. "
                "Possible cause: is the MappedCollection key function "
                "based on mutable properties or properties that only obtain "
                "values after flush?" %
                (value, self[key], key))
        self.__delitem__(key, _sa_initiator)

    @collection.converter
    def _convert(self, dictlike):
        """Validate and convert a dict-like object into values for set()ing.

        This is called behind the scenes when a MappedCollection is replaced
        entirely by another collection, as in::

          myobj.mappedcollection = {'a':obj1, 'b': obj2} # ...

        Raises a TypeError if the key in any (key, value) pair in the dictlike
        object does not match the key that this collection's keyfunc would
        have assigned for that value.
        """
        for incoming_key, value in util.dictlike_iteritems(dictlike):
            new_key = self.keyfunc(value)
            # Reject pairs whose stated key disagrees with the keyfunc;
            # a silent mismatch would make the value unreachable later.
            if incoming_key != new_key:
                raise TypeError(
                    "Found incompatible key %r for value %r; this "
                    "collection's "
                    "keying function requires a key of %r for this value." % (
                        incoming_key, value, new_key))
            yield value
# Eagerly ensure instrumentation is associated with these built-in
# classes; if a user-defined class subclasses these and uses
# @internally_instrumented, the superclass is otherwise not
# instrumented.  see [ticket:2406].
_instrument_class(MappedCollection)
_instrument_class(InstrumentedList)
_instrument_class(InstrumentedSet)
|
unknown
|
codeparrot/codeparrot-clean
| ||
# Copyright (c) 2015 Ansible, Inc.
# All Rights Reserved.
# Python
import os
import re
import cgi
import dateutil
import time
import socket
import subprocess
import sys
import logging
import requests
from base64 import b64encode
from collections import OrderedDict
# Django
from django.conf import settings
from django.core.exceptions import FieldError
from django.db.models import Q, Count, F
from django.db import IntegrityError, transaction, connection
from django.shortcuts import get_object_or_404
from django.utils.encoding import smart_text, force_text
from django.utils.safestring import mark_safe
from django.utils.timezone import now
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.cache import never_cache
from django.template.loader import render_to_string
from django.core.servers.basehttp import FileWrapper
from django.http import HttpResponse
from django.contrib.contenttypes.models import ContentType
from django.utils.translation import ugettext_lazy as _
# Django REST Framework
from rest_framework.exceptions import PermissionDenied, ParseError
from rest_framework.parsers import FormParser
from rest_framework.permissions import AllowAny, IsAuthenticated, SAFE_METHODS
from rest_framework.response import Response
from rest_framework.settings import api_settings
from rest_framework.views import exception_handler
from rest_framework import status
# Django REST Framework YAML
from rest_framework_yaml.parsers import YAMLParser
from rest_framework_yaml.renderers import YAMLRenderer
# QSStats
import qsstats
# ANSIConv
import ansiconv
# Python Social Auth
from social.backends.utils import load_backends
# AWX
from awx.main.tasks import send_notifications
from awx.main.access import get_user_queryset
from awx.main.ha import is_ha_environment
from awx.api.authentication import TaskAuthentication, TokenGetAuthentication
from awx.api.filters import V1CredentialFilterBackend
from awx.api.generics import get_view_name
from awx.api.generics import * # noqa
from awx.api.versioning import reverse, get_request_version
from awx.conf.license import get_license, feature_enabled, feature_exists, LicenseForbids
from awx.main.models import * # noqa
from awx.main.utils import * # noqa
from awx.main.utils import (
callback_filter_out_ansible_extra_vars,
decrypt_field,
)
from awx.main.utils.filters import SmartFilter
from awx.main.utils.insights import filter_insights_api_response
from awx.api.permissions import * # noqa
from awx.api.renderers import * # noqa
from awx.api.serializers import * # noqa
from awx.api.metadata import RoleMetadata, JobTypeMetadata
from awx.main.consumers import emit_channel_notification
from awx.main.models.unified_jobs import ACTIVE_STATES
from awx.main.scheduler.tasks import run_job_complete
# Module-level logger shared by all API views in this module.
logger = logging.getLogger('awx.api.views')
def api_exception_handler(exc, context):
    '''
    Override default API exception handler to catch IntegrityError exceptions.
    '''
    # Translate database integrity and ORM field errors into DRF
    # ParseError (HTTP 400) before delegating to the stock handler.
    # Checked in the original order; a converted ParseError matches
    # neither subsequent class.
    for translated_cls in (IntegrityError, FieldError):
        if isinstance(exc, translated_cls):
            exc = ParseError(exc.args[0])
    return exception_handler(exc, context)
class ActivityStreamEnforcementMixin(object):
    """
    Mixin to check that license supports activity streams.
    """
    def check_permissions(self, request):
        # Every request is rejected unless the installed license carries
        # the activity-streams feature flag.
        licensed = feature_enabled('activity_streams')
        if not licensed:
            raise LicenseForbids(_('Your license does not allow use of the activity stream.'))
        return super(ActivityStreamEnforcementMixin, self).check_permissions(request)
class SystemTrackingEnforcementMixin(object):
    """
    Mixin to check that license supports system tracking.
    """
    def check_permissions(self, request):
        # Every request is rejected unless the installed license carries
        # the system-tracking feature flag.
        licensed = feature_enabled('system_tracking')
        if not licensed:
            raise LicenseForbids(_('Your license does not permit use of system tracking.'))
        return super(SystemTrackingEnforcementMixin, self).check_permissions(request)
class WorkflowsEnforcementMixin(object):
    """
    Mixin to check that license supports workflows.
    """
    def check_permissions(self, request):
        # Read-only and delete requests are always permitted; any other
        # method requires the workflows entitlement.  (feature_enabled is
        # evaluated first, matching the original short-circuit order.)
        unlicensed = not feature_enabled('workflows')
        if unlicensed and request.method not in ('GET', 'OPTIONS', 'DELETE'):
            raise LicenseForbids(_('Your license does not allow use of workflows.'))
        return super(WorkflowsEnforcementMixin, self).check_permissions(request)
class UnifiedJobDeletionMixin(object):
    '''
    Special handling when deleting a running unified job object.

    Refuses deletion while the job itself, or the workflow job it belongs
    to, is in an active state.
    '''
    def destroy(self, request, *args, **kwargs):
        obj = self.get_object()
        if not request.user.can_access(self.model, 'delete', obj):
            raise PermissionDenied()
        try:
            # If this job is a node inside a workflow, deletion is refused
            # while that workflow is still running.
            if obj.unified_job_node.workflow_job.status in ACTIVE_STATES:
                raise PermissionDenied(detail=_('Cannot delete job resource when associated workflow job is running.'))
        except self.model.unified_job_node.RelatedObjectDoesNotExist:
            # Not part of a workflow -- nothing further to check here.
            pass
        if obj.status in ACTIVE_STATES:
            raise PermissionDenied(detail=_("Cannot delete running job resource."))
        obj.delete()
        return Response(status=status.HTTP_204_NO_CONTENT)
class ApiRootView(APIView):

    # Anonymous access: the version index is public.
    authentication_classes = []
    permission_classes = (AllowAny,)
    view_name = _('REST API')
    versioning_class = None

    def get(self, request, format=None):
        '''List the API versions this server supports.'''
        v1 = reverse('api:api_v1_root_view', kwargs={'version': 'v1'})
        v2 = reverse('api:api_v2_root_view', kwargs={'version': 'v2'})
        data = {
            'description': _('AWX REST API'),
            'current_version': v2,
            'available_versions': {'v1': v1, 'v2': v2},
        }
        # Rebranding licensees may surface a custom logo/login blurb.
        if feature_enabled('rebranding'):
            data['custom_logo'] = settings.CUSTOM_LOGO
            data['custom_login_info'] = settings.CUSTOM_LOGIN_INFO
        return Response(data)
class ApiVersionRootView(APIView):
    authentication_classes = []
    permission_classes = (AllowAny,)
    def get(self, request, format=None):
        ''' list top level resources '''
        # (key, url name) pairs in the exact order they appear in the response.
        endpoints = [
            ('authtoken', 'api:auth_token_view'),
            ('ping', 'api:api_v1_ping_view'),
            ('instances', 'api:instance_list'),
            ('instance_groups', 'api:instance_group_list'),
            ('config', 'api:api_v1_config_view'),
            ('settings', 'api:setting_category_list'),
            ('me', 'api:user_me_list'),
            ('dashboard', 'api:dashboard_view'),
            ('organizations', 'api:organization_list'),
            ('users', 'api:user_list'),
            ('projects', 'api:project_list'),
            ('project_updates', 'api:project_update_list'),
            ('teams', 'api:team_list'),
            ('credentials', 'api:credential_list'),
        ]
        # credential_types is a v2-only resource.
        if get_request_version(request) > 1:
            endpoints.append(('credential_types', 'api:credential_type_list'))
        endpoints += [
            ('inventory', 'api:inventory_list'),
            ('inventory_scripts', 'api:inventory_script_list'),
            ('inventory_sources', 'api:inventory_source_list'),
            ('inventory_updates', 'api:inventory_update_list'),
            ('groups', 'api:group_list'),
            ('hosts', 'api:host_list'),
            ('job_templates', 'api:job_template_list'),
            ('jobs', 'api:job_list'),
            ('job_events', 'api:job_event_list'),
            ('ad_hoc_commands', 'api:ad_hoc_command_list'),
            ('system_job_templates', 'api:system_job_template_list'),
            ('system_jobs', 'api:system_job_list'),
            ('schedules', 'api:schedule_list'),
            ('roles', 'api:role_list'),
            ('notification_templates', 'api:notification_template_list'),
            ('notifications', 'api:notification_list'),
            ('labels', 'api:label_list'),
            ('unified_job_templates', 'api:unified_job_template_list'),
            ('unified_jobs', 'api:unified_job_list'),
            ('activity_stream', 'api:activity_stream_list'),
            ('workflow_job_templates', 'api:workflow_job_template_list'),
            ('workflow_jobs', 'api:workflow_job_list'),
            ('workflow_job_template_nodes', 'api:workflow_job_template_node_list'),
            ('workflow_job_nodes', 'api:workflow_job_node_list'),
        ]
        data = OrderedDict((key, reverse(url_name, request=request))
                           for key, url_name in endpoints)
        return Response(data)
class ApiV1RootView(ApiVersionRootView):
    # v1 API root; endpoint listing comes from ApiVersionRootView.
    view_name = _('Version 1')
class ApiV2RootView(ApiVersionRootView):
    # v2 API root; the parent class adds v2-only endpoints (e.g.
    # credential_types) based on get_request_version.
    view_name = _('Version 2')
    new_in_320 = True
    new_in_api_v2 = True
class ApiV1PingView(APIView):
    """A simple view that reports very basic information about this
    instance, which is acceptable to be public information.
    """
    permission_classes = (AllowAny,)
    authentication_classes = ()
    view_name = _('Ping')
    new_in_210 = True
    def get(self, request, format=None):
        """Return some basic information about this instance.
        Everything returned here should be considered public / insecure, as
        this requires no auth and is intended for use by the installer process.
        """
        response = {
            'ha': is_ha_environment(),
            'version': get_awx_version(),
            'active_node': settings.CLUSTER_HOST_ID,
        }
        response['instances'] = []
        for instance in Instance.objects.all():
            response['instances'].append(dict(node=instance.hostname, heartbeat=instance.modified,
                                              capacity=instance.capacity, version=instance.version))
        # Sort by hostname explicitly: plain list.sort() on a list of dicts
        # raises TypeError on Python 3 (dicts are unorderable) and produced
        # no meaningful ordering on Python 2.
        response['instances'].sort(key=lambda i: i['node'])
        response['instance_groups'] = []
        for instance_group in InstanceGroup.objects.all():
            response['instance_groups'].append(dict(name=instance_group.name,
                                                    capacity=instance_group.capacity,
                                                    instances=[x.hostname for x in instance_group.instances.all()]))
        return Response(response)
class ApiV1ConfigView(APIView):
    # Sitewide configuration endpoint: read for any authenticated user,
    # write (license install/removal) for superusers only.
    permission_classes = (IsAuthenticated,)
    view_name = _('Configuration')
    def check_permissions(self, request):
        super(ApiV1ConfigView, self).check_permissions(request)
        # Non-superusers may only use the read-style methods.
        if not request.user.is_superuser and request.method.lower() not in {'options', 'head', 'get'}:
            self.permission_denied(request)  # Raises PermissionDenied exception.
    def get(self, request, format=None):
        '''Return various sitewide configuration settings.'''
        # Only privileged users may see the raw license key.
        if request.user.is_superuser or request.user.is_system_auditor:
            license_data = get_license(show_key=True)
        else:
            license_data = get_license(show_key=False)
        if not license_data.get('valid_key', False):
            license_data = {}
        if license_data and 'features' in license_data and 'activity_streams' in license_data['features']:
            # FIXME: Make the final setting value dependent on the feature?
            license_data['features']['activity_streams'] &= settings.ACTIVITY_STREAM_ENABLED
        pendo_state = settings.PENDO_TRACKING_STATE if settings.PENDO_TRACKING_STATE in ('off', 'anonymous', 'detailed') else 'off'
        data = dict(
            time_zone=settings.TIME_ZONE,
            license_info=license_data,
            version=get_awx_version(),
            ansible_version=get_ansible_version(),
            eula=render_to_string("eula.md") if license_data.get('license_type', 'UNLICENSED') != 'open' else '',
            analytics_status=pendo_state
        )
        # If LDAP is enabled, user_ldap_fields will return a list of field
        # names that are managed by LDAP and should be read-only for users with
        # a non-empty ldap_dn attribute.
        if getattr(settings, 'AUTH_LDAP_SERVER_URI', None) and feature_enabled('ldap'):
            user_ldap_fields = ['username', 'password']
            user_ldap_fields.extend(getattr(settings, 'AUTH_LDAP_USER_ATTR_MAP', {}).keys())
            user_ldap_fields.extend(getattr(settings, 'AUTH_LDAP_USER_FLAGS_BY_GROUP', {}).keys())
            data['user_ldap_fields'] = user_ldap_fields
        # Project path information is limited to admins/auditors.
        if request.user.is_superuser \
                or request.user.is_system_auditor \
                or Organization.accessible_objects(request.user, 'admin_role').exists() \
                or Organization.accessible_objects(request.user, 'auditor_role').exists():
            data.update(dict(
                project_base_dir = settings.PROJECTS_ROOT,
                project_local_paths = Project.get_local_path_choices(),
            ))
        return Response(data)
    def post(self, request):
        '''Install a new license after validating the accepted EULA.'''
        if not isinstance(request.data, dict):
            return Response({"error": _("Invalid license data")}, status=status.HTTP_400_BAD_REQUEST)
        if "eula_accepted" not in request.data:
            return Response({"error": _("Missing 'eula_accepted' property")}, status=status.HTTP_400_BAD_REQUEST)
        try:
            eula_accepted = to_python_boolean(request.data["eula_accepted"])
        except ValueError:
            return Response({"error": _("'eula_accepted' value is invalid")}, status=status.HTTP_400_BAD_REQUEST)
        if not eula_accepted:
            return Response({"error": _("'eula_accepted' must be True")}, status=status.HTTP_400_BAD_REQUEST)
        request.data.pop("eula_accepted")
        # Round-trip through JSON to validate the payload is serializable.
        try:
            data_actual = json.dumps(request.data)
        except Exception:
            logger.info(smart_text(u"Invalid JSON submitted for license."),
                        extra=dict(actor=request.user.username))
            return Response({"error": _("Invalid JSON")}, status=status.HTTP_400_BAD_REQUEST)
        try:
            from awx.main.utils.common import get_licenser
            license_data = json.loads(data_actual)
            license_data_validated = get_licenser(**license_data).validate()
        except Exception:
            logger.warning(smart_text(u"Invalid license submitted."),
                           extra=dict(actor=request.user.username))
            return Response({"error": _("Invalid License")}, status=status.HTTP_400_BAD_REQUEST)
        # If the license is valid, write it to the database.
        if license_data_validated['valid_key']:
            settings.LICENSE = license_data
            settings.TOWER_URL_BASE = "{}://{}".format(request.scheme, request.get_host())
            return Response(license_data_validated)
        logger.warning(smart_text(u"Invalid license submitted."),
                       extra=dict(actor=request.user.username))
        return Response({"error": _("Invalid license")}, status=status.HTTP_400_BAD_REQUEST)
    def delete(self, request):
        '''Remove the current license.'''
        try:
            settings.LICENSE = {}
            return Response(status=status.HTTP_204_NO_CONTENT)
        except Exception:
            # Bug fix: the previous handler was a bare ``except:`` whose error
            # message referenced an undefined ``has_error`` name, so any
            # failure here raised NameError instead of returning a response.
            logger.exception(smart_text(u"Failed to remove license."),
                             extra=dict(actor=request.user.username))
            return Response({"error": _("Failed to remove license")}, status=status.HTTP_400_BAD_REQUEST)
class DashboardView(APIView):
    # Aggregated summary counts for the UI dashboard, scoped to what the
    # requesting user is allowed to see (via get_user_queryset).
    view_name = _("Dashboard")
    new_in_14 = True
    def get(self, request, format=None):
        ''' Show Dashboard Details '''
        data = OrderedDict()
        data['related'] = {'jobs_graph': reverse('api:dashboard_jobs_graph_view', request=request)}
        # Inventory totals and failure counts.
        user_inventory = get_user_queryset(request.user, Inventory)
        inventory_with_failed_hosts = user_inventory.filter(hosts_with_active_failures__gt=0)
        user_inventory_external = user_inventory.filter(has_inventory_sources=True)
        # Sum of failing inventory sources across all visible inventories
        # (iterates the queryset in Python rather than aggregating in SQL).
        failed_inventory = sum(i.inventory_sources_with_failures for i in user_inventory)
        data['inventories'] = {'url': reverse('api:inventory_list', request=request),
                               'total': user_inventory.count(),
                               'total_with_inventory_source': user_inventory_external.count(),
                               'job_failed': inventory_with_failed_hosts.count(),
                               'inventory_failed': failed_inventory}
        # Inventory source stats; only EC2 is broken out here.
        user_inventory_sources = get_user_queryset(request.user, InventorySource)
        ec2_inventory_sources = user_inventory_sources.filter(source='ec2')
        ec2_inventory_failed = ec2_inventory_sources.filter(status='failed')
        data['inventory_sources'] = {}
        data['inventory_sources']['ec2'] = {'url': reverse('api:inventory_source_list', request=request) + "?source=ec2",
                                            'failures_url': reverse('api:inventory_source_list', request=request) + "?source=ec2&status=failed",
                                            'label': 'Amazon EC2',
                                            'total': ec2_inventory_sources.count(),
                                            'failed': ec2_inventory_failed.count()}
        user_groups = get_user_queryset(request.user, Group)
        # NOTE(review): the failure counts below use Group.objects (all
        # groups) while 'total' uses the user-scoped queryset — confirm
        # whether this mismatch is intentional.
        groups_job_failed = (Group.objects.filter(hosts_with_active_failures__gt=0) | Group.objects.filter(groups_with_active_failures__gt=0)).count()
        groups_inventory_failed = Group.objects.filter(inventory_sources__last_job_failed=True).count()
        data['groups'] = {'url': reverse('api:group_list', request=request),
                          'failures_url': reverse('api:group_list', request=request) + "?has_active_failures=True",
                          'total': user_groups.count(),
                          'job_failed': groups_job_failed,
                          'inventory_failed': groups_inventory_failed}
        # Host totals and failures.
        user_hosts = get_user_queryset(request.user, Host)
        user_hosts_failed = user_hosts.filter(has_active_failures=True)
        data['hosts'] = {'url': reverse('api:host_list', request=request),
                         'failures_url': reverse('api:host_list', request=request) + "?has_active_failures=True",
                         'total': user_hosts.count(),
                         'failed': user_hosts_failed.count()}
        # Project totals, failures, and a per-SCM-type breakdown.
        user_projects = get_user_queryset(request.user, Project)
        user_projects_failed = user_projects.filter(last_job_failed=True)
        data['projects'] = {'url': reverse('api:project_list', request=request),
                            'failures_url': reverse('api:project_list', request=request) + "?last_job_failed=True",
                            'total': user_projects.count(),
                            'failed': user_projects_failed.count()}
        git_projects = user_projects.filter(scm_type='git')
        git_failed_projects = git_projects.filter(last_job_failed=True)
        svn_projects = user_projects.filter(scm_type='svn')
        svn_failed_projects = svn_projects.filter(last_job_failed=True)
        hg_projects = user_projects.filter(scm_type='hg')
        hg_failed_projects = hg_projects.filter(last_job_failed=True)
        data['scm_types'] = {}
        data['scm_types']['git'] = {'url': reverse('api:project_list', request=request) + "?scm_type=git",
                                    'label': 'Git',
                                    'failures_url': reverse('api:project_list', request=request) + "?scm_type=git&last_job_failed=True",
                                    'total': git_projects.count(),
                                    'failed': git_failed_projects.count()}
        data['scm_types']['svn'] = {'url': reverse('api:project_list', request=request) + "?scm_type=svn",
                                    'label': 'Subversion',
                                    'failures_url': reverse('api:project_list', request=request) + "?scm_type=svn&last_job_failed=True",
                                    'total': svn_projects.count(),
                                    'failed': svn_failed_projects.count()}
        data['scm_types']['hg'] = {'url': reverse('api:project_list', request=request) + "?scm_type=hg",
                                   'label': 'Mercurial',
                                   'failures_url': reverse('api:project_list', request=request) + "?scm_type=hg&last_job_failed=True",
                                   'total': hg_projects.count(),
                                   'failed': hg_failed_projects.count()}
        # Job totals and failures.
        user_jobs = get_user_queryset(request.user, Job)
        user_failed_jobs = user_jobs.filter(failed=True)
        data['jobs'] = {'url': reverse('api:job_list', request=request),
                        'failure_url': reverse('api:job_list', request=request) + "?failed=True",
                        'total': user_jobs.count(),
                        'failed': user_failed_jobs.count()}
        # Simple totals for the remaining resource types.
        user_list = get_user_queryset(request.user, User)
        team_list = get_user_queryset(request.user, Team)
        credential_list = get_user_queryset(request.user, Credential)
        job_template_list = get_user_queryset(request.user, JobTemplate)
        organization_list = get_user_queryset(request.user, Organization)
        data['users'] = {'url': reverse('api:user_list', request=request),
                         'total': user_list.count()}
        data['organizations'] = {'url': reverse('api:organization_list', request=request),
                                 'total': organization_list.count()}
        data['teams'] = {'url': reverse('api:team_list', request=request),
                         'total': team_list.count()}
        data['credentials'] = {'url': reverse('api:credential_list', request=request),
                               'total': credential_list.count()}
        data['job_templates'] = {'url': reverse('api:job_template_list', request=request),
                                 'total': job_template_list.count()}
        return Response(data)
class DashboardJobsGraphView(APIView):
    view_name = _("Dashboard Jobs Graphs")
    new_in_200 = True
    def get(self, request, format=None):
        period = request.query_params.get('period', 'month')
        job_type = request.query_params.get('job_type', 'all')
        user_unified_jobs = get_user_queryset(request.user, UnifiedJob)
        success_query = user_unified_jobs.filter(status='successful')
        failed_query = user_unified_jobs.filter(status='failed')
        # Narrow by job type via a dispatch table; unknown types (including
        # the default 'all') apply no additional filter.
        job_type_model = {'inv_sync': InventoryUpdate,
                          'playbook_run': Job,
                          'scm_update': ProjectUpdate}.get(job_type)
        if job_type_model is not None:
            success_query = success_query.filter(instance_of=job_type_model)
            failed_query = failed_query.filter(instance_of=job_type_model)
        success_qss = qsstats.QuerySetStats(success_query, 'finished')
        failed_qss = qsstats.QuerySetStats(failed_query, 'finished')
        start_date = now()
        # Map the requested period to (lookback delta, bucket interval).
        period_spec = {
            'month': (dateutil.relativedelta.relativedelta(months=1), 'days'),
            'week': (dateutil.relativedelta.relativedelta(weeks=1), 'days'),
            'day': (dateutil.relativedelta.relativedelta(days=1), 'hours'),
        }.get(period)
        if period_spec is None:
            return Response({'error': _('Unknown period "%s"') % str(period)}, status=status.HTTP_400_BAD_REQUEST)
        delta, interval = period_spec
        end_date = start_date - delta
        dashboard_data = {"jobs": {"successful": [], "failed": []}}
        for bucket, qss in (('successful', success_qss), ('failed', failed_qss)):
            for timestamp, count in qss.time_series(end_date, start_date, interval=interval):
                dashboard_data['jobs'][bucket].append([time.mktime(timestamp.timetuple()),
                                                       count])
        return Response(dashboard_data)
class InstanceList(ListAPIView):
    # Read-only list of cluster instances.
    view_name = _("Instances")
    model = Instance
    serializer_class = InstanceSerializer
    new_in_320 = True
class InstanceDetail(RetrieveAPIView):
    # Read-only detail view for a single cluster instance.
    view_name = _("Instance Detail")
    model = Instance
    serializer_class = InstanceSerializer
    new_in_320 = True
class InstanceUnifiedJobsList(SubListAPIView):
    view_name = _("Instance Running Jobs")
    model = UnifiedJob
    serializer_class = UnifiedJobSerializer
    parent_model = Instance
    new_in_320 = True
    def get_queryset(self):
        # User-visible unified jobs executing on this instance.
        instance = self.get_parent_object()
        return get_user_queryset(self.request.user, UnifiedJob).filter(
            execution_node=instance.hostname)
class InstanceInstanceGroupsList(SubListAPIView):
    # Instance groups an instance belongs to (via 'rampart_groups').
    view_name = _("Instance's Instance Groups")
    model = InstanceGroup
    serializer_class = InstanceGroupSerializer
    parent_model = Instance
    new_in_320 = True
    relationship = 'rampart_groups'
class InstanceGroupList(ListAPIView):
    # Read-only list of instance groups.
    view_name = _("Instance Groups")
    model = InstanceGroup
    serializer_class = InstanceGroupSerializer
    new_in_320 = True
class InstanceGroupDetail(RetrieveAPIView):
    # Read-only detail view for a single instance group.
    view_name = _("Instance Group Detail")
    model = InstanceGroup
    serializer_class = InstanceGroupSerializer
    new_in_320 = True
class InstanceGroupUnifiedJobsList(SubListAPIView):
    # Unified jobs associated with an instance group.
    view_name = _("Instance Group Running Jobs")
    model = UnifiedJob
    serializer_class = UnifiedJobSerializer
    parent_model = InstanceGroup
    relationship = "unifiedjob_set"
    new_in_320 = True
class InstanceGroupInstanceList(SubListAPIView):
    # Instances that are members of an instance group.
    view_name = _("Instance Group's Instances")
    model = Instance
    serializer_class = InstanceSerializer
    parent_model = InstanceGroup
    new_in_320 = True
    relationship = "instances"
class ScheduleList(ListAPIView):
    # Read-only list of schedules.
    view_name = _("Schedules")
    model = Schedule
    serializer_class = ScheduleSerializer
    new_in_148 = True
class ScheduleDetail(RetrieveUpdateDestroyAPIView):
    # Retrieve/update/delete a single schedule.
    model = Schedule
    serializer_class = ScheduleSerializer
    new_in_148 = True
class ScheduleUnifiedJobsList(SubListAPIView):
    # Unified jobs launched by a schedule.
    model = UnifiedJob
    serializer_class = UnifiedJobSerializer
    parent_model = Schedule
    relationship = 'unifiedjob_set'
    view_name = _('Schedule Jobs List')
    new_in_148 = True
class AuthView(APIView):
    # Public listing of configured social/enterprise auth backends and their
    # login URLs, filtered by license features.
    authentication_classes = []
    permission_classes = (AllowAny,)
    new_in_240 = True
    def get(self, request):
        from rest_framework.reverse import reverse
        data = OrderedDict()
        err_backend, err_message = request.session.get('social_auth_error', (None, None))
        # Return auth backends in consistent order: Google, GitHub, SAML.
        # Bug fix: dict.items() returns a view with no .sort() method on
        # Python 3, so sort with sorted() instead of sorting in place.
        auth_backends = sorted(
            load_backends(settings.AUTHENTICATION_BACKENDS, force_load=True).items(),
            key=lambda x: 'g' if x[0] == 'google-oauth2' else x[0])
        for name, backend in auth_backends:
            # Skip backends the current license does not allow.
            if (not feature_exists('enterprise_auth') and
                not feature_enabled('ldap')) or \
               (not feature_enabled('enterprise_auth') and
                name in ['saml', 'radius']):
                continue
            login_url = reverse('social:begin', args=(name,))
            complete_url = request.build_absolute_uri(reverse('social:complete', args=(name,)))
            backend_data = {
                'login_url': login_url,
                'complete_url': complete_url,
            }
            if name == 'saml':
                # SAML exposes one entry per configured identity provider.
                backend_data['metadata_url'] = reverse('sso:saml_metadata')
                for idp in sorted(settings.SOCIAL_AUTH_SAML_ENABLED_IDPS.keys()):
                    saml_backend_data = dict(backend_data.items())
                    saml_backend_data['login_url'] = '%s?idp=%s' % (login_url, idp)
                    full_backend_name = '%s:%s' % (name, idp)
                    if (err_backend == full_backend_name or err_backend == name) and err_message:
                        saml_backend_data['error'] = err_message
                    data[full_backend_name] = saml_backend_data
            else:
                if err_backend == name and err_message:
                    backend_data['error'] = err_message
                data[name] = backend_data
        return Response(data)
class AuthTokenView(APIView):
    # Obtain, refresh, and invalidate auth tokens.
    authentication_classes = []
    permission_classes = (AllowAny,)
    serializer_class = AuthTokenSerializer
    model = AuthToken
    def get_serializer(self, *args, **kwargs):
        serializer = self.serializer_class(*args, **kwargs)
        # Override when called from browsable API to generate raw data form;
        # update serializer "validated" data to be displayed by the raw data
        # form.
        if hasattr(self, '_raw_data_form_marker'):
            # Always remove read only fields from serializer.
            # Bug fix: iterate a snapshot — deleting from the dict while
            # iterating its items() view raises RuntimeError on Python 3.
            for name, field in list(serializer.fields.items()):
                if getattr(field, 'read_only', None):
                    del serializer.fields[name]
            serializer._data = self.update_raw_data(serializer.data)
        return serializer
    @never_cache
    def post(self, request):
        '''Validate credentials and return a (possibly reused) auth token.'''
        serializer = self.get_serializer(data=request.data)
        if serializer.is_valid():
            request_hash = AuthToken.get_request_hash(self.request)
            try:
                # Reuse an existing un-expired, still-valid token for this
                # client fingerprint if one exists.
                token = AuthToken.objects.filter(user=serializer.validated_data['user'],
                                                 request_hash=request_hash,
                                                 expires__gt=now(),
                                                 reason='')[0]
                token.refresh()
                if 'username' in request.data:
                    logger.info(smart_text(u"User {} logged in".format(request.data['username'])),
                                extra=dict(actor=request.data['username']))
            except IndexError:
                token = AuthToken.objects.create(user=serializer.validated_data['user'],
                                                 request_hash=request_hash)
                if 'username' in request.data:
                    logger.info(smart_text(u"User {} logged in".format(request.data['username'])),
                                extra=dict(actor=request.data['username']))
            # Get user un-expired tokens that are not invalidated that are
            # over the configured limit.
            # Mark them as invalid and inform the user
            invalid_tokens = AuthToken.get_tokens_over_limit(serializer.validated_data['user'])
            for t in invalid_tokens:
                emit_channel_notification('control-limit_reached', dict(group_name='control',
                                                                        reason=force_text(AuthToken.reason_long('limit_reached')),
                                                                        token_key=t.key))
                t.invalidate(reason='limit_reached')
            # Note: This header is normally added in the middleware whenever an
            # auth token is included in the request header.
            headers = {
                'Auth-Token-Timeout': int(settings.AUTH_TOKEN_EXPIRATION),
                'Pragma': 'no-cache',
            }
            return Response({'token': token.key, 'expires': token.expires}, headers=headers)
        if 'username' in request.data:
            logger.warning(smart_text(u"Login failed for user {}".format(request.data['username'])),
                           extra=dict(actor=request.data['username']))
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
    def delete(self, request):
        '''Invalidate the token presented in the Authorization header.'''
        if 'HTTP_AUTHORIZATION' in request.META:
            # Raw string for the regex: "\s" in a plain string is an invalid
            # escape sequence (DeprecationWarning on modern Python).
            token_match = re.match(r"Token\s(.+)", request.META['HTTP_AUTHORIZATION'])
            if token_match:
                filter_tokens = AuthToken.objects.filter(key=token_match.groups()[0])
                if filter_tokens.exists():
                    filter_tokens[0].invalidate()
        return Response(status=status.HTTP_204_NO_CONTENT)
class OrganizationCountsMixin(object):
    # Adds 'related_field_counts' to the serializer context: per-organization
    # counts of inventories, teams, users/admins, projects, and job
    # templates, computed with a small fixed number of aggregate queries.
    def get_serializer_context(self, *args, **kwargs):
        full_context = super(OrganizationCountsMixin, self).get_serializer_context(*args, **kwargs)
        if self.request is None:
            return full_context
        db_results = {}
        # Organizations visible to the requesting user.
        org_qs = self.model.accessible_objects(self.request.user, 'read_role')
        org_id_list = org_qs.values('id')
        if len(org_id_list) == 0:
            if self.request.method == 'POST':
                full_context['related_field_counts'] = {}
            return full_context
        inv_qs = Inventory.accessible_objects(self.request.user, 'read_role')
        project_qs = Project.accessible_objects(self.request.user, 'read_role')
        # Produce counts of Foreign Key relationships
        db_results['inventories'] = inv_qs\
            .values('organization').annotate(Count('organization')).order_by('organization')
        db_results['teams'] = Team.accessible_objects(
            self.request.user, 'read_role').values('organization').annotate(
            Count('organization')).order_by('organization')
        JT_project_reference = 'project__organization'
        JT_inventory_reference = 'inventory__organization'
        # Job templates are counted twice and merged later: by project org
        # (excluding those whose project org equals their inventory org, to
        # avoid double counting) and by inventory org.
        db_results['job_templates_project'] = JobTemplate.accessible_objects(
            self.request.user, 'read_role').exclude(
            project__organization=F(JT_inventory_reference)).values(JT_project_reference).annotate(
            Count(JT_project_reference)).order_by(JT_project_reference)
        db_results['job_templates_inventory'] = JobTemplate.accessible_objects(
            self.request.user, 'read_role').values(JT_inventory_reference).annotate(
            Count(JT_inventory_reference)).order_by(JT_inventory_reference)
        db_results['projects'] = project_qs\
            .values('organization').annotate(Count('organization')).order_by('organization')
        # Other members and admins of organization are always viewable
        db_results['users'] = org_qs.annotate(
            users=Count('member_role__members', distinct=True),
            admins=Count('admin_role__members', distinct=True)
        ).values('id', 'users', 'admins')
        # Initialize every visible org with zeroed counts.
        count_context = {}
        for org in org_id_list:
            org_id = org['id']
            count_context[org_id] = {
                'inventories': 0, 'teams': 0, 'users': 0, 'job_templates': 0,
                'admins': 0, 'projects': 0}
        # Fold each aggregate result back into the per-org dicts; the org id
        # lives under a different key depending on the query.
        for res, count_qs in db_results.items():
            if res == 'job_templates_project':
                org_reference = JT_project_reference
            elif res == 'job_templates_inventory':
                org_reference = JT_inventory_reference
            elif res == 'users':
                org_reference = 'id'
            else:
                org_reference = 'organization'
            for entry in count_qs:
                org_id = entry[org_reference]
                if org_id in count_context:
                    if res == 'users':
                        count_context[org_id]['admins'] = entry['admins']
                        count_context[org_id]['users'] = entry['users']
                        continue
                    count_context[org_id][res] = entry['%s__count' % org_reference]
        # Combine the counts for job templates by project and inventory
        for org in org_id_list:
            org_id = org['id']
            count_context[org_id]['job_templates'] = 0
            for related_path in ['job_templates_project', 'job_templates_inventory']:
                if related_path in count_context[org_id]:
                    count_context[org_id]['job_templates'] += count_context[org_id].pop(related_path)
        full_context['related_field_counts'] = count_context
        return full_context
class OrganizationList(OrganizationCountsMixin, ListCreateAPIView):
    model = Organization
    serializer_class = OrganizationSerializer
    def get_queryset(self):
        # Visible organizations with the role/user relations pre-fetched.
        return (Organization.accessible_objects(self.request.user, 'read_role')
                .select_related('admin_role', 'auditor_role', 'member_role', 'read_role')
                .prefetch_related('created_by', 'modified_by'))
    def create(self, request, *args, **kwargs):
        """Create a new organization.

        Raises LicenseForbids when an organization already exists and the
        license does not permit multiple organizations.
        """
        # Sanity check: without the multiple-organizations feature, creation
        # is allowed only while no organization exists at all.
        if not feature_enabled('multiple_organizations') and self.model.objects.exists():
            raise LicenseForbids(_('Your license only permits a single '
                                   'organization to exist.'))
        return super(OrganizationList, self).create(request, *args, **kwargs)
class OrganizationDetail(RetrieveUpdateDestroyAPIView):
    model = Organization
    serializer_class = OrganizationSerializer
    def get_serializer_context(self, *args, **kwargs):
        # Attach related-resource counts for this single organization.
        context = super(OrganizationDetail, self).get_serializer_context(*args, **kwargs)
        if not hasattr(self, 'kwargs'):
            return context
        org_id = int(self.kwargs['pk'])
        direct_counts = Organization.objects.filter(id=org_id).annotate(
            users=Count('member_role__members', distinct=True),
            admins=Count('admin_role__members', distinct=True)
        ).values('users', 'admins')
        if not direct_counts:
            # Organization does not exist; nothing to add.
            return context
        counts = direct_counts[0]
        access_kwargs = {'accessor': self.request.user, 'role_field': 'read_role'}
        counts['inventories'] = Inventory.accessible_objects(**access_kwargs).filter(
            organization__id=org_id).count()
        counts['teams'] = Team.accessible_objects(**access_kwargs).filter(
            organization__id=org_id).count()
        counts['projects'] = Project.accessible_objects(**access_kwargs).filter(
            organization__id=org_id).count()
        counts['job_templates'] = JobTemplate.accessible_objects(**access_kwargs).filter(
            project__organization__id=org_id).count()
        context['related_field_counts'] = {org_id: counts}
        return context
class OrganizationInventoriesList(SubListAPIView):
    # Inventories belonging to an organization.
    model = Inventory
    serializer_class = InventorySerializer
    parent_model = Organization
    relationship = 'inventories'
class BaseUsersList(SubListCreateAttachDetachAPIView):
    # Shared base for user sub-lists that also honors the 'is_system_auditor'
    # faux-field when a user is created through the sub-list.
    def post(self, request, *args, **kwargs):
        ret = super(BaseUsersList, self).post(request, *args, **kwargs)
        try:
            if ret.data is not None and request.data.get('is_system_auditor', False):
                # This is a faux-field that just maps to checking the system
                # auditor role member list.. unfortunately this means we can't
                # set it on creation, and thus needs to be set here.
                user = User.objects.get(id=ret.data['id'])
                user.is_system_auditor = request.data['is_system_auditor']
                ret.data['is_system_auditor'] = request.data['is_system_auditor']
        except AttributeError as exc:
            # Bug fix: this was a bare print() debug leftover writing to
            # stdout; record the failure in the log instead.
            logger.warning(u'Failed to apply is_system_auditor flag: %s', exc)
        return ret
class OrganizationUsersList(BaseUsersList):
    # Members of an organization (member_role).
    model = User
    serializer_class = UserSerializer
    parent_model = Organization
    relationship = 'member_role.members'
class OrganizationAdminsList(BaseUsersList):
    # Administrators of an organization (admin_role).
    model = User
    serializer_class = UserSerializer
    parent_model = Organization
    relationship = 'admin_role.members'
class OrganizationProjectsList(SubListCreateAttachDetachAPIView):
    # Projects owned by an organization; creation sets the org automatically
    # via parent_key.
    model = Project
    serializer_class = ProjectSerializer
    parent_model = Organization
    relationship = 'projects'
    parent_key = 'organization'
class OrganizationWorkflowJobTemplatesList(SubListCreateAttachDetachAPIView):
    # Workflow job templates owned by an organization.
    model = WorkflowJobTemplate
    serializer_class = WorkflowJobTemplateListSerializer
    parent_model = Organization
    relationship = 'workflows'
    parent_key = 'organization'
    new_in_310 = True
class OrganizationTeamsList(SubListCreateAttachDetachAPIView):
    # Teams belonging to an organization.
    model = Team
    serializer_class = TeamSerializer
    parent_model = Organization
    relationship = 'teams'
    parent_key = 'organization'
class OrganizationActivityStreamList(ActivityStreamEnforcementMixin, SubListAPIView):
    # Activity stream entries for an organization; gated on the
    # activity-streams license feature by the mixin.
    model = ActivityStream
    serializer_class = ActivityStreamSerializer
    parent_model = Organization
    relationship = 'activitystream_set'
    new_in_145 = True
class OrganizationNotificationTemplatesList(SubListCreateAttachDetachAPIView):
    # Notification templates owned by an organization.
    model = NotificationTemplate
    serializer_class = NotificationTemplateSerializer
    parent_model = Organization
    relationship = 'notification_templates'
    parent_key = 'organization'
class OrganizationNotificationTemplatesAnyList(SubListCreateAttachDetachAPIView):
    # Notification templates fired for any job outcome.
    model = NotificationTemplate
    serializer_class = NotificationTemplateSerializer
    parent_model = Organization
    relationship = 'notification_templates_any'
    new_in_300 = True
class OrganizationNotificationTemplatesErrorList(SubListCreateAttachDetachAPIView):
    # Notification templates fired on error.
    model = NotificationTemplate
    serializer_class = NotificationTemplateSerializer
    parent_model = Organization
    relationship = 'notification_templates_error'
    new_in_300 = True
class OrganizationNotificationTemplatesSuccessList(SubListCreateAttachDetachAPIView):
    # Notification templates fired on success.
    model = NotificationTemplate
    serializer_class = NotificationTemplateSerializer
    parent_model = Organization
    relationship = 'notification_templates_success'
    new_in_300 = True
class OrganizationInstanceGroupsList(SubListAttachDetachAPIView):
    # Instance groups assigned to an organization.
    model = InstanceGroup
    serializer_class = InstanceGroupSerializer
    parent_model = Organization
    relationship = 'instance_groups'
    new_in_320 = True
class OrganizationAccessList(ResourceAccessList):
    model = User # needs to be User for AccessLists's
    parent_model = Organization
    new_in_300 = True
class OrganizationObjectRolesList(SubListAPIView):
    model = Role
    serializer_class = RoleSerializer
    parent_model = Organization
    new_in_300 = True
    def get_queryset(self):
        # All roles attached directly to this organization object.
        organization = self.get_parent_object()
        parent_ct = ContentType.objects.get_for_model(self.parent_model)
        return Role.objects.filter(content_type=parent_ct, object_id=organization.pk)
class TeamList(ListCreateAPIView):
    model = Team
    serializer_class = TeamSerializer
    def get_queryset(self):
        # Visible teams with role/organization relations pre-selected;
        # order_by() clears any default ordering.
        return (Team.accessible_objects(self.request.user, 'read_role')
                .order_by()
                .select_related('admin_role', 'read_role', 'member_role', 'organization'))
class TeamDetail(RetrieveUpdateDestroyAPIView):
    # Retrieve/update/delete a single team.
    model = Team
    serializer_class = TeamSerializer
class TeamUsersList(BaseUsersList):
    # Members of a team (member_role).
    model = User
    serializer_class = UserSerializer
    parent_model = Team
    relationship = 'member_role.members'
class TeamRolesList(SubListAttachDetachAPIView):
    # Roles granted to a team (children of its member_role). The checks in
    # post() are security-sensitive and must run in this order.
    model = Role
    serializer_class = RoleSerializerWithParentAccess
    metadata_class = RoleMetadata
    parent_model = Team
    relationship='member_role.children'
    new_in_300 = True
    def get_queryset(self):
        # 404 on a missing team; explicit read-permission check on the parent.
        team = get_object_or_404(Team, pk=self.kwargs['pk'])
        if not self.request.user.can_access(Team, 'read', team):
            raise PermissionDenied()
        # Limit to roles this user may see; the team's own read_role is excluded.
        return Role.filter_visible_roles(self.request.user, team.member_role.children.all().exclude(pk=team.read_role.pk))
    def post(self, request, *args, **kwargs):
        sub_id = request.data.get('id', None)
        if not sub_id:
            # No role id supplied: defer to generic attach/detach handling.
            return super(TeamRolesList, self).post(request)
        role = get_object_or_400(Role, pk=sub_id)
        org_content_type = ContentType.objects.get_for_model(Organization)
        if role.content_type == org_content_type:
            data = dict(msg=_("You cannot assign an Organization role as a child role for a Team."))
            return Response(data, status=status.HTTP_400_BAD_REQUEST)
        if role.is_singleton():
            data = dict(msg=_("You cannot grant system-level permissions to a team."))
            return Response(data, status=status.HTTP_400_BAD_REQUEST)
        team = get_object_or_404(Team, pk=self.kwargs['pk'])
        credential_content_type = ContentType.objects.get_for_model(Credential)
        if role.content_type == credential_content_type:
            # Credential roles may only be granted within the team's own organization.
            if not role.content_object.organization or role.content_object.organization.id != team.organization.id:
                data = dict(msg=_("You cannot grant credential access to a team when the Organization field isn't set, or belongs to a different organization"))
                return Response(data, status=status.HTTP_400_BAD_REQUEST)
        return super(TeamRolesList, self).post(request, *args, **kwargs)
class TeamObjectRolesList(SubListAPIView):
    """Roles defined directly on a single Team object."""

    model = Role
    serializer_class = RoleSerializer
    parent_model = Team
    new_in_300 = True

    def get_queryset(self):
        # Every Role row whose generic FK points at this team.
        team = self.get_parent_object()
        team_ct = ContentType.objects.get_for_model(self.parent_model)
        return Role.objects.filter(content_type=team_ct, object_id=team.pk)
class TeamProjectsList(SubListAPIView):
    """Projects on which this team holds any role."""
    model = Project
    serializer_class = ProjectSerializer
    parent_model = Team
    def get_queryset(self):
        team = self.get_parent_object()
        self.check_parent_access(team)
        model_ct = ContentType.objects.get_for_model(self.model)
        parent_ct = ContentType.objects.get_for_model(self.parent_model)
        # Project roles that have this team among their role ancestors.
        proj_roles = Role.objects.filter(
            Q(ancestors__content_type=parent_ct) & Q(ancestors__object_id=team.pk),
            content_type=model_ct
        )
        # Intersect with projects the *requesting user* can read.
        return self.model.accessible_objects(self.request.user, 'read_role').filter(pk__in=[t.content_object.pk for t in proj_roles])
class TeamActivityStreamList(ActivityStreamEnforcementMixin, SubListAPIView):
    """Activity stream entries related to a team."""
    model = ActivityStream
    serializer_class = ActivityStreamSerializer
    parent_model = Team
    relationship = 'activitystream_set'
    new_in_145 = True
    def get_queryset(self):
        parent = self.get_parent_object()
        self.check_parent_access(parent)
        qs = self.request.user.get_queryset(self.model)
        # Also include entries for projects/credentials accessible via the
        # team. NOTE(review): accessible_objects is passed the Team object as
        # the accessor here (not a user) -- confirm this is intentional.
        return qs.filter(Q(team=parent) |
                         Q(project__in=Project.accessible_objects(parent, 'read_role')) |
                         Q(credential__in=Credential.accessible_objects(parent, 'read_role')))
class TeamAccessList(ResourceAccessList):
    """Users with access to a team."""
    model = User # needs to be User for AccessLists's
    parent_model = Team
    new_in_300 = True
class ProjectList(ListCreateAPIView):
    """Projects readable by the requesting user; POST creates a project."""

    model = Project
    serializer_class = ProjectSerializer
    capabilities_prefetch = ['admin', 'update']

    def get_queryset(self):
        qs = Project.accessible_objects(self.request.user, 'read_role')
        # Eager-load the FK rows and the prefetchable relations used by the
        # serializer to avoid per-row queries.
        related = ('organization', 'admin_role', 'use_role', 'update_role', 'read_role')
        return qs.select_related(*related).prefetch_related('last_job', 'created_by')
class ProjectDetail(RetrieveUpdateDestroyAPIView):
    """Retrieve, update, or delete a project."""
    model = Project
    serializer_class = ProjectSerializer
    def destroy(self, request, *args, **kwargs):
        obj = self.get_object()
        can_delete = request.user.can_access(Project, 'delete', obj)
        if not can_delete:
            raise PermissionDenied(_("Cannot delete project."))
        # Cancel any in-flight project updates before deleting.
        for pu in obj.project_updates.filter(status__in=['new', 'pending', 'waiting', 'running']):
            pu.cancel()
        return super(ProjectDetail, self).destroy(request, *args, **kwargs)
class ProjectPlaybooks(RetrieveAPIView):
    """Read-only view of a project rendered through its playbooks serializer."""
    model = Project
    serializer_class = ProjectPlaybooksSerializer
class ProjectInventories(RetrieveAPIView):
    """Read-only view of a project rendered through its inventories serializer."""
    model = Project
    serializer_class = ProjectInventoriesSerializer
class ProjectTeamsList(ListAPIView):
    """Teams granted any role on a project."""
    model = Team
    serializer_class = TeamSerializer
    def get_queryset(self):
        p = get_object_or_404(Project, pk=self.kwargs['pk'])
        if not self.request.user.can_access(Project, 'read', p):
            raise PermissionDenied()
        project_ct = ContentType.objects.get_for_model(Project)
        team_ct = ContentType.objects.get_for_model(self.model)
        # Team roles whose descendents include any role on this project.
        all_roles = Role.objects.filter(Q(descendents__content_type=project_ct) & Q(descendents__object_id=p.pk), content_type=team_ct)
        return self.model.accessible_objects(self.request.user, 'read_role').filter(pk__in=[t.content_object.pk for t in all_roles])
class ProjectSchedulesList(SubListCreateAPIView):
    """Schedules whose unified_job_template is this project."""
    view_name = _("Project Schedules")
    model = Schedule
    serializer_class = ScheduleSerializer
    parent_model = Project
    relationship = 'schedules'
    parent_key = 'unified_job_template'
    new_in_148 = True
class ProjectScmInventorySources(SubListAPIView):
    """Inventory sources whose source_project is this project."""
    view_name = _("Project SCM Inventory Sources")
    model = InventorySource
    serializer_class = InventorySourceSerializer
    parent_model = Project
    relationship = 'scm_inventory_sources'
    parent_key = 'source_project'
    new_in_320 = True
class ProjectActivityStreamList(ActivityStreamEnforcementMixin, SubListAPIView):
    """Activity stream entries for a project (and its credential, if set)."""
    model = ActivityStream
    serializer_class = ActivityStreamSerializer
    parent_model = Project
    relationship = 'activitystream_set'
    new_in_145 = True
    def get_queryset(self):
        parent = self.get_parent_object()
        self.check_parent_access(parent)
        qs = self.request.user.get_queryset(self.model)
        if parent is None:
            return qs
        elif parent.credential is None:
            return qs.filter(project=parent)
        # Include entries tied to the project's credential as well.
        return qs.filter(Q(project=parent) | Q(credential=parent.credential))
class ProjectNotificationTemplatesAnyList(SubListCreateAttachDetachAPIView):
    """Attach/detach list for a project's 'notification_templates_any' relation."""
    model = NotificationTemplate
    serializer_class = NotificationTemplateSerializer
    parent_model = Project
    relationship = 'notification_templates_any'
    new_in_300 = True
class ProjectNotificationTemplatesErrorList(SubListCreateAttachDetachAPIView):
    """Attach/detach list for a project's 'notification_templates_error' relation."""
    model = NotificationTemplate
    serializer_class = NotificationTemplateSerializer
    parent_model = Project
    relationship = 'notification_templates_error'
    new_in_300 = True
class ProjectNotificationTemplatesSuccessList(SubListCreateAttachDetachAPIView):
    """Attach/detach list for a project's 'notification_templates_success' relation."""
    model = NotificationTemplate
    serializer_class = NotificationTemplateSerializer
    parent_model = Project
    relationship = 'notification_templates_success'
    new_in_300 = True
class ProjectUpdatesList(SubListAPIView):
    """Project update jobs belonging to a project."""
    model = ProjectUpdate
    serializer_class = ProjectUpdateSerializer
    parent_model = Project
    relationship = 'project_updates'
    new_in_13 = True
class ProjectUpdateView(RetrieveAPIView):
    """GET shows update capability; POST launches an SCM update of the project."""

    model = Project
    serializer_class = ProjectUpdateViewSerializer
    permission_classes = (ProjectUpdatePermission,)
    new_in_13 = True

    def post(self, request, *args, **kwargs):
        project = self.get_object()
        # Guard clause: projects that cannot update reject POST outright.
        if not project.can_update:
            return self.http_method_not_allowed(request, *args, **kwargs)
        project_update = project.update()
        if not project_update:
            return Response({}, status=status.HTTP_400_BAD_REQUEST)
        return Response(
            {'project_update': project_update.id},
            headers={'Location': project_update.get_absolute_url(request=request)},
            status=status.HTTP_202_ACCEPTED)
class ProjectUpdateList(ListAPIView):
    """All project update jobs."""
    model = ProjectUpdate
    serializer_class = ProjectUpdateListSerializer
    new_in_13 = True
class ProjectUpdateDetail(UnifiedJobDeletionMixin, RetrieveDestroyAPIView):
    """Retrieve or delete a single project update."""
    model = ProjectUpdate
    serializer_class = ProjectUpdateSerializer
    new_in_13 = True
class ProjectUpdateCancel(RetrieveAPIView):
    """POST cancels a running project update, if it can still be cancelled."""

    model = ProjectUpdate
    serializer_class = ProjectUpdateCancelSerializer
    is_job_cancel = True
    new_in_13 = True

    def post(self, request, *args, **kwargs):
        update_job = self.get_object()
        # Guard clause: jobs past the cancellable stage reject POST.
        if not update_job.can_cancel:
            return self.http_method_not_allowed(request, *args, **kwargs)
        update_job.cancel()
        return Response(status=status.HTTP_202_ACCEPTED)
class ProjectUpdateNotificationsList(SubListAPIView):
    """Notifications emitted for a project update."""
    model = Notification
    serializer_class = NotificationSerializer
    parent_model = ProjectUpdate
    relationship = 'notifications'
    new_in_300 = True
class ProjectUpdateScmInventoryUpdates(SubListCreateAPIView):
    """Inventory updates whose source_project_update is this project update."""
    view_name = _("Project Update SCM Inventory Updates")
    model = InventoryUpdate
    serializer_class = InventoryUpdateSerializer
    parent_model = ProjectUpdate
    relationship = 'scm_inventory_updates'
    parent_key = 'source_project_update'
    new_in_320 = True
class ProjectAccessList(ResourceAccessList):
    """Users with access to a project."""
    model = User # needs to be User for AccessLists's
    parent_model = Project
    new_in_300 = True
class ProjectObjectRolesList(SubListAPIView):
    """Roles defined directly on a single Project object."""

    model = Role
    serializer_class = RoleSerializer
    parent_model = Project
    new_in_300 = True

    def get_queryset(self):
        # Every Role row whose generic FK points at this project.
        project = self.get_parent_object()
        project_ct = ContentType.objects.get_for_model(self.parent_model)
        return Role.objects.filter(content_type=project_ct, object_id=project.pk)
class UserList(ListCreateAPIView):
    """List users or create a new user.

    POST additionally honors the faux 'is_system_auditor' field, which maps
    to system-auditor role membership and therefore cannot be set by the
    serializer during creation.
    """
    model = User
    serializer_class = UserSerializer
    permission_classes = (UserPermission,)
    def post(self, request, *args, **kwargs):
        ret = super(UserList, self).post(request, *args, **kwargs)
        try:
            if request.data.get('is_system_auditor', False):
                # This is a faux-field that just maps to checking the system
                # auditor role member list.. unfortunately this means we can't
                # set it on creation, and thus needs to be set here.
                user = User.objects.get(id=ret.data['id'])
                user.is_system_auditor = request.data['is_system_auditor']
                ret.data['is_system_auditor'] = request.data['is_system_auditor']
        except AttributeError:
            # Best-effort: a failed create (no ret.data dict) simply skips the
            # auditor-flag fixup. (Removed a leftover debug print that wrote
            # the exception to stdout.)
            pass
        return ret
class UserMeList(ListAPIView):
    """The requesting user's own record, as a single-item list."""
    model = User
    serializer_class = UserSerializer
    view_name = _('Me')
    def get_queryset(self):
        return self.model.objects.filter(pk=self.request.user.pk)
class UserTeamsList(ListAPIView):
    """Teams the given user is a member or admin of (visible to the requester)."""
    model = User
    serializer_class = TeamSerializer
    def get_queryset(self):
        u = get_object_or_404(User, pk=self.kwargs['pk'])
        if not self.request.user.can_access(User, 'read', u):
            raise PermissionDenied()
        return Team.accessible_objects(self.request.user, 'read_role').filter(
            Q(member_role__members=u) | Q(admin_role__members=u)).distinct()
class UserRolesList(SubListAttachDetachAPIView):
    """Roles held by a user; POST attaches or detaches a role."""
    model = Role
    serializer_class = RoleSerializerWithParentAccess
    metadata_class = RoleMetadata
    parent_model = User
    relationship='roles'
    permission_classes = (IsAuthenticated,)
    new_in_300 = True
    def get_queryset(self):
        u = get_object_or_404(User, pk=self.kwargs['pk'])
        if not self.request.user.can_access(User, 'read', u):
            raise PermissionDenied()
        content_type = ContentType.objects.get_for_model(User)
        # Hide roles defined on User objects themselves (e.g. a user's own
        # admin_role) from the listing.
        return Role.filter_visible_roles(self.request.user, u.roles.all()) \
                   .exclude(content_type=content_type, object_id=u.id)
    def post(self, request, *args, **kwargs):
        sub_id = request.data.get('id', None)
        if not sub_id:
            return super(UserRolesList, self).post(request)
        # Users may never manipulate their own admin_role.
        if sub_id == self.request.user.admin_role.pk:
            raise PermissionDenied(_('You may not perform any action with your own admin_role.'))
        user = get_object_or_400(User, pk=self.kwargs['pk'])
        role = get_object_or_400(Role, pk=sub_id)
        user_content_type = ContentType.objects.get_for_model(User)
        if role.content_type == user_content_type:
            raise PermissionDenied(_('You may not change the membership of a users admin_role'))
        credential_content_type = ContentType.objects.get_for_model(Credential)
        # Credential roles: target user must share the credential's org, and
        # private (org-less) credentials may only be shared by superusers.
        if role.content_type == credential_content_type:
            if role.content_object.organization and user not in role.content_object.organization.member_role:
                data = dict(msg=_("You cannot grant credential access to a user not in the credentials' organization"))
                return Response(data, status=status.HTTP_400_BAD_REQUEST)
            if not role.content_object.organization and not request.user.is_superuser:
                data = dict(msg=_("You cannot grant private credential access to another user"))
                return Response(data, status=status.HTTP_400_BAD_REQUEST)
        return super(UserRolesList, self).post(request, *args, **kwargs)
    def check_parent_access(self, parent=None):
        # We hide roles that shouldn't be seen in our queryset
        return True
class UserProjectsList(SubListAPIView):
    """Projects readable by both the requester and the target user."""

    model = Project
    serializer_class = ProjectSerializer
    parent_model = User

    def get_queryset(self):
        target_user = self.get_parent_object()
        self.check_parent_access(target_user)
        # Intersect the two visibility sets so the requester only sees
        # projects they themselves can read.
        mine = Project.accessible_objects(self.request.user, 'read_role')
        theirs = Project.accessible_objects(target_user, 'read_role')
        return mine & theirs
class UserOrganizationsList(OrganizationCountsMixin, SubListAPIView):
    """Organizations the target user is a member of, visible to the requester."""
    model = Organization
    serializer_class = OrganizationSerializer
    parent_model = User
    relationship = 'organizations'
    def get_queryset(self):
        parent = self.get_parent_object()
        self.check_parent_access(parent)
        # Intersect requester visibility with the user's memberships.
        my_qs = Organization.accessible_objects(self.request.user, 'read_role')
        user_qs = Organization.objects.filter(member_role__members=parent)
        return my_qs & user_qs
class UserAdminOfOrganizationsList(OrganizationCountsMixin, SubListAPIView):
    """Organizations the target user administers, visible to the requester."""
    model = Organization
    serializer_class = OrganizationSerializer
    parent_model = User
    relationship = 'admin_of_organizations'
    def get_queryset(self):
        parent = self.get_parent_object()
        self.check_parent_access(parent)
        my_qs = Organization.accessible_objects(self.request.user, 'read_role')
        user_qs = Organization.objects.filter(admin_role__members=parent)
        return my_qs & user_qs
class UserActivityStreamList(ActivityStreamEnforcementMixin, SubListAPIView):
    """Activity stream entries where the user is the actor or the subject."""
    model = ActivityStream
    serializer_class = ActivityStreamSerializer
    parent_model = User
    relationship = 'activitystream_set'
    new_in_145 = True
    def get_queryset(self):
        parent = self.get_parent_object()
        self.check_parent_access(parent)
        qs = self.request.user.get_queryset(self.model)
        return qs.filter(Q(actor=parent) | Q(user__in=[parent]))
class UserDetail(RetrieveUpdateDestroyAPIView):
    """Retrieve, update, or delete a user."""
    model = User
    serializer_class = UserSerializer
    def update_filter(self, request, *args, **kwargs):
        ''' make sure non-read-only fields that can only be edited by admins, are only edited by admins '''
        obj = self.get_object()
        can_change = request.user.can_access(User, 'change', obj, request.data)
        can_admin = request.user.can_access(User, 'admin', obj, request.data)
        # Fields restricted to superusers vs. fields restricted to admins.
        su_only_edit_fields = ('is_superuser', 'is_system_auditor')
        admin_only_edit_fields = ('username', 'is_active')
        fields_to_check = ()
        if not request.user.is_superuser:
            fields_to_check += su_only_edit_fields
        if can_change and not can_admin:
            fields_to_check += admin_only_edit_fields
        bad_changes = {}
        for field in fields_to_check:
            left = getattr(obj, field, None)
            right = request.data.get(field, None)
            # Only an actual change (both sides present, values differ) is
            # treated as a violation.
            if left is not None and right is not None and left != right:
                bad_changes[field] = (left, right)
        if bad_changes:
            raise PermissionDenied(_('Cannot change %s.') % ', '.join(bad_changes.keys()))
    def destroy(self, request, *args, **kwargs):
        obj = self.get_object()
        can_delete = request.user.can_access(User, 'delete', obj)
        if not can_delete:
            raise PermissionDenied(_('Cannot delete user.'))
        return super(UserDetail, self).destroy(request, *args, **kwargs)
class UserAccessList(ResourceAccessList):
    """Users with access to a user record."""
    model = User # needs to be User for AccessLists's
    parent_model = User
    new_in_300 = True
class CredentialTypeList(ListCreateAPIView):
    """List or create credential types (API v2)."""
    model = CredentialType
    serializer_class = CredentialTypeSerializer
    new_in_320 = True
    new_in_api_v2 = True
class CredentialTypeDetail(RetrieveUpdateDestroyAPIView):
    """Retrieve, update, or delete a credential type (API v2)."""
    model = CredentialType
    serializer_class = CredentialTypeSerializer
    new_in_320 = True
    new_in_api_v2 = True
    def destroy(self, request, *args, **kwargs):
        instance = self.get_object()
        # Tower-managed types and types with existing credentials are
        # protected from deletion.
        if instance.managed_by_tower:
            raise PermissionDenied(detail=_("Deletion not allowed for managed credential types"))
        if instance.credentials.exists():
            raise PermissionDenied(detail=_("Credential types that are in use cannot be deleted"))
        return super(CredentialTypeDetail, self).destroy(request, *args, **kwargs)
class CredentialTypeCredentialList(SubListAPIView):
    """Credentials belonging to a credential type (API v2)."""
    model = Credential
    parent_model = CredentialType
    relationship = 'credentials'
    serializer_class = CredentialSerializer
    new_in_320 = True
    new_in_api_v2 = True
class CredentialTypeActivityStreamList(ActivityStreamEnforcementMixin, SubListAPIView):
    """Activity stream entries for a credential type (API v2)."""
    model = ActivityStream
    serializer_class = ActivityStreamSerializer
    parent_model = CredentialType
    relationship = 'activitystream_set'
    new_in_320 = True
    new_in_api_v2 = True
# remove in 3.3
class CredentialViewMixin(object):
    """Hides the credential_type search field from API v1 requests."""
    @property
    def related_search_fields(self):
        ret = super(CredentialViewMixin, self).related_search_fields
        # API v1 predates credential types, so drop that search field there.
        if get_request_version(self.request) == 1 and 'credential_type__search' in ret:
            ret.remove('credential_type__search')
        return ret
class CredentialList(CredentialViewMixin, ListCreateAPIView):
    """List or create credentials."""
    model = Credential
    serializer_class = CredentialSerializerCreate
    capabilities_prefetch = ['admin', 'use']
    filter_backends = ListCreateAPIView.filter_backends + [V1CredentialFilterBackend]
class CredentialOwnerUsersList(SubListAPIView):
    """Users holding the admin_role on a credential."""
    model = User
    serializer_class = UserSerializer
    parent_model = Credential
    relationship = 'admin_role.members'
    new_in_300 = True
class CredentialOwnerTeamsList(SubListAPIView):
    """Teams whose roles parent a credential's admin_role (i.e. owner teams)."""

    model = Team
    serializer_class = TeamSerializer
    parent_model = Credential
    new_in_300 = True

    def get_queryset(self):
        credential = get_object_or_404(self.parent_model, pk=self.kwargs['pk'])
        if not self.request.user.can_access(Credential, 'read', credential):
            raise PermissionDenied()
        # Walk the admin_role's parent roles that live on Team objects and
        # collect the owning teams.
        team_ct = ContentType.objects.get_for_model(self.model)
        owner_roles = credential.admin_role.parents.filter(content_type=team_ct)
        team_ids = [role.content_object.pk for role in owner_roles]
        return self.model.objects.filter(pk__in=team_ids)
class UserCredentialsList(CredentialViewMixin, SubListCreateAPIView):
    """Credentials owned by a user, limited to what the requester may see."""

    model = Credential
    serializer_class = UserCredentialSerializerCreate
    parent_model = User
    parent_key = 'user'
    filter_backends = SubListCreateAPIView.filter_backends + [V1CredentialFilterBackend]

    def get_queryset(self):
        target = self.get_parent_object()
        self.check_parent_access(target)
        # Intersect the two visibility sets.
        mine = Credential.accessible_objects(self.request.user, 'read_role')
        theirs = Credential.accessible_objects(target, 'read_role')
        return theirs & mine
class TeamCredentialsList(CredentialViewMixin, SubListCreateAPIView):
    """Credentials accessible to a team, limited to what the requester may see."""
    model = Credential
    serializer_class = TeamCredentialSerializerCreate
    parent_model = Team
    parent_key = 'team'
    filter_backends = SubListCreateAPIView.filter_backends + [V1CredentialFilterBackend]
    def get_queryset(self):
        team = self.get_parent_object()
        self.check_parent_access(team)
        visible_creds = Credential.accessible_objects(self.request.user, 'read_role')
        # Credentials the team can use or administer via its member_role.
        team_creds = Credential.objects.filter(Q(use_role__parents=team.member_role) | Q(admin_role__parents=team.member_role))
        return (team_creds & visible_creds).distinct()
class OrganizationCredentialList(CredentialViewMixin, SubListCreateAPIView):
    """Credentials belonging to an organization, limited to requester visibility."""
    model = Credential
    serializer_class = OrganizationCredentialSerializerCreate
    parent_model = Organization
    parent_key = 'organization'
    filter_backends = SubListCreateAPIView.filter_backends + [V1CredentialFilterBackend]
    def get_queryset(self):
        organization = self.get_parent_object()
        self.check_parent_access(organization)
        user_visible = Credential.accessible_objects(self.request.user, 'read_role').all()
        org_set = Credential.accessible_objects(organization.admin_role, 'read_role').all()
        # Superusers and system auditors see the full organization set.
        if self.request.user.is_superuser or self.request.user.is_system_auditor:
            return org_set
        return org_set & user_visible
class CredentialDetail(RetrieveUpdateDestroyAPIView):
    """Retrieve, update, or delete a credential."""
    model = Credential
    serializer_class = CredentialSerializer
    filter_backends = RetrieveUpdateDestroyAPIView.filter_backends + [V1CredentialFilterBackend]
class CredentialActivityStreamList(ActivityStreamEnforcementMixin, SubListAPIView):
    """Activity stream entries for a credential."""
    model = ActivityStream
    serializer_class = ActivityStreamSerializer
    parent_model = Credential
    relationship = 'activitystream_set'
    new_in_145 = True
class CredentialAccessList(ResourceAccessList):
    """Users with access to a credential."""
    model = User # needs to be User for AccessLists's
    parent_model = Credential
    new_in_300 = True
class CredentialObjectRolesList(SubListAPIView):
    """Roles defined directly on a single Credential object."""

    model = Role
    serializer_class = RoleSerializer
    parent_model = Credential
    new_in_300 = True

    def get_queryset(self):
        # Every Role row whose generic FK points at this credential.
        cred = self.get_parent_object()
        cred_ct = ContentType.objects.get_for_model(self.parent_model)
        return Role.objects.filter(content_type=cred_ct, object_id=cred.pk)
class InventoryScriptList(ListCreateAPIView):
    """List or create custom inventory scripts."""
    model = CustomInventoryScript
    serializer_class = CustomInventoryScriptSerializer
    new_in_210 = True
class InventoryScriptDetail(RetrieveUpdateDestroyAPIView):
    """Retrieve, update, or delete a custom inventory script."""
    model = CustomInventoryScript
    serializer_class = CustomInventoryScriptSerializer
    new_in_210 = True
    def destroy(self, request, *args, **kwargs):
        instance = self.get_object()
        can_delete = request.user.can_access(self.model, 'delete', instance)
        if not can_delete:
            raise PermissionDenied(_("Cannot delete inventory script."))
        # Detach the script from any inventory sources referencing it so they
        # are not left pointing at a deleted row.
        for inv_src in InventorySource.objects.filter(source_script=instance):
            inv_src.source_script = None
            inv_src.save()
        return super(InventoryScriptDetail, self).destroy(request, *args, **kwargs)
class InventoryScriptObjectRolesList(SubListAPIView):
    """Roles defined directly on a single custom inventory script."""

    model = Role
    serializer_class = RoleSerializer
    parent_model = CustomInventoryScript
    new_in_300 = True

    def get_queryset(self):
        # Every Role row whose generic FK points at this script.
        script = self.get_parent_object()
        script_ct = ContentType.objects.get_for_model(self.parent_model)
        return Role.objects.filter(content_type=script_ct, object_id=script.pk)
class InventoryList(ListCreateAPIView):
    """Inventories readable by the requesting user; POST creates one."""

    model = Inventory
    serializer_class = InventorySerializer
    capabilities_prefetch = ['admin', 'adhoc']

    def get_queryset(self):
        # Eager-load role FKs and prefetch the relations the serializer uses.
        return (
            Inventory.accessible_objects(self.request.user, 'read_role')
            .select_related('admin_role', 'read_role', 'update_role', 'use_role', 'adhoc_role')
            .prefetch_related('created_by', 'modified_by', 'organization')
        )
class ControlledByScmMixin(object):
    '''
    Special method to reset SCM inventory commit hash
    if anything that it manages changes.
    '''
    def _reset_inv_src_rev(self, obj):
        # Only non-read requests on a real object can invalidate the revision.
        if self.request.method in SAFE_METHODS or not obj:
            return
        project_following_sources = obj.inventory_sources.filter(
            update_on_project_update=True, source='scm')
        if project_following_sources:
            # Allow inventory changes unrelated to variables
            if self.model == Inventory and (
                    not self.request or not self.request.data or
                    parse_yaml_or_json(self.request.data.get('variables', '')) == parse_yaml_or_json(obj.variables)):
                return
            # Clearing the stored revision forces a fresh sync next update.
            project_following_sources.update(scm_last_revision='')
    def get_object(self):
        obj = super(ControlledByScmMixin, self).get_object()
        self._reset_inv_src_rev(obj)
        return obj
    def get_parent_object(self):
        obj = super(ControlledByScmMixin, self).get_parent_object()
        self._reset_inv_src_rev(obj)
        return obj
class InventoryDetail(ControlledByScmMixin, RetrieveUpdateDestroyAPIView):
    """Retrieve, update, or (asynchronously) delete an inventory."""
    model = Inventory
    serializer_class = InventoryDetailSerializer
    def update(self, request, *args, **kwargs):
        obj = self.get_object()
        kind = self.request.data.get('kind') or kwargs.get('kind')
        # Do not allow changes to an Inventory kind.
        if kind is not None and obj.kind != kind:
            return self.http_method_not_allowed(request, *args, **kwargs)
        return super(InventoryDetail, self).update(request, *args, **kwargs)
    def destroy(self, request, *args, **kwargs):
        # Deletion is scheduled asynchronously; a 202 signals acceptance.
        obj = self.get_object()
        if not request.user.can_access(self.model, 'delete', obj):
            raise PermissionDenied()
        try:
            obj.schedule_deletion(getattr(request.user, 'id', None))
            return Response(status=status.HTTP_202_ACCEPTED)
        # Use the 'as' spelling, consistent with the rest of the file; the
        # old 'except RuntimeError, e' form is a syntax error on Python 3.
        except RuntimeError as e:
            return Response(dict(error=_("{0}".format(e))), status=status.HTTP_400_BAD_REQUEST)
class InventoryActivityStreamList(ActivityStreamEnforcementMixin, SubListAPIView):
    """Activity stream entries for an inventory and its hosts/groups."""
    model = ActivityStream
    serializer_class = ActivityStreamSerializer
    parent_model = Inventory
    relationship = 'activitystream_set'
    new_in_145 = True
    def get_queryset(self):
        parent = self.get_parent_object()
        self.check_parent_access(parent)
        qs = self.request.user.get_queryset(self.model)
        # Include events on the inventory's member hosts and groups too.
        return qs.filter(Q(inventory=parent) | Q(host__in=parent.hosts.all()) | Q(group__in=parent.groups.all()))
class InventoryInstanceGroupsList(SubListAttachDetachAPIView):
    """Attach/detach list for instance groups associated with an inventory."""
    model = InstanceGroup
    serializer_class = InstanceGroupSerializer
    parent_model = Inventory
    relationship = 'instance_groups'
    new_in_320 = True
class InventoryAccessList(ResourceAccessList):
    """Users with access to an inventory."""
    model = User # needs to be User for AccessLists's
    parent_model = Inventory
    new_in_300 = True
class InventoryObjectRolesList(SubListAPIView):
    """Roles defined directly on a single Inventory object."""

    model = Role
    serializer_class = RoleSerializer
    parent_model = Inventory
    new_in_300 = True

    def get_queryset(self):
        # Every Role row whose generic FK points at this inventory.
        inv = self.get_parent_object()
        inv_ct = ContentType.objects.get_for_model(self.parent_model)
        return Role.objects.filter(content_type=inv_ct, object_id=inv.pk)
class InventoryJobTemplateList(SubListAPIView):
    """Job templates using this inventory, limited to requester visibility."""
    model = JobTemplate
    serializer_class = JobTemplateSerializer
    parent_model = Inventory
    relationship = 'jobtemplates'
    new_in_300 = True
    def get_queryset(self):
        parent = self.get_parent_object()
        self.check_parent_access(parent)
        qs = self.request.user.get_queryset(self.model)
        return qs.filter(inventory=parent)
class HostList(ListCreateAPIView):
    """Hosts visible to the requester; supports the 'host_filter' smart filter."""
    always_allow_superuser = False
    model = Host
    serializer_class = HostSerializer
    capabilities_prefetch = ['inventory.admin']
    def get_queryset(self):
        qs = super(HostList, self).get_queryset()
        filter_string = self.request.query_params.get('host_filter', None)
        if filter_string:
            # Narrow the queryset with the parsed SmartFilter expression.
            filter_qs = SmartFilter.query_from_string(filter_string)
            qs &= filter_qs
        return qs.distinct()
    def list(self, *args, **kwargs):
        try:
            return super(HostList, self).list(*args, **kwargs)
        except Exception as e:
            # Broad catch: surfaces filter-parse (and any other) errors as a
            # 400 response instead of a server error.
            return Response(dict(error=_(unicode(e))), status=status.HTTP_400_BAD_REQUEST)
class HostDetail(ControlledByScmMixin, RetrieveUpdateDestroyAPIView):
    """Retrieve, update, or delete a host."""
    always_allow_superuser = False
    model = Host
    serializer_class = HostSerializer
class HostAnsibleFactsDetail(RetrieveAPIView):
    """Read-only view of a host's stored Ansible facts."""
    model = Host
    serializer_class = AnsibleFactsSerializer
    new_in_320 = True
    new_in_api_v2 = True
class InventoryHostsList(SubListCreateAttachDetachAPIView):
    """Hosts within an inventory; POST creates or attaches a host."""
    model = Host
    serializer_class = HostSerializer
    parent_model = Inventory
    relationship = 'hosts'
    parent_key = 'inventory'
    capabilities_prefetch = ['inventory.admin']
    def get_queryset(self):
        inventory = self.get_parent_object()
        return getattrd(inventory, self.relationship).all()
class HostGroupsList(ControlledByScmMixin, SubListCreateAttachDetachAPIView):
    ''' the list of groups a host is directly a member of '''
    model = Group
    serializer_class = GroupSerializer
    parent_model = Host
    relationship = 'groups'
    def update_raw_data(self, data):
        # 'inventory' is injected server-side on create, so hide it from the
        # browsable API's raw-data form.
        data.pop('inventory', None)
        return super(HostGroupsList, self).update_raw_data(data)
    def create(self, request, *args, **kwargs):
        # Inject parent host inventory ID into new group data.
        data = request.data
        # HACK: Make request data mutable.
        if getattr(data, '_mutable', None) is False:
            data._mutable = True
        data['inventory'] = self.get_parent_object().inventory_id
        return super(HostGroupsList, self).create(request, *args, **kwargs)
class HostAllGroupsList(SubListAPIView):
    """All groups the host belongs to, directly or through parent groups."""

    model = Group
    serializer_class = GroupSerializer
    parent_model = Host
    relationship = 'groups'

    def get_queryset(self):
        host = self.get_parent_object()
        self.check_parent_access(host)
        # Intersect requester-visible groups with the host's full group set.
        visible = self.request.user.get_queryset(self.model).distinct()
        return visible & host.all_groups.distinct()
class HostInventorySourcesList(SubListAPIView):
    """Inventory sources associated with a host."""
    model = InventorySource
    serializer_class = InventorySourceSerializer
    parent_model = Host
    relationship = 'inventory_sources'
    new_in_148 = True
class HostSmartInventoriesList(SubListAPIView):
    """Smart inventories that include this host."""
    model = Inventory
    serializer_class = InventorySerializer
    parent_model = Host
    relationship = 'smart_inventories'
    new_in_320 = True
class HostActivityStreamList(ActivityStreamEnforcementMixin, SubListAPIView):
    """Activity stream entries for a host (and its inventory)."""
    model = ActivityStream
    serializer_class = ActivityStreamSerializer
    parent_model = Host
    relationship = 'activitystream_set'
    new_in_145 = True
    def get_queryset(self):
        parent = self.get_parent_object()
        self.check_parent_access(parent)
        qs = self.request.user.get_queryset(self.model)
        return qs.filter(Q(host=parent) | Q(inventory=parent.inventory))
class HostFactVersionsList(SystemTrackingEnforcementMixin, ParentMixin, ListAPIView):
    """Timeline of fact scan versions for a host, filterable by time and module."""
    model = Fact
    serializer_class = FactVersionSerializer
    parent_model = Host
    new_in_220 = True
    def get_queryset(self):
        # Optional query params: from/to (timestamps) and module name.
        from_spec = self.request.query_params.get('from', None)
        to_spec = self.request.query_params.get('to', None)
        module_spec = self.request.query_params.get('module', None)
        if from_spec:
            from_spec = dateutil.parser.parse(from_spec)
        if to_spec:
            to_spec = dateutil.parser.parse(to_spec)
        host_obj = self.get_parent_object()
        return Fact.get_timeline(host_obj.id, module=module_spec, ts_from=from_spec, ts_to=to_spec)
    def list(self, *args, **kwargs):
        # Unpaginated: the timeline is serialized wholesale under 'results'.
        queryset = self.get_queryset() or []
        return Response(dict(results=self.serializer_class(queryset, many=True).data))
class HostFactCompareView(SystemTrackingEnforcementMixin, SubDetailAPIView):
    """The fact scan for a host nearest a given datetime (default: now)."""
    model = Fact
    new_in_220 = True
    parent_model = Host
    serializer_class = FactSerializer
    def retrieve(self, request, *args, **kwargs):
        datetime_spec = request.query_params.get('datetime', None)
        module_spec = request.query_params.get('module', "ansible")
        datetime_actual = dateutil.parser.parse(datetime_spec) if datetime_spec is not None else now()
        host_obj = self.get_parent_object()
        fact_entry = Fact.get_host_fact(host_obj.id, module_spec, datetime_actual)
        if not fact_entry:
            return Response({'detail': _('Fact not found.')}, status=status.HTTP_404_NOT_FOUND)
        return Response(self.serializer_class(instance=fact_entry).data)
class HostInsights(GenericAPIView):
    """Proxy view that fetches Insights reports for a single host.

    GET resolves the host's Insights credential, calls the Insights API, and
    returns either the filtered report payload or a descriptive error with an
    appropriate gateway/not-found status.
    """
    model = Host
    serializer_class = EmptySerializer
    new_in_320 = True
    new_in_api_v2 = True

    def _extract_insights_creds(self, credential):
        # Password is stored encrypted; decrypt it for the outbound request.
        return (credential.inputs['username'], decrypt_field(credential, 'password'))

    def _get_insights(self, url, username, password):
        session = requests.Session()
        session.auth = requests.auth.HTTPBasicAuth(username, password)
        headers = {'Content-Type': 'application/json'}
        return session.get(url, headers=headers, timeout=120)

    def get_insights(self, url, username, password):
        """Call the Insights API; return a (payload dict, HTTP status) pair."""
        try:
            res = self._get_insights(url, username, password)
        except requests.exceptions.SSLError:
            return (dict(error=_('SSLError while trying to connect to {}').format(url)), status.HTTP_502_BAD_GATEWAY)
        except requests.exceptions.Timeout:
            return (dict(error=_('Request to {} timed out.').format(url)), status.HTTP_504_GATEWAY_TIMEOUT)
        except requests.exceptions.RequestException as e:
            # Fixed typo in the user-facing message ('Unkown' -> 'Unknown').
            return (dict(error=_('Unknown exception {} while trying to GET {}').format(e, url)), status.HTTP_502_BAD_GATEWAY)
        if res.status_code == 401:
            return (dict(error=_('Unauthorized access. Please check your Insights Credential username and password.')), status.HTTP_502_BAD_GATEWAY)
        elif res.status_code != 200:
            return (dict(error=_('Failed to gather reports and maintenance plans from Insights API at URL {}. Server responded with {} status code and message {}').format(url, res.status_code, res.content)), status.HTTP_502_BAD_GATEWAY)
        try:
            filtered_insights_content = filter_insights_api_response(res.json())
            return (dict(insights_content=filtered_insights_content), status.HTTP_200_OK)
        except ValueError:
            return (dict(error=_('Expected JSON response from Insights but instead got {}').format(res.content)), status.HTTP_502_BAD_GATEWAY)

    def get(self, request, *args, **kwargs):
        host = self.get_object()
        if host.insights_system_id is None:
            return Response(dict(error=_('This host is not recognized as an Insights host.')), status=status.HTTP_404_NOT_FOUND)
        if not host.inventory:
            # Guard added: the original dereferenced host.inventory.name in its
            # error path and raised AttributeError (HTTP 500) for hosts with no
            # inventory; report a clean 404 instead.
            return Response(dict(error=_('This host is not associated with an inventory.')), status=status.HTTP_404_NOT_FOUND)
        cred = host.inventory.insights_credential
        if cred is None:
            return Response(dict(error=_('The Insights Credential for "{}" was not found.').format(host.inventory.name)), status=status.HTTP_404_NOT_FOUND)
        url = settings.INSIGHTS_URL_BASE + '/r/insights/v3/systems/{}/reports/'.format(host.insights_system_id)
        (username, password) = self._extract_insights_creds(cred)
        (msg, err_code) = self.get_insights(url, username, password)
        return Response(msg, status=err_code)
class GroupList(ListCreateAPIView):
    """List visible inventory groups; POST creates a new group."""
    model = Group
    serializer_class = GroupSerializer
    # Prefetch inventory roles used to compute per-group user capabilities.
    capabilities_prefetch = ['inventory.admin', 'inventory.adhoc']
class EnforceParentRelationshipMixin(object):
    '''
    Useful when you have a self-referring ManyToManyRelationship.
    * Tower uses a shallow (2-deep only) url pattern. For example:
    When an object hangs off of a parent object you would have the url of the
    form /api/v1/parent_model/34/child_model. If you then wanted a child of the
    child model you would NOT do /api/v1/parent_model/34/child_model/87/child_child_model
    Instead, you would access the child_child_model via /api/v1/child_child_model/87/
    and you would create child_child_model's off of /api/v1/child_model/87/child_child_model_set
    Now, when creating child_child_model related to child_model you still want to
    link child_child_model to parent_model. That's what this class is for
    '''
    # Name of the field that must always be derived from the parent object.
    enforce_parent_relationship = ''
    def update_raw_data(self, data):
        # Hide the enforced field from the browsable API's raw-data form;
        # it is never user-supplied.
        data.pop(self.enforce_parent_relationship, None)
        return super(EnforceParentRelationshipMixin, self).update_raw_data(data)
    def create(self, request, *args, **kwargs):
        # Inject parent group inventory ID into new group data.
        data = request.data
        # HACK: Make request data mutable.
        if getattr(data, '_mutable', None) is False:
            data._mutable = True
        data[self.enforce_parent_relationship] = getattr(self.get_parent_object(), '%s_id' % self.enforce_parent_relationship)
        return super(EnforceParentRelationshipMixin, self).create(request, *args, **kwargs)
class GroupChildrenList(ControlledByScmMixin, EnforceParentRelationshipMixin, SubListCreateAttachDetachAPIView):
    """Child groups of a group; detaching with no id deletes the group itself."""
    model = Group
    serializer_class = GroupSerializer
    parent_model = Group
    relationship = 'children'
    enforce_parent_relationship = 'inventory'
    def unattach(self, request, *args, **kwargs):
        # With an explicit child id this is a normal detach; without one,
        # the request means "delete this group entirely".
        if request.data.get('id', None) is not None:
            return super(GroupChildrenList, self).unattach(request, *args, **kwargs)
        group = self.get_parent_object()
        if not request.user.can_access(self.model, 'delete', group):
            raise PermissionDenied()
        group.delete()
        return Response(status=status.HTTP_204_NO_CONTENT)
    def is_valid_relation(self, parent, sub, created=False):
        # Reject any association that would create a cycle in the group graph:
        # the candidate child's subtree must not contain the parent or any of
        # the parent's ancestors.
        ancestors = set(parent.all_parents.values_list('pk', flat=True))
        ancestors.add(parent.pk)
        descendants = set(sub.all_children.values_list('pk', flat=True))
        descendants.add(sub.pk)
        if ancestors & descendants:
            return {'error': _('Cyclical Group association.')}
        return None
class GroupPotentialChildrenList(SubListAPIView):
    """Groups in the same inventory that could legally become children."""
    model = Group
    serializer_class = GroupSerializer
    parent_model = Group
    new_in_14 = True
    def get_queryset(self):
        group = self.get_parent_object()
        self.check_parent_access(group)
        candidates = self.request.user.get_queryset(self.model).filter(
            inventory__pk=group.inventory.pk)
        # Exclude the group itself plus every ancestor and descendant,
        # since attaching any of those would create a cycle.
        excluded = {group.pk}
        excluded.update(group.all_parents.values_list('pk', flat=True))
        excluded.update(group.all_children.values_list('pk', flat=True))
        return candidates.exclude(pk__in=excluded)
class GroupHostsList(ControlledByScmMixin, SubListCreateAttachDetachAPIView):
    ''' the list of hosts directly below a group '''
    model = Host
    serializer_class = HostSerializer
    parent_model = Group
    relationship = 'hosts'
    capabilities_prefetch = ['inventory.admin']
    def update_raw_data(self, data):
        # Inventory is always inherited from the parent group, so keep it
        # out of the browsable API's raw-data form.
        data.pop('inventory', None)
        return super(GroupHostsList, self).update_raw_data(data)
    def create(self, request, *args, **kwargs):
        parent_group = Group.objects.get(id=self.kwargs['pk'])
        # Inject parent group inventory ID into new host data.
        request.data['inventory'] = parent_group.inventory_id
        # If a same-named host already exists in this inventory and the
        # request carries no meaningful variables, attach the existing host
        # instead of failing with a duplicate-name error.
        # Fixed: .count() > 0 followed by [0] issued two queries; .first()
        # fetches the row in one.
        existing_host = Host.objects.filter(
            inventory=parent_group.inventory,
            name=request.data.get('name', '')).first()
        if existing_host is not None and ('variables' not in request.data or
                                          request.data['variables'] == '' or
                                          request.data['variables'] == '{}' or
                                          request.data['variables'] == '---'):
            request.data['id'] = existing_host.id
            return self.attach(request, *args, **kwargs)
        return super(GroupHostsList, self).create(request, *args, **kwargs)
class GroupAllHostsList(SubListAPIView):
    ''' the list of all hosts below a group, even including subgroups '''
    model = Host
    serializer_class = HostSerializer
    parent_model = Group
    relationship = 'hosts'
    capabilities_prefetch = ['inventory.admin']
    def get_queryset(self):
        group = self.get_parent_object()
        self.check_parent_access(group)
        # Intersect the user's visible hosts with every host anywhere under
        # this group; distinct() is required for the '&' operator.
        visible = self.request.user.get_queryset(self.model).distinct()
        return visible & group.all_hosts.distinct()
class GroupInventorySourcesList(SubListAPIView):
    """Inventory sources associated with a group."""
    model = InventorySource
    serializer_class = InventorySourceSerializer
    parent_model = Group
    relationship = 'inventory_sources'
    new_in_148 = True
class GroupActivityStreamList(ActivityStreamEnforcementMixin, SubListAPIView):
    """Activity stream entries for a group and for hosts directly in it."""
    model = ActivityStream
    serializer_class = ActivityStreamSerializer
    parent_model = Group
    relationship = 'activitystream_set'
    new_in_145 = True
    def get_queryset(self):
        group = self.get_parent_object()
        self.check_parent_access(group)
        visible = self.request.user.get_queryset(self.model)
        # Include entries about the group itself or any of its direct hosts.
        return visible.filter(Q(group=group) | Q(host__in=group.hosts.all()))
class GroupDetail(ControlledByScmMixin, RetrieveUpdateDestroyAPIView):
    """Retrieve, update, or recursively delete a single group."""
    model = Group
    serializer_class = GroupSerializer
    def destroy(self, request, *args, **kwargs):
        obj = self.get_object()
        if not request.user.can_access(self.model, 'delete', obj):
            raise PermissionDenied()
        if get_request_version(request) == 1: # TODO: deletion of automatic inventory_source, remove in 3.3
            try:
                obj.deprecated_inventory_source.delete()
            except Group.deprecated_inventory_source.RelatedObjectDoesNotExist:
                # No auto-created inventory source exists for this group.
                pass
        obj.delete_recursive()
        return Response(status=status.HTTP_204_NO_CONTENT)
class InventoryGroupsList(SubListCreateAttachDetachAPIView):
    """All groups in an inventory; POST creates or attaches a group."""
    model = Group
    serializer_class = GroupSerializer
    parent_model = Inventory
    relationship = 'groups'
    # New groups created here automatically get inventory set to the parent.
    parent_key = 'inventory'
class InventoryRootGroupsList(SubListCreateAttachDetachAPIView):
    """Top-level (parentless) groups of an inventory."""
    model = Group
    serializer_class = GroupSerializer
    parent_model = Inventory
    relationship = 'groups'
    parent_key = 'inventory'
    def get_queryset(self):
        inventory = self.get_parent_object()
        self.check_parent_access(inventory)
        # distinct() is needed before intersecting with the '&' operator.
        visible = self.request.user.get_queryset(self.model).distinct()
        return visible & inventory.root_groups
class BaseVariableData(RetrieveUpdateAPIView):
    """Common base for the *_variable_data endpoints (adds YAML in/out)."""
    parser_classes = api_settings.DEFAULT_PARSER_CLASSES + [YAMLParser]
    renderer_classes = api_settings.DEFAULT_RENDERER_CLASSES + [YAMLRenderer]
    is_variable_data = True # Special flag for permissions check.
class InventoryVariableData(BaseVariableData):
    """Read/write an inventory's variables (JSON or YAML)."""
    model = Inventory
    serializer_class = InventoryVariableDataSerializer
class HostVariableData(BaseVariableData):
    """Read/write a host's variables (JSON or YAML)."""
    model = Host
    serializer_class = HostVariableDataSerializer
class GroupVariableData(BaseVariableData):
    """Read/write a group's variables (JSON or YAML)."""
    model = Group
    serializer_class = GroupVariableDataSerializer
class InventoryScriptView(RetrieveAPIView):
    """Emit inventory data in the format expected by Ansible inventory scripts.

    Supports the ``--list``-style full dump (optionally with per-host
    variables inlined under ``_meta``), and single-host lookup via the
    ``host`` query parameter.
    """
    model = Inventory
    serializer_class = InventoryScriptSerializer
    authentication_classes = [TaskAuthentication] + api_settings.DEFAULT_AUTHENTICATION_CLASSES
    permission_classes = (TaskPermission,)
    filter_backends = ()
    def retrieve(self, request, *args, **kwargs):
        obj = self.get_object()
        hostname = request.query_params.get('host', '')
        hostvars = bool(request.query_params.get('hostvars', ''))
        show_all = bool(request.query_params.get('all', ''))
        if show_all:
            hosts_q = dict()
        else:
            # By default only enabled hosts are reported.
            hosts_q = dict(enabled=True)
        if hostname:
            # Single-host mode: return just that host's variables.
            host = get_object_or_404(obj.hosts, name=hostname, **hosts_q)
            data = host.variables_dict
        else:
            data = OrderedDict()
            if obj.variables_dict:
                all_group = data.setdefault('all', OrderedDict())
                all_group['vars'] = obj.variables_dict
            if obj.kind == 'smart':
                # Smart inventories have no group structure; emit a flat
                # 'all' group containing every matched host.
                # Fixed: len(obj.hosts.all()) loaded every host just to
                # count; exists() asks the database instead.
                if not obj.hosts.exists():
                    return Response({})
                else:
                    all_group = data.setdefault('all', OrderedDict())
                    smart_hosts_qs = obj.hosts.all().order_by('name')
                    smart_hosts = list(smart_hosts_qs.values_list('name', flat=True))
                    all_group['hosts'] = smart_hosts
            else:
                # Add hosts without a group to the all group.
                groupless_hosts_qs = obj.hosts.filter(groups__isnull=True, **hosts_q).order_by('name')
                groupless_hosts = list(groupless_hosts_qs.values_list('name', flat=True))
                if groupless_hosts:
                    all_group = data.setdefault('all', OrderedDict())
                    all_group['hosts'] = groupless_hosts
                # Build in-memory mapping of groups and their hosts.
                group_hosts_kw = dict(group__inventory_id=obj.id, host__inventory_id=obj.id)
                if 'enabled' in hosts_q:
                    group_hosts_kw['host__enabled'] = hosts_q['enabled']
                group_hosts_qs = Group.hosts.through.objects.filter(**group_hosts_kw)
                group_hosts_qs = group_hosts_qs.order_by('host__name')
                group_hosts_qs = group_hosts_qs.values_list('group_id', 'host_id', 'host__name')
                group_hosts_map = {}
                for group_id, host_id, host_name in group_hosts_qs:
                    group_hostnames = group_hosts_map.setdefault(group_id, [])
                    group_hostnames.append(host_name)
                # Build in-memory mapping of groups and their children.
                group_parents_qs = Group.parents.through.objects.filter(
                    from_group__inventory_id=obj.id,
                    to_group__inventory_id=obj.id,
                )
                group_parents_qs = group_parents_qs.order_by('from_group__name')
                group_parents_qs = group_parents_qs.values_list('from_group_id', 'from_group__name', 'to_group_id')
                group_children_map = {}
                for from_group_id, from_group_name, to_group_id in group_parents_qs:
                    group_children = group_children_map.setdefault(to_group_id, [])
                    group_children.append(from_group_name)
                # Now use in-memory maps to build up group info.
                for group in obj.groups.all():
                    group_info = OrderedDict()
                    group_info['hosts'] = group_hosts_map.get(group.id, [])
                    group_info['children'] = group_children_map.get(group.id, [])
                    group_info['vars'] = group.variables_dict
                    data[group.name] = group_info
            if hostvars:
                # Inline per-host variables so Ansible need not call back
                # with --host for each one.
                data.setdefault('_meta', OrderedDict())
                data['_meta'].setdefault('hostvars', OrderedDict())
                for host in obj.hosts.filter(**hosts_q):
                    data['_meta']['hostvars'][host.name] = host.variables_dict
        return Response(data)
class InventoryTreeView(RetrieveAPIView):
    """Return the inventory's groups as a nested tree structure."""
    model = Inventory
    serializer_class = GroupTreeSerializer
    filter_backends = ()
    new_in_13 = True
    def _populate_group_children(self, group_data, all_group_data_map, group_children_map):
        # The presence of 'children' marks a node as already expanded; this
        # also guards against re-expanding nodes reached via multiple parents.
        if 'children' in group_data:
            return
        group_data['children'] = []
        for child_id in group_children_map.get(group_data['id'], set()):
            group_data['children'].append(all_group_data_map[child_id])
        group_data['children'].sort(key=lambda x: x['name'])
        for child_data in group_data['children']:
            self._populate_group_children(child_data, all_group_data_map, group_children_map)
    def retrieve(self, request, *args, **kwargs):
        inventory = self.get_object()
        group_children_map = inventory.get_group_children_map()
        root_group_pks = inventory.root_groups.order_by('name').values_list('pk', flat=True)
        groups_qs = inventory.groups
        groups_qs = groups_qs.prefetch_related('inventory_sources')
        # Serialize every group once, then wire children together by id so
        # shared subtrees reuse the same serialized dicts.
        all_group_data = GroupSerializer(groups_qs, many=True).data
        all_group_data_map = dict((x['id'], x) for x in all_group_data)
        tree_data = [all_group_data_map[x] for x in root_group_pks]
        for group_data in tree_data:
            self._populate_group_children(group_data, all_group_data_map,
                                          group_children_map)
        return Response(tree_data)
class InventoryInventorySourcesList(SubListCreateAPIView):
    """Inventory sources configured for an inventory."""
    view_name = _('Inventory Source List')
    model = InventorySource
    serializer_class = InventorySourceSerializer
    parent_model = Inventory
    # Sometimes creation blocked by SCM inventory source restrictions
    always_allow_superuser = False
    relationship = 'inventory_sources'
    parent_key = 'inventory'
    new_in_320 = True
class InventoryInventorySourcesUpdate(RetrieveAPIView):
    """Inspect or launch updates for all of an inventory's sources at once."""
    view_name = _('Inventory Sources Update')
    model = Inventory
    serializer_class = InventorySourceUpdateSerializer
    permission_classes = (InventoryInventorySourcesUpdatePermission,)
    is_job_start = True
    new_in_320 = True
    def retrieve(self, request, *args, **kwargs):
        # Report, per configured source, whether an update can be started.
        inventory = self.get_object()
        update_data = [
            {'inventory_source': src.pk, 'can_update': src.can_update}
            for src in inventory.inventory_sources.exclude(source='')
        ]
        return Response(update_data)
    def post(self, request, *args, **kwargs):
        # Kick off an update for every updatable source in the inventory.
        inventory = self.get_object()
        update_data = []
        successes = 0
        failures = 0
        for src in inventory.inventory_sources.exclude(source=''):
            details = {'inventory_source': src.pk, 'status': None}
            if src.can_update:
                details['status'] = 'started'
                details['inventory_update'] = src.update().id
                successes += 1
            else:
                if not details.get('status'):
                    details['status'] = _('Could not start because `can_update` returned False')
                failures += 1
            update_data.append(details)
        # Overall status: 202 on partial success, 400 if nothing started
        # (or nothing to start), 200 when everything started.
        if failures and successes:
            status_code = status.HTTP_202_ACCEPTED
        elif failures and not successes:
            status_code = status.HTTP_400_BAD_REQUEST
        elif not failures and not successes:
            return Response({'detail': _('No inventory sources to update.')},
                            status=status.HTTP_400_BAD_REQUEST)
        else:
            status_code = status.HTTP_200_OK
        return Response(update_data, status=status_code)
class InventorySourceList(ListCreateAPIView):
    """List inventory sources; creation is only offered on API v2."""
    model = InventorySource
    serializer_class = InventorySourceSerializer
    always_allow_superuser = False
    new_in_320 = True
    @property
    def allowed_methods(self):
        # API v1 treats the inventory source list as read-only.
        methods = super(InventorySourceList, self).allowed_methods
        if get_request_version(self.request) == 1:
            methods.remove('POST')
        return methods
class InventorySourceDetail(RetrieveUpdateDestroyAPIView):
    """Retrieve, update, or delete a single inventory source."""
    model = InventorySource
    serializer_class = InventorySourceSerializer
    new_in_14 = True
    def destroy(self, request, *args, **kwargs):
        obj = self.get_object()
        can_delete = request.user.can_access(InventorySource, 'delete', obj)
        if not can_delete:
            raise PermissionDenied(_("Cannot delete inventory source."))
        # Cancel any not-yet-finished updates before removing the source.
        for pu in obj.inventory_updates.filter(status__in=['new', 'pending', 'waiting', 'running']):
            pu.cancel()
        return super(InventorySourceDetail, self).destroy(request, *args, **kwargs)
class InventorySourceSchedulesList(SubListCreateAPIView):
    """Schedules that trigger updates of an inventory source."""
    view_name = _("Inventory Source Schedules")
    model = Schedule
    serializer_class = ScheduleSerializer
    parent_model = InventorySource
    relationship = 'schedules'
    parent_key = 'unified_job_template'
    new_in_148 = True
class InventorySourceActivityStreamList(ActivityStreamEnforcementMixin, SubListAPIView):
    """Activity stream entries for an inventory source."""
    model = ActivityStream
    serializer_class = ActivityStreamSerializer
    parent_model = InventorySource
    relationship = 'activitystream_set'
    new_in_145 = True
class InventorySourceNotificationTemplatesAnyList(SubListCreateAttachDetachAPIView):
    """Notification templates fired for any inventory source outcome."""
    model = NotificationTemplate
    serializer_class = NotificationTemplateSerializer
    parent_model = InventorySource
    relationship = 'notification_templates_any'
    new_in_300 = True
    def post(self, request, *args, **kwargs):
        parent = self.get_parent_object()
        # Notifications only apply to cloud-backed inventory sources.
        if parent.source not in CLOUD_INVENTORY_SOURCES:
            # Fixed: format() was passed a stray second argument
            # (parent.source) that the template never referenced.
            return Response(dict(msg=_("Notification Templates can only be assigned when source is one of {}.")
                                 .format(CLOUD_INVENTORY_SOURCES)),
                            status=status.HTTP_400_BAD_REQUEST)
        return super(InventorySourceNotificationTemplatesAnyList, self).post(request, *args, **kwargs)
class InventorySourceNotificationTemplatesErrorList(InventorySourceNotificationTemplatesAnyList):
    """Notification templates fired when a source update fails."""
    relationship = 'notification_templates_error'
class InventorySourceNotificationTemplatesSuccessList(InventorySourceNotificationTemplatesAnyList):
    """Notification templates fired when a source update succeeds."""
    relationship = 'notification_templates_success'
class InventorySourceHostsList(SubListAPIView):
    """Hosts imported by an inventory source."""
    model = Host
    serializer_class = HostSerializer
    parent_model = InventorySource
    relationship = 'hosts'
    new_in_148 = True
    capabilities_prefetch = ['inventory.admin']
class InventorySourceGroupsList(SubListAPIView):
    """Groups imported by an inventory source."""
    model = Group
    serializer_class = GroupSerializer
    parent_model = InventorySource
    relationship = 'groups'
    new_in_148 = True
class InventorySourceUpdatesList(SubListAPIView):
    """Update jobs previously run for an inventory source."""
    model = InventoryUpdate
    serializer_class = InventoryUpdateSerializer
    parent_model = InventorySource
    relationship = 'inventory_updates'
    new_in_14 = True
class InventorySourceUpdateView(RetrieveAPIView):
    """Manually launch an update for a single inventory source."""
    model = InventorySource
    serializer_class = InventorySourceUpdateSerializer
    is_job_start = True
    new_in_14 = True
    def post(self, request, *args, **kwargs):
        source = self.get_object()
        if not source.can_update:
            # Updates are not currently possible for this source.
            return self.http_method_not_allowed(request, *args, **kwargs)
        update = source.update()
        if not update:
            return Response({}, status=status.HTTP_400_BAD_REQUEST)
        headers = {'Location': update.get_absolute_url(request=request)}
        return Response(dict(inventory_update=update.id),
                        status=status.HTTP_202_ACCEPTED, headers=headers)
class InventoryUpdateList(ListAPIView):
    """All inventory update jobs visible to the requesting user."""
    model = InventoryUpdate
    serializer_class = InventoryUpdateListSerializer
class InventoryUpdateDetail(UnifiedJobDeletionMixin, RetrieveDestroyAPIView):
    """Retrieve or delete a single inventory update job."""
    model = InventoryUpdate
    serializer_class = InventoryUpdateSerializer
    new_in_14 = True
class InventoryUpdateCancel(RetrieveAPIView):
    """Cancel a queued or running inventory update."""
    model = InventoryUpdate
    serializer_class = InventoryUpdateCancelSerializer
    is_job_cancel = True
    new_in_14 = True
    def post(self, request, *args, **kwargs):
        update = self.get_object()
        if not update.can_cancel:
            # Nothing cancelable (already finished, failed, or never ran).
            return self.http_method_not_allowed(request, *args, **kwargs)
        update.cancel()
        return Response(status=status.HTTP_202_ACCEPTED)
class InventoryUpdateNotificationsList(SubListAPIView):
    """Notifications emitted for an inventory update."""
    model = Notification
    serializer_class = NotificationSerializer
    parent_model = InventoryUpdate
    relationship = 'notifications'
    new_in_300 = True
class JobTemplateList(ListCreateAPIView):
    """List job templates or create a new one."""
    model = JobTemplate
    metadata_class = JobTypeMetadata
    serializer_class = JobTemplateSerializer
    always_allow_superuser = False
    capabilities_prefetch = [
        'admin', 'execute',
        {'copy': ['project.use', 'inventory.use', 'credential.use', 'vault_credential.use']}
    ]
    def post(self, request, *args, **kwargs):
        response = super(JobTemplateList, self).post(request, *args, **kwargs)
        # Grant the creator admin on any successfully created template.
        if response.status_code == 201:
            template = JobTemplate.objects.get(id=response.data['id'])
            template.admin_role.members.add(request.user)
        return response
class JobTemplateDetail(RetrieveUpdateDestroyAPIView):
    """Retrieve, update, or delete a single job template."""
    model = JobTemplate
    metadata_class = JobTypeMetadata
    serializer_class = JobTemplateSerializer
    always_allow_superuser = False
class JobTemplateLaunch(RetrieveAPIView, GenericAPIView):
    """Launch a job from a job template, honoring prompt-on-launch fields."""
    model = JobTemplate
    metadata_class = JobTypeMetadata
    serializer_class = JobLaunchSerializer
    is_job_start = True
    always_allow_superuser = False
    def update_raw_data(self, data):
        # Pre-populate the browsable API's raw-data form with only the
        # fields this template actually prompts for.
        try:
            obj = self.get_object()
        except PermissionDenied:
            return data
        extra_vars = data.pop('extra_vars', None) or {}
        if obj:
            for p in obj.passwords_needed_to_start:
                data[p] = u''
            for v in obj.variables_needed_to_start:
                extra_vars.setdefault(v, u'')
            if extra_vars:
                data['extra_vars'] = extra_vars
            ask_for_vars_dict = obj._ask_for_vars_dict()
            ask_for_vars_dict.pop('extra_vars')
            if get_request_version(self.request) == 1: # TODO: remove in 3.3
                ask_for_vars_dict.pop('extra_credentials')
            for field in ask_for_vars_dict:
                if not ask_for_vars_dict[field]:
                    # Field is not promptable on this template; drop it.
                    data.pop(field, None)
                elif field == 'inventory' or field == 'credential':
                    data[field] = getattrd(obj, "%s.%s" % (field, 'id'), None)
                elif field == 'extra_credentials':
                    data[field] = [cred.id for cred in obj.extra_credentials.all()]
                else:
                    data[field] = getattr(obj, field)
        return data
    def post(self, request, *args, **kwargs):
        obj = self.get_object()
        ignored_fields = {}
        # Accept '<field>_id' spellings as aliases for '<field>'.
        for fd in ('credential', 'vault_credential', 'inventory'):
            id_fd = '{}_id'.format(fd)
            if fd not in request.data and id_fd in request.data:
                request.data[fd] = request.data[id_fd]
        if get_request_version(self.request) == 1 and 'extra_credentials' in request.data: # TODO: remove in 3.3
            if hasattr(request.data, '_mutable') and not request.data._mutable:
                request.data._mutable = True
            extra_creds = request.data.pop('extra_credentials', None)
            if extra_creds is not None:
                ignored_fields['extra_credentials'] = extra_creds
        passwords = {}
        serializer = self.serializer_class(instance=obj, data=request.data, context={'obj': obj, 'data': request.data, 'passwords': passwords})
        if not serializer.is_valid():
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        _accepted_or_ignored = obj._accept_or_ignore_job_kwargs(**request.data)
        prompted_fields = _accepted_or_ignored[0]
        ignored_fields.update(_accepted_or_ignored[1])
        # Enforce 'use' permission on any resource the launcher overrides.
        for fd, model in (
            ('credential', Credential),
            ('vault_credential', Credential),
            ('inventory', Inventory)):
            if fd in prompted_fields and prompted_fields[fd] != getattrd(obj, '{}.pk'.format(fd), None):
                new_res = get_object_or_400(model, pk=get_pk_from_dict(prompted_fields, fd))
                use_role = getattr(new_res, 'use_role')
                if request.user not in use_role:
                    raise PermissionDenied()
        for cred in prompted_fields.get('extra_credentials', []):
            new_credential = get_object_or_400(Credential, pk=cred)
            if request.user not in new_credential.use_role:
                raise PermissionDenied()
        new_job = obj.create_unified_job(**prompted_fields)
        result = new_job.signal_start(**passwords)
        if not result:
            # Could not start (e.g. passwords still required); remove the
            # dead job and tell the caller what was missing.
            data = dict(passwords_needed_to_start=new_job.passwords_needed_to_start)
            new_job.delete()
            return Response(data, status=status.HTTP_400_BAD_REQUEST)
        else:
            data = OrderedDict()
            data['ignored_fields'] = ignored_fields
            data.update(JobSerializer(new_job, context=self.get_serializer_context()).to_representation(new_job))
            data['job'] = new_job.id
            return Response(data, status=status.HTTP_201_CREATED)
class JobTemplateSchedulesList(SubListCreateAPIView):
    """Schedules that launch this job template."""
    view_name = _("Job Template Schedules")
    model = Schedule
    serializer_class = ScheduleSerializer
    parent_model = JobTemplate
    relationship = 'schedules'
    parent_key = 'unified_job_template'
    new_in_148 = True
class JobTemplateSurveySpec(GenericAPIView):
    """Read, replace, or delete a job template's survey specification."""
    model = JobTemplate
    parent_model = JobTemplate
    serializer_class = EmptySerializer
    new_in_210 = True
    def get(self, request, *args, **kwargs):
        obj = self.get_object()
        if not feature_enabled('surveys'):
            raise LicenseForbids(_('Your license does not allow '
                                   'adding surveys.'))
        survey_spec = obj.survey_spec
        # Never leak stored password defaults; mask them on the way out.
        # (Dropped an unused enumerate() index and simplified the
        # 'default in field and field[default]' test to field.get().)
        for field in survey_spec.get('spec', []):
            if field.get('type') == 'password':
                if field.get('default'):
                    field['default'] = '$encrypted$'
        return Response(survey_spec)
    def post(self, request, *args, **kwargs):
        obj = self.get_object()
        # Sanity check: Are surveys available on this license?
        # If not, do not allow them to be used.
        if not feature_enabled('surveys'):
            raise LicenseForbids(_('Your license does not allow '
                                   'adding surveys.'))
        if not request.user.can_access(self.model, 'change', obj, None):
            raise PermissionDenied()
        new_spec = request.data
        # Validate the overall spec envelope first.
        if "name" not in new_spec:
            return Response(dict(error=_("'name' missing from survey spec.")), status=status.HTTP_400_BAD_REQUEST)
        if "description" not in new_spec:
            return Response(dict(error=_("'description' missing from survey spec.")), status=status.HTTP_400_BAD_REQUEST)
        if "spec" not in new_spec:
            return Response(dict(error=_("'spec' missing from survey spec.")), status=status.HTTP_400_BAD_REQUEST)
        if not isinstance(new_spec["spec"], list):
            return Response(dict(error=_("'spec' must be a list of items.")), status=status.HTTP_400_BAD_REQUEST)
        if len(new_spec["spec"]) < 1:
            return Response(dict(error=_("'spec' doesn't contain any items.")), status=status.HTTP_400_BAD_REQUEST)
        idx = 0
        variable_set = set()
        # Then validate each survey question in order.
        for survey_item in new_spec["spec"]:
            if not isinstance(survey_item, dict):
                return Response(dict(error=_("Survey question %s is not a json object.") % str(idx)), status=status.HTTP_400_BAD_REQUEST)
            if "type" not in survey_item:
                return Response(dict(error=_("'type' missing from survey question %s.") % str(idx)), status=status.HTTP_400_BAD_REQUEST)
            if "question_name" not in survey_item:
                return Response(dict(error=_("'question_name' missing from survey question %s.") % str(idx)), status=status.HTTP_400_BAD_REQUEST)
            if "variable" not in survey_item:
                return Response(dict(error=_("'variable' missing from survey question %s.") % str(idx)), status=status.HTTP_400_BAD_REQUEST)
            if survey_item['variable'] in variable_set:
                return Response(dict(error=_("'variable' '%(item)s' duplicated in survey question %(survey)s.") % {
                    'item': survey_item['variable'], 'survey': str(idx)}), status=status.HTTP_400_BAD_REQUEST)
            else:
                variable_set.add(survey_item['variable'])
            if "required" not in survey_item:
                return Response(dict(error=_("'required' missing from survey question %s.") % str(idx)), status=status.HTTP_400_BAD_REQUEST)
            if survey_item["type"] == "password":
                # A '$encrypted$' default means "keep the previously stored
                # value"; copy it over from the old spec.
                if survey_item.get("default") and survey_item["default"].startswith('$encrypted$'):
                    if not obj.survey_spec:
                        # Fixed: .format() was applied inside the _() call,
                        # which defeats translation lookup.
                        return Response(dict(error=_("$encrypted$ is reserved keyword and may not be used as a default for password {}.").format(str(idx))),
                                        status=status.HTTP_400_BAD_REQUEST)
                    else:
                        old_spec = obj.survey_spec
                        for old_item in old_spec['spec']:
                            if old_item['variable'] == survey_item['variable']:
                                survey_item['default'] = old_item['default']
            idx += 1
        obj.survey_spec = new_spec
        obj.save(update_fields=['survey_spec'])
        return Response()
    def delete(self, request, *args, **kwargs):
        obj = self.get_object()
        if not request.user.can_access(self.model, 'delete', obj):
            raise PermissionDenied()
        obj.survey_spec = {}
        obj.save()
        return Response()
class WorkflowJobTemplateSurveySpec(WorkflowsEnforcementMixin, JobTemplateSurveySpec):
    """Survey spec endpoint for workflow job templates."""
    model = WorkflowJobTemplate
    parent_model = WorkflowJobTemplate
    new_in_310 = True
class JobTemplateActivityStreamList(ActivityStreamEnforcementMixin, SubListAPIView):
    """Activity stream entries for a job template."""
    model = ActivityStream
    serializer_class = ActivityStreamSerializer
    parent_model = JobTemplate
    relationship = 'activitystream_set'
    new_in_145 = True
class JobTemplateNotificationTemplatesAnyList(SubListCreateAttachDetachAPIView):
    """Notification templates fired for any job outcome."""
    model = NotificationTemplate
    serializer_class = NotificationTemplateSerializer
    parent_model = JobTemplate
    relationship = 'notification_templates_any'
    new_in_300 = True
class JobTemplateNotificationTemplatesErrorList(SubListCreateAttachDetachAPIView):
    """Notification templates fired when a job fails."""
    model = NotificationTemplate
    serializer_class = NotificationTemplateSerializer
    parent_model = JobTemplate
    relationship = 'notification_templates_error'
    new_in_300 = True
class JobTemplateNotificationTemplatesSuccessList(SubListCreateAttachDetachAPIView):
    """Notification templates fired when a job succeeds."""
    model = NotificationTemplate
    serializer_class = NotificationTemplateSerializer
    parent_model = JobTemplate
    relationship = 'notification_templates_success'
    new_in_300 = True
class JobTemplateExtraCredentialsList(SubListCreateAttachDetachAPIView):
    """Cloud/network credentials attached to a job template (API v2)."""
    model = Credential
    serializer_class = CredentialSerializer
    parent_model = JobTemplate
    relationship = 'extra_credentials'
    new_in_320 = True
    new_in_api_v2 = True
    def get_queryset(self):
        # Return the full list of extra_credentials
        parent = self.get_parent_object()
        self.check_parent_access(parent)
        sublist_qs = getattrd(parent, self.relationship)
        sublist_qs = sublist_qs.prefetch_related(
            'created_by', 'modified_by',
            'admin_role', 'use_role', 'read_role',
            'admin_role__parents', 'admin_role__members')
        return sublist_qs
    def is_valid_relation(self, parent, sub, created=False):
        # Only one credential of each type may be attached at a time.
        current_extra_types = [
            cred.credential_type.pk for cred in parent.extra_credentials.all()
        ]
        if sub.credential_type.pk in current_extra_types:
            # Fixed: the % interpolation used to happen inside the _() call,
            # which defeats translation lookup for the message catalog.
            return {'error': _('Cannot assign multiple %s credentials.') % sub.credential_type.name}
        if sub.credential_type.kind not in ('net', 'cloud'):
            return {'error': _('Extra credentials must be network or cloud.')}
        return super(JobTemplateExtraCredentialsList, self).is_valid_relation(parent, sub, created)
class JobTemplateLabelList(DeleteLastUnattachLabelMixin, SubListCreateAttachDetachAPIView):
    """Labels on a job template; creating an existing label attaches it."""
    model = Label
    serializer_class = LabelSerializer
    parent_model = JobTemplate
    relationship = 'labels'
    new_in_300 = True
    def post(self, request, *args, **kwargs):
        # If a label already exists in the database, attach it instead of erroring out
        # that it already exists
        if 'id' not in request.data and 'name' in request.data and 'organization' in request.data:
            # Fixed: exists() followed by [0] issued two queries; first()
            # fetches the matching row in one.
            existing = Label.objects.filter(name=request.data['name'], organization_id=request.data['organization']).first()
            if existing is not None:
                request.data['id'] = existing.id
                del request.data['name']
                del request.data['organization']
        if Label.objects.filter(unifiedjobtemplate_labels=self.kwargs['pk']).count() > 100:
            # Fixed: .format() was applied inside the _() call, which
            # defeats translation lookup for the message catalog.
            return Response(dict(msg=_('Maximum number of labels for {} reached.').format(
                self.parent_model._meta.verbose_name_raw)), status=status.HTTP_400_BAD_REQUEST)
        return super(JobTemplateLabelList, self).post(request, *args, **kwargs)
class JobTemplateCallback(GenericAPIView):
model = JobTemplate
permission_classes = (JobTemplateCallbackPermission,)
serializer_class = EmptySerializer
parser_classes = api_settings.DEFAULT_PARSER_CLASSES + [FormParser]
    @csrf_exempt
    @transaction.non_atomic_requests
    def dispatch(self, *args, **kwargs):
        # Callback requests come from provisioning hosts, not browsers, so
        # CSRF protection does not apply; non-atomic requests allow the
        # post() handler below to commit while polling inventory updates.
        return super(JobTemplateCallback, self).dispatch(*args, **kwargs)
    def find_matching_hosts(self):
        '''
        Find the host(s) in the job template's inventory that match the remote
        host for the current request.

        Returns a set of Host objects; an empty set when no name or address
        derived from the request headers matches any inventory host.
        '''
        # Find the list of remote host names/IPs to check.
        remote_hosts = set()
        for header in settings.REMOTE_HOST_HEADERS:
            for value in self.request.META.get(header, '').split(','):
                value = value.strip()
                if value:
                    remote_hosts.add(value)
        # Add the reverse lookup of IP addresses.
        for rh in list(remote_hosts):
            try:
                result = socket.gethostbyaddr(rh)
            except socket.herror:
                continue
            except socket.gaierror:
                continue
            # gethostbyaddr returns (hostname, aliaslist, ipaddrlist);
            # collect the primary name and all aliases.
            remote_hosts.add(result[0])
            remote_hosts.update(result[1])
        # Filter out any .arpa results.
        for rh in list(remote_hosts):
            if rh.endswith('.arpa'):
                remote_hosts.remove(rh)
        if not remote_hosts:
            return set()
        # Find the host objects to search for a match.
        obj = self.get_object()
        hosts = obj.inventory.hosts.all()
        # Populate host_mappings
        host_mappings = {}
        for host in hosts:
            host_name = host.get_effective_host_name()
            host_mappings.setdefault(host_name, [])
            host_mappings[host_name].append(host)
        # Try finding direct match
        matches = set()
        for host_name in remote_hosts:
            if host_name in host_mappings:
                matches.update(host_mappings[host_name])
        if len(matches) == 1:
            return matches
        # Try to resolve forward addresses for each host to find matches.
        for host_name in host_mappings:
            try:
                result = socket.getaddrinfo(host_name, None)
                possible_ips = set(x[4][0] for x in result)
                possible_ips.discard(host_name)
                if possible_ips and possible_ips & remote_hosts:
                    matches.update(host_mappings[host_name])
            except socket.gaierror:
                # Unresolvable inventory host names are simply skipped.
                pass
        return matches
def get(self, request, *args, **kwargs):
job_template = self.get_object()
matching_hosts = self.find_matching_hosts()
data = dict(
host_config_key=job_template.host_config_key,
matching_hosts=[x.name for x in matching_hosts],
)
if settings.DEBUG:
d = dict([(k,v) for k,v in request.META.items()
if k.startswith('HTTP_') or k.startswith('REMOTE_')])
data['request_meta'] = d
return Response(data)
    def post(self, request, *args, **kwargs):
        """
        Provision-callback endpoint: start a job for the single inventory host
        matching the remote address of this request.

        Returns 201 with a Location header for the new job, or 400 when
        no/multiple hosts match, user input would be required, a callback job
        is already queued for this host, or the job fails to start.
        """
        extra_vars = None
        # Be careful here: content_type can look like '<content_type>; charset=blar'
        if request.content_type.startswith("application/json"):
            extra_vars = request.data.get("extra_vars", None)
        # Permission class should have already validated host_config_key.
        job_template = self.get_object()
        # Attempt to find matching hosts based on remote address.
        matching_hosts = self.find_matching_hosts()
        # If the host is not found, update the inventory before trying to
        # match again.
        inventory_sources_already_updated = []
        if len(matching_hosts) != 1:
            inventory_sources = job_template.inventory.inventory_sources.filter( update_on_launch=True)
            inventory_update_pks = set()
            for inventory_source in inventory_sources:
                if inventory_source.needs_update_on_launch:
                    # FIXME: Doesn't check for any existing updates.
                    inventory_update = inventory_source.create_inventory_update(launch_type='callback')
                    inventory_update.signal_start()
                    inventory_update_pks.add(inventory_update.pk)
            inventory_update_qs = InventoryUpdate.objects.filter(pk__in=inventory_update_pks, status__in=('pending', 'waiting', 'running'))
            # Poll for the inventory updates we've started to complete.
            while inventory_update_qs.count():
                time.sleep(1.0)
                transaction.commit()
            # Ignore failed inventory updates here, only add successful ones
            # to the list to be excluded when running the job.
            for inventory_update in InventoryUpdate.objects.filter(pk__in=inventory_update_pks, status='successful'):
                inventory_sources_already_updated.append(inventory_update.inventory_source_id)
            matching_hosts = self.find_matching_hosts()
        # Check matching hosts.
        if not matching_hosts:
            data = dict(msg=_('No matching host could be found!'))
            return Response(data, status=status.HTTP_400_BAD_REQUEST)
        elif len(matching_hosts) > 1:
            data = dict(msg=_('Multiple hosts matched the request!'))
            return Response(data, status=status.HTTP_400_BAD_REQUEST)
        else:
            host = list(matching_hosts)[0]
        if not job_template.can_start_without_user_input(callback_extra_vars=extra_vars):
            data = dict(msg=_('Cannot start automatically, user input required!'))
            return Response(data, status=status.HTTP_400_BAD_REQUEST)
        limit = host.name
        # NOTE: We limit this to one job waiting per host per callback to keep them from stacking crazily
        if Job.objects.filter(status__in=['pending', 'waiting', 'running'], job_template=job_template,
                              limit=limit).count() > 0:
            data = dict(msg=_('Host callback job already pending.'))
            return Response(data, status=status.HTTP_400_BAD_REQUEST)
        # Everything is fine; actually create the job.
        kv = {"limit": limit, "launch_type": 'callback'}
        if extra_vars is not None and job_template.ask_variables_on_launch:
            kv['extra_vars'] = callback_filter_out_ansible_extra_vars(extra_vars)
        with transaction.atomic():
            job = job_template.create_job(**kv)
        # Send a signal to celery that the job should be started.
        result = job.signal_start(inventory_sources_already_updated=inventory_sources_already_updated)
        if not result:
            data = dict(msg=_('Error starting job!'))
            return Response(data, status=status.HTTP_400_BAD_REQUEST)
        # Return the location of the new job.
        headers = {'Location': job.get_absolute_url(request=request)}
        return Response(status=status.HTTP_201_CREATED, headers=headers)
class JobTemplateJobsList(SubListCreateAPIView):
    """List/create jobs under a job template; created jobs get job_template via parent_key."""
    model = Job
    serializer_class = JobListSerializer
    parent_model = JobTemplate
    relationship = 'jobs'
    parent_key = 'job_template'
class JobTemplateInstanceGroupsList(SubListAttachDetachAPIView):
    """Attach/detach instance groups on a job template."""
    model = InstanceGroup
    serializer_class = InstanceGroupSerializer
    parent_model = JobTemplate
    relationship = 'instance_groups'
    new_in_320 = True
class JobTemplateAccessList(ResourceAccessList):
    """Users with access to a job template."""
    model = User # needs to be User for AccessLists's
    parent_model = JobTemplate
    new_in_300 = True
class JobTemplateObjectRolesList(SubListAPIView):
    """List the Role objects attached to a specific job template."""
    model = Role
    serializer_class = RoleSerializer
    parent_model = JobTemplate
    new_in_300 = True

    def get_queryset(self):
        parent = self.get_parent_object()
        ct = ContentType.objects.get_for_model(self.parent_model)
        return Role.objects.filter(content_type=ct, object_id=parent.pk)
class WorkflowJobNodeList(WorkflowsEnforcementMixin, ListAPIView):
    """List all workflow job nodes."""
    model = WorkflowJobNode
    serializer_class = WorkflowJobNodeListSerializer
    new_in_310 = True
class WorkflowJobNodeDetail(WorkflowsEnforcementMixin, RetrieveAPIView):
    """Retrieve a single workflow job node."""
    model = WorkflowJobNode
    serializer_class = WorkflowJobNodeDetailSerializer
    new_in_310 = True
class WorkflowJobTemplateNodeList(WorkflowsEnforcementMixin, ListCreateAPIView):
    """List or create workflow job template nodes."""
    model = WorkflowJobTemplateNode
    serializer_class = WorkflowJobTemplateNodeListSerializer
    new_in_310 = True
class WorkflowJobTemplateNodeDetail(WorkflowsEnforcementMixin, RetrieveUpdateDestroyAPIView):
    """Retrieve, update, or delete a single workflow job template node."""
    model = WorkflowJobTemplateNode
    serializer_class = WorkflowJobTemplateNodeDetailSerializer
    new_in_310 = True

    def update_raw_data(self, data):
        # Blank the promptable char fields, then overlay the node's stored
        # char_prompts so raw data reflects its current prompt values.
        # (Fixed: the original list contained 'skip_tags' twice.)
        for fd in ['job_type', 'job_tags', 'skip_tags', 'limit']:
            data[fd] = None
        try:
            obj = self.get_object()
            data.update(obj.char_prompts)
        except Exception:
            # Narrowed from a bare `except:`; get_object() may fail (e.g. no
            # pk on an OPTIONS probe) — fall back to blanked defaults.
            pass
        return super(WorkflowJobTemplateNodeDetail, self).update_raw_data(data)
class WorkflowJobTemplateNodeChildrenBaseList(WorkflowsEnforcementMixin, EnforceParentRelationshipMixin, SubListCreateAttachDetachAPIView):
    model = WorkflowJobTemplateNode
    serializer_class = WorkflowJobTemplateNodeListSerializer
    always_allow_superuser = True
    parent_model = WorkflowJobTemplateNode
    relationship = ''  # set by subclasses: 'success_nodes' / 'failure_nodes' / 'always_nodes'
    enforce_parent_relationship = 'workflow_job_template'
    new_in_310 = True
    '''
    Limit the set of WorkflowJobTemplateNodes to the related nodes of specified by
    'relationship'
    '''
    def get_queryset(self):
        # Only the nodes related to the parent via self.relationship.
        parent = self.get_parent_object()
        self.check_parent_access(parent)
        return getattr(parent, self.relationship).all()
    def is_valid_relation(self, parent, sub, created=False):
        """
        Validate attaching `sub` beneath `parent` for self.relationship.

        Returns None when valid, otherwise a dict describing the error.
        Rejects: mixing 'always_nodes' with success/failure edges, a node
        acquiring a second parent, and edges that create a cycle.
        """
        # 'always_nodes' is mutually exclusive with success/failure edges.
        mutex_list = ('success_nodes', 'failure_nodes') if self.relationship == 'always_nodes' else ('always_nodes',)
        for relation in mutex_list:
            if getattr(parent, relation).all().exists():
                return {'Error': _('Cannot associate {0} when {1} have been associated.').format(self.relationship, relation)}
        if created:
            return None
        # Build an in-memory graph of every node in this workflow so the
        # single-parent and acyclicity constraints can be checked.
        workflow_nodes = parent.workflow_job_template.workflow_job_template_nodes.all().\
            prefetch_related('success_nodes', 'failure_nodes', 'always_nodes')
        graph = {}
        for workflow_node in workflow_nodes:
            graph[workflow_node.pk] = dict(node_object=workflow_node, metadata={'parent': None, 'traversed': False})
        find = False
        # Record each node's parent; also detect whether the proposed edge
        # already exists (find=True).
        for node_type in ['success_nodes', 'failure_nodes', 'always_nodes']:
            for workflow_node in workflow_nodes:
                parent_node = graph[workflow_node.pk]
                related_nodes = getattr(parent_node['node_object'], node_type).all()
                for related_node in related_nodes:
                    sub_node = graph[related_node.pk]
                    sub_node['metadata']['parent'] = parent_node
                    if not find and parent == workflow_node and sub == related_node and self.relationship == node_type:
                        find = True
        if not find:
            # Edge does not exist yet: simulate adding it, then walk up from
            # `sub`; revisiting a node means the new edge closes a cycle.
            sub_node = graph[sub.pk]
            parent_node = graph[parent.pk]
            if sub_node['metadata']['parent'] is not None:
                return {"Error": _("Multiple parent relationship not allowed.")}
            sub_node['metadata']['parent'] = parent_node
            iter_node = sub_node
            while iter_node is not None:
                if iter_node['metadata']['traversed']:
                    return {"Error": _("Cycle detected.")}
                iter_node['metadata']['traversed'] = True
                iter_node = iter_node['metadata']['parent']
        return None
class WorkflowJobTemplateNodeSuccessNodesList(WorkflowJobTemplateNodeChildrenBaseList):
    """Child nodes run on success."""
    relationship = 'success_nodes'
class WorkflowJobTemplateNodeFailureNodesList(WorkflowJobTemplateNodeChildrenBaseList):
    """Child nodes run on failure."""
    relationship = 'failure_nodes'
class WorkflowJobTemplateNodeAlwaysNodesList(WorkflowJobTemplateNodeChildrenBaseList):
    """Child nodes always run."""
    relationship = 'always_nodes'
class WorkflowJobNodeChildrenBaseList(WorkflowsEnforcementMixin, SubListAPIView):
    """Limit the set of WorkflowJobNodes to the related nodes specified by 'relationship'."""
    model = WorkflowJobNode
    serializer_class = WorkflowJobNodeListSerializer
    parent_model = WorkflowJobNode
    relationship = ''  # set by subclasses: 'success_nodes' / 'failure_nodes' / 'always_nodes'
    new_in_310 = True
    def get_queryset(self):
        parent = self.get_parent_object()
        self.check_parent_access(parent)
        return getattr(parent, self.relationship).all()
class WorkflowJobNodeSuccessNodesList(WorkflowJobNodeChildrenBaseList):
    """Child nodes run on success."""
    relationship = 'success_nodes'
class WorkflowJobNodeFailureNodesList(WorkflowJobNodeChildrenBaseList):
    """Child nodes run on failure."""
    relationship = 'failure_nodes'
class WorkflowJobNodeAlwaysNodesList(WorkflowJobNodeChildrenBaseList):
    """Child nodes always run."""
    relationship = 'always_nodes'
class WorkflowJobTemplateList(WorkflowsEnforcementMixin, ListCreateAPIView):
    """List or create workflow job templates."""
    model = WorkflowJobTemplate
    serializer_class = WorkflowJobTemplateListSerializer
    always_allow_superuser = False
    new_in_310 = True
class WorkflowJobTemplateDetail(WorkflowsEnforcementMixin, RetrieveUpdateDestroyAPIView):
    """Retrieve, update, or delete a workflow job template."""
    model = WorkflowJobTemplate
    serializer_class = WorkflowJobTemplateSerializer
    always_allow_superuser = False
    new_in_310 = True
class WorkflowJobTemplateCopy(WorkflowsEnforcementMixin, GenericAPIView):
    """GET reports copy permissions; POST copies the workflow job template."""
    model = WorkflowJobTemplate
    parent_model = WorkflowJobTemplate
    serializer_class = EmptySerializer
    new_in_310 = True

    def get(self, request, *args, **kwargs):
        obj = self.get_object()
        can_copy, messages = request.user.can_access_with_errors(self.model, 'copy', obj)
        data = OrderedDict([
            ('can_copy', can_copy), ('can_copy_without_user_input', can_copy),
            ('templates_unable_to_copy', [] if can_copy else ['all']),
            ('credentials_unable_to_copy', [] if can_copy else ['all']),
            ('inventories_unable_to_copy', [] if can_copy else ['all'])
        ])
        # Access messages mean user confirmation is needed before copying.
        if messages and can_copy:
            data['can_copy_without_user_input'] = False
            data.update(messages)
        return Response(data)

    def post(self, request, *args, **kwargs):
        obj = self.get_object()
        if not request.user.can_access(self.model, 'copy', obj):
            raise PermissionDenied()
        new_obj = obj.user_copy(request.user)
        # Ensure the copying user administers the new template.
        if request.user not in new_obj.admin_role:
            new_obj.admin_role.members.add(request.user)
        serialized = WorkflowJobTemplateSerializer(
            new_obj, context=self.get_serializer_context()).to_representation(new_obj)
        data = OrderedDict()
        data.update(serialized)
        return Response(data, status=status.HTTP_201_CREATED)
class WorkflowJobTemplateLabelList(WorkflowsEnforcementMixin, JobTemplateLabelList):
    """Labels on a workflow job template (reuses the job template label view)."""
    parent_model = WorkflowJobTemplate
    new_in_310 = True
class WorkflowJobTemplateLaunch(WorkflowsEnforcementMixin, RetrieveAPIView):
    """GET shows launch requirements; POST launches a workflow job from the template."""
    model = WorkflowJobTemplate
    serializer_class = WorkflowJobLaunchSerializer
    new_in_310 = True
    is_job_start = True
    always_allow_superuser = False
    def update_raw_data(self, data):
        # Pre-populate extra_vars with the variables the template needs, so
        # the OPTIONS raw data shows the expected keys.
        try:
            obj = self.get_object()
        except PermissionDenied:
            return data
        extra_vars = data.pop('extra_vars', None) or {}
        if obj:
            for v in obj.variables_needed_to_start:
                extra_vars.setdefault(v, u'')
            if extra_vars:
                data['extra_vars'] = extra_vars
        return data
    def post(self, request, *args, **kwargs):
        obj = self.get_object()
        if not request.user.can_access(self.model, 'start', obj):
            raise PermissionDenied()
        serializer = self.serializer_class(instance=obj, data=request.data)
        if not serializer.is_valid():
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        # Split request data into accepted prompts vs. ignored fields.
        prompted_fields, ignored_fields = obj._accept_or_ignore_job_kwargs(**request.data)
        new_job = obj.create_unified_job(**prompted_fields)
        new_job.signal_start()
        data = OrderedDict()
        data['ignored_fields'] = ignored_fields
        data.update(WorkflowJobSerializer(new_job, context=self.get_serializer_context()).to_representation(new_job))
        data['workflow_job'] = new_job.id
        return Response(data, status=status.HTTP_201_CREATED)
class WorkflowJobRelaunch(WorkflowsEnforcementMixin, GenericAPIView):
    """POST relaunches a finished workflow job as a new workflow job."""
    model = WorkflowJob
    serializer_class = EmptySerializer
    is_job_start = True
    new_in_310 = True
    def check_object_permissions(self, request, obj):
        # Surface a template-specific denial message when relaunch is blocked.
        if request.method == 'POST' and obj:
            relaunch_perm, messages = request.user.can_access_with_errors(self.model, 'start', obj)
            if not relaunch_perm and 'workflow_job_template' in messages:
                self.permission_denied(request, message=messages['workflow_job_template'])
        return super(WorkflowJobRelaunch, self).check_object_permissions(request, obj)
    def get(self, request, *args, **kwargs):
        # No relaunch-time metadata to report.
        return Response({})
    def post(self, request, *args, **kwargs):
        obj = self.get_object()
        new_workflow_job = obj.create_relaunch_workflow_job()
        new_workflow_job.signal_start()
        data = WorkflowJobSerializer(new_workflow_job, context=self.get_serializer_context()).data
        headers = {'Location': new_workflow_job.get_absolute_url(request=request)}
        return Response(data, status=status.HTTP_201_CREATED, headers=headers)
class WorkflowJobTemplateWorkflowNodesList(WorkflowsEnforcementMixin, SubListCreateAPIView):
    """List/create the nodes of a workflow job template, ordered by id."""
    model = WorkflowJobTemplateNode
    serializer_class = WorkflowJobTemplateNodeListSerializer
    parent_model = WorkflowJobTemplate
    relationship = 'workflow_job_template_nodes'
    parent_key = 'workflow_job_template'
    new_in_310 = True

    def update_raw_data(self, data):
        # Blank the promptable char fields in the OPTIONS raw data.
        # (Fixed: the original list contained 'skip_tags' twice.)
        for fd in ['job_type', 'job_tags', 'skip_tags', 'limit']:
            data[fd] = None
        return super(WorkflowJobTemplateWorkflowNodesList, self).update_raw_data(data)

    def get_queryset(self):
        # Deterministic ordering for graph rendering in clients.
        return super(WorkflowJobTemplateWorkflowNodesList, self).get_queryset().order_by('id')
class WorkflowJobTemplateJobsList(WorkflowsEnforcementMixin, SubListAPIView):
    """Workflow jobs spawned from a workflow job template."""
    model = WorkflowJob
    serializer_class = WorkflowJobListSerializer
    parent_model = WorkflowJobTemplate
    relationship = 'workflow_jobs'
    parent_key = 'workflow_job_template'
    new_in_310 = True
class WorkflowJobTemplateSchedulesList(WorkflowsEnforcementMixin, SubListCreateAPIView):
    """Schedules attached to a workflow job template."""
    view_name = _("Workflow Job Template Schedules")
    model = Schedule
    serializer_class = ScheduleSerializer
    parent_model = WorkflowJobTemplate
    relationship = 'schedules'
    parent_key = 'unified_job_template'
    new_in_310 = True
class WorkflowJobTemplateNotificationTemplatesAnyList(WorkflowsEnforcementMixin, SubListCreateAttachDetachAPIView):
    """Notification templates fired for any outcome of the workflow."""
    model = NotificationTemplate
    serializer_class = NotificationTemplateSerializer
    parent_model = WorkflowJobTemplate
    relationship = 'notification_templates_any'
    new_in_310 = True
class WorkflowJobTemplateNotificationTemplatesErrorList(WorkflowsEnforcementMixin, SubListCreateAttachDetachAPIView):
    """Notification templates fired on workflow error."""
    model = NotificationTemplate
    serializer_class = NotificationTemplateSerializer
    parent_model = WorkflowJobTemplate
    relationship = 'notification_templates_error'
    new_in_310 = True
class WorkflowJobTemplateNotificationTemplatesSuccessList(WorkflowsEnforcementMixin, SubListCreateAttachDetachAPIView):
    """Notification templates fired on workflow success."""
    model = NotificationTemplate
    serializer_class = NotificationTemplateSerializer
    parent_model = WorkflowJobTemplate
    relationship = 'notification_templates_success'
    new_in_310 = True
class WorkflowJobTemplateAccessList(WorkflowsEnforcementMixin, ResourceAccessList):
    """Users with access to a workflow job template."""
    model = User # needs to be User for AccessLists's
    parent_model = WorkflowJobTemplate
    new_in_310 = True
class WorkflowJobTemplateObjectRolesList(WorkflowsEnforcementMixin, SubListAPIView):
    """List the Role objects attached to a specific workflow job template."""
    model = Role
    serializer_class = RoleSerializer
    parent_model = WorkflowJobTemplate
    new_in_310 = True

    def get_queryset(self):
        parent = self.get_parent_object()
        ct = ContentType.objects.get_for_model(self.parent_model)
        return Role.objects.filter(content_type=ct, object_id=parent.pk)
class WorkflowJobTemplateActivityStreamList(WorkflowsEnforcementMixin, ActivityStreamEnforcementMixin, SubListAPIView):
    """Activity stream entries for a workflow job template and its nodes."""
    model = ActivityStream
    serializer_class = ActivityStreamSerializer
    parent_model = WorkflowJobTemplate
    relationship = 'activitystream_set'
    new_in_310 = True
    def get_queryset(self):
        parent = self.get_parent_object()
        self.check_parent_access(parent)
        qs = self.request.user.get_queryset(self.model)
        # Include entries tied either to the template itself or to its nodes.
        return qs.filter(Q(workflow_job_template=parent) |
                         Q(workflow_job_template_node__workflow_job_template=parent)).distinct()
class WorkflowJobList(WorkflowsEnforcementMixin, ListCreateAPIView):
    """List or create workflow jobs."""
    model = WorkflowJob
    serializer_class = WorkflowJobListSerializer
    new_in_310 = True
class WorkflowJobDetail(WorkflowsEnforcementMixin, UnifiedJobDeletionMixin, RetrieveDestroyAPIView):
    """Retrieve or delete a workflow job."""
    model = WorkflowJob
    serializer_class = WorkflowJobSerializer
    new_in_310 = True
class WorkflowJobWorkflowNodesList(WorkflowsEnforcementMixin, SubListAPIView):
    """Nodes of a workflow job, ordered by id."""
    model = WorkflowJobNode
    serializer_class = WorkflowJobNodeListSerializer
    always_allow_superuser = True
    parent_model = WorkflowJob
    relationship = 'workflow_job_nodes'
    parent_key = 'workflow_job'
    new_in_310 = True
    def get_queryset(self):
        # Deterministic ordering for graph rendering in clients.
        return super(WorkflowJobWorkflowNodesList, self).get_queryset().order_by('id')
class WorkflowJobCancel(WorkflowsEnforcementMixin, RetrieveAPIView):
    """GET reports cancel state; POST cancels a running workflow job."""
    model = WorkflowJob
    serializer_class = WorkflowJobCancelSerializer
    is_job_cancel = True
    new_in_310 = True

    def post(self, request, *args, **kwargs):
        obj = self.get_object()
        if not obj.can_cancel:
            return self.http_method_not_allowed(request, *args, **kwargs)
        obj.cancel()
        #TODO: Figure out whether an immediate schedule is needed.
        run_job_complete.delay(obj.id)
        return Response(status=status.HTTP_202_ACCEPTED)
class WorkflowJobNotificationsList(WorkflowsEnforcementMixin, SubListAPIView):
    """Notifications emitted for a workflow job."""
    model = Notification
    serializer_class = NotificationSerializer
    parent_model = WorkflowJob
    relationship = 'notifications'
    new_in_310 = True
class WorkflowJobActivityStreamList(WorkflowsEnforcementMixin, ActivityStreamEnforcementMixin, SubListAPIView):
    """Activity stream entries for a workflow job."""
    model = ActivityStream
    serializer_class = ActivityStreamSerializer
    parent_model = WorkflowJob
    relationship = 'activitystream_set'
    new_in_310 = True
class SystemJobTemplateList(ListAPIView):
    """List system job templates; restricted to superusers and system auditors."""
    model = SystemJobTemplate
    serializer_class = SystemJobTemplateSerializer
    new_in_210 = True
    def get(self, request, *args, **kwargs):
        if not request.user.is_superuser and not request.user.is_system_auditor:
            raise PermissionDenied(_("Superuser privileges needed."))
        return super(SystemJobTemplateList, self).get(request, *args, **kwargs)
class SystemJobTemplateDetail(RetrieveAPIView):
    """Retrieve a system job template."""
    model = SystemJobTemplate
    serializer_class = SystemJobTemplateSerializer
    new_in_210 = True
class SystemJobTemplateLaunch(GenericAPIView):
    """POST launches a system job from the template."""
    model = SystemJobTemplate
    serializer_class = EmptySerializer
    is_job_start = True
    new_in_210 = True

    def get(self, request, *args, **kwargs):
        # No launch-time metadata to report.
        return Response({})

    def post(self, request, *args, **kwargs):
        obj = self.get_object()
        extra_vars = request.data.get('extra_vars', {})
        new_job = obj.create_unified_job(extra_vars=extra_vars)
        new_job.signal_start()
        return Response(dict(system_job=new_job.id), status=status.HTTP_201_CREATED)
class SystemJobTemplateSchedulesList(SubListCreateAPIView):
    """Schedules attached to a system job template."""
    view_name = _("System Job Template Schedules")
    model = Schedule
    serializer_class = ScheduleSerializer
    parent_model = SystemJobTemplate
    relationship = 'schedules'
    parent_key = 'unified_job_template'
    new_in_210 = True
class SystemJobTemplateJobsList(SubListAPIView):
    """System jobs spawned from a system job template."""
    model = SystemJob
    serializer_class = SystemJobListSerializer
    parent_model = SystemJobTemplate
    relationship = 'jobs'
    parent_key = 'system_job_template'
    new_in_210 = True
class SystemJobTemplateNotificationTemplatesAnyList(SubListCreateAttachDetachAPIView):
    """Notification templates fired for any outcome of the system job."""
    model = NotificationTemplate
    serializer_class = NotificationTemplateSerializer
    parent_model = SystemJobTemplate
    relationship = 'notification_templates_any'
    new_in_300 = True
class SystemJobTemplateNotificationTemplatesErrorList(SubListCreateAttachDetachAPIView):
    """Notification templates fired on system job error."""
    model = NotificationTemplate
    serializer_class = NotificationTemplateSerializer
    parent_model = SystemJobTemplate
    relationship = 'notification_templates_error'
    new_in_300 = True
class SystemJobTemplateNotificationTemplatesSuccessList(SubListCreateAttachDetachAPIView):
    """Notification templates fired on system job success."""
    model = NotificationTemplate
    serializer_class = NotificationTemplateSerializer
    parent_model = SystemJobTemplate
    relationship = 'notification_templates_success'
    new_in_300 = True
class JobList(ListCreateAPIView):
    """List jobs; direct POST job creation is only allowed on API v1."""
    model = Job
    metadata_class = JobTypeMetadata
    serializer_class = JobListSerializer
    @property
    def allowed_methods(self):
        # Drop POST from the advertised methods on API v2+.
        methods = super(JobList, self).allowed_methods
        if get_request_version(self.request) > 1:
            methods.remove('POST')
        return methods
    # NOTE: Remove in 3.3, switch ListCreateAPIView to ListAPIView
    def post(self, request, *args, **kwargs):
        # v2 rejects direct job creation; clients should use launch endpoints.
        if get_request_version(self.request) > 1:
            return Response({"error": _("POST not allowed for Job launching in version 2 of the api")},
                            status=status.HTTP_405_METHOD_NOT_ALLOWED)
        return super(JobList, self).post(request, *args, **kwargs)
class JobDetail(UnifiedJobDeletionMixin, RetrieveUpdateDestroyAPIView):
    """Retrieve, update (only while status is 'new'), or delete a job."""
    model = Job
    metadata_class = JobTypeMetadata
    serializer_class = JobSerializer

    def update(self, request, *args, **kwargs):
        # Jobs become read-only once they leave the 'new' state.
        if self.get_object().status != 'new':
            return self.http_method_not_allowed(request, *args, **kwargs)
        return super(JobDetail, self).update(request, *args, **kwargs)
class JobExtraCredentialsList(SubListAPIView):
    """Extra credentials attached to a job."""
    model = Credential
    serializer_class = CredentialSerializer
    parent_model = Job
    relationship = 'extra_credentials'
    new_in_320 = True
    new_in_api_v2 = True
class JobLabelList(SubListAPIView):
    """Labels attached to a job."""
    model = Label
    serializer_class = LabelSerializer
    parent_model = Job
    relationship = 'labels'
    parent_key = 'job'
    new_in_300 = True
class WorkflowJobLabelList(WorkflowsEnforcementMixin, JobLabelList):
    """Labels attached to a workflow job (reuses the job label view)."""
    parent_model = WorkflowJob
    new_in_310 = True
class JobActivityStreamList(ActivityStreamEnforcementMixin, SubListAPIView):
    """Activity stream entries for a job."""
    model = ActivityStream
    serializer_class = ActivityStreamSerializer
    parent_model = Job
    relationship = 'activitystream_set'
    new_in_145 = True
# TODO: remove endpoint in 3.3
class JobStart(GenericAPIView):
    """Deprecated v1-only endpoint to start an already-created job."""
    model = Job
    serializer_class = EmptySerializer
    is_job_start = True
    deprecated = True
    def v2_not_allowed(self):
        # This endpoint was removed from API v2.
        return Response({'detail': 'Action only possible through v1 API.'},
                        status=status.HTTP_404_NOT_FOUND)
    def get(self, request, *args, **kwargs):
        if get_request_version(request) > 1:
            return self.v2_not_allowed()
        obj = self.get_object()
        data = dict(
            can_start=obj.can_start,
        )
        if obj.can_start:
            data['passwords_needed_to_start'] = obj.passwords_needed_to_start
            data['ask_variables_on_launch'] = obj.ask_variables_on_launch
        return Response(data)
    def post(self, request, *args, **kwargs):
        if get_request_version(request) > 1:
            return self.v2_not_allowed()
        obj = self.get_object()
        if obj.can_start:
            result = obj.signal_start(**request.data)
            if not result:
                # Start refused — report which passwords are still required.
                data = dict(passwords_needed_to_start=obj.passwords_needed_to_start)
                return Response(data, status=status.HTTP_400_BAD_REQUEST)
            else:
                return Response(status=status.HTTP_202_ACCEPTED)
        else:
            return self.http_method_not_allowed(request, *args, **kwargs)
class JobCancel(RetrieveAPIView):
    """GET reports cancel state; POST cancels a running job."""
    model = Job
    serializer_class = JobCancelSerializer
    is_job_cancel = True

    def post(self, request, *args, **kwargs):
        obj = self.get_object()
        if not obj.can_cancel:
            return self.http_method_not_allowed(request, *args, **kwargs)
        obj.cancel()
        return Response(status=status.HTTP_202_ACCEPTED)
class JobRelaunch(RetrieveAPIView, GenericAPIView):
    """POST relaunches a job as a copy, passing through any needed passwords."""
    model = Job
    serializer_class = JobRelaunchSerializer
    is_job_start = True
    @csrf_exempt
    @transaction.non_atomic_requests
    def dispatch(self, *args, **kwargs):
        return super(JobRelaunch, self).dispatch(*args, **kwargs)
    def check_object_permissions(self, request, obj):
        # Surface a specific denial message when relaunch is blocked.
        if request.method == 'POST' and obj:
            relaunch_perm, messages = request.user.can_access_with_errors(self.model, 'start', obj)
            if not relaunch_perm and 'detail' in messages:
                self.permission_denied(request, message=messages['detail'])
        return super(JobRelaunch, self).check_object_permissions(request, obj)
    def post(self, request, *args, **kwargs):
        obj = self.get_object()
        # Note: is_valid() may modify request.data
        # It will remove any key/value pair who's key is not in the 'passwords_needed_to_start' list
        serializer = self.serializer_class(data=request.data, context={'obj': obj, 'data': request.data})
        if not serializer.is_valid():
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        new_job = obj.copy_unified_job()
        result = new_job.signal_start(**request.data)
        if not result:
            # Start refused — report which passwords are still required.
            data = dict(passwords_needed_to_start=new_job.passwords_needed_to_start)
            return Response(data, status=status.HTTP_400_BAD_REQUEST)
        else:
            data = JobSerializer(new_job, context=self.get_serializer_context()).data
            # Add job key to match what old relaunch returned.
            data['job'] = new_job.id
            headers = {'Location': new_job.get_absolute_url(request=request)}
            return Response(data, status=status.HTTP_201_CREATED, headers=headers)
class JobNotificationsList(SubListAPIView):
    """Notifications emitted for a job."""
    model = Notification
    serializer_class = NotificationSerializer
    parent_model = Job
    relationship = 'notifications'
    new_in_300 = True
class BaseJobHostSummariesList(SubListAPIView):
    """Shared base for per-host job summary lists (host/group/job parents)."""
    model = JobHostSummary
    serializer_class = JobHostSummarySerializer
    parent_model = None # Subclasses must define this attribute.
    relationship = 'job_host_summaries'
    view_name = _('Job Host Summaries List')
    def get_queryset(self):
        parent = self.get_parent_object()
        self.check_parent_access(parent)
        # select_related avoids per-row queries for job/template/host fields.
        return getattr(parent, self.relationship).select_related('job', 'job__job_template', 'host')
class HostJobHostSummariesList(BaseJobHostSummariesList):
    """Job host summaries for a host."""
    parent_model = Host
class GroupJobHostSummariesList(BaseJobHostSummariesList):
    """Job host summaries for a group."""
    parent_model = Group
class JobJobHostSummariesList(BaseJobHostSummariesList):
    """Job host summaries for a job."""
    parent_model = Job
class JobHostSummaryDetail(RetrieveAPIView):
    """Retrieve a single job host summary."""
    model = JobHostSummary
    serializer_class = JobHostSummarySerializer
class JobEventList(ListAPIView):
    """List all job events."""
    model = JobEvent
    serializer_class = JobEventSerializer
class JobEventDetail(RetrieveAPIView):
    """Retrieve a single job event."""
    model = JobEvent
    serializer_class = JobEventSerializer
class JobEventChildrenList(SubListAPIView):
    """Child events of a job event."""
    model = JobEvent
    serializer_class = JobEventSerializer
    parent_model = JobEvent
    relationship = 'children'
    view_name = _('Job Event Children List')
class JobEventHostsList(SubListAPIView):
    """Hosts associated with a job event."""
    model = Host
    serializer_class = HostSerializer
    parent_model = JobEvent
    relationship = 'hosts'
    view_name = _('Job Event Hosts List')
    capabilities_prefetch = ['inventory.admin']
class BaseJobEventsList(SubListAPIView):
    """Shared base for job event lists; advertises the UI event-display cap."""
    model = JobEvent
    serializer_class = JobEventSerializer
    parent_model = None # Subclasses must define this attribute.
    relationship = 'job_events'
    view_name = _('Job Events List')
    search_fields = ('stdout',)
    def finalize_response(self, request, response, *args, **kwargs):
        # Tell the UI how many events it should attempt to render.
        response['X-UI-Max-Events'] = settings.RECOMMENDED_MAX_EVENTS_DISPLAY_HEADER
        return super(BaseJobEventsList, self).finalize_response(request, response, *args, **kwargs)
class HostJobEventsList(BaseJobEventsList):
    """Job events where the host is either the primary host or among related hosts."""
    parent_model = Host

    def get_queryset(self):
        host = self.get_parent_object()
        self.check_parent_access(host)
        visible = self.request.user.get_queryset(self.model)
        return visible.filter(Q(host=host) | Q(hosts=host)).distinct()
class GroupJobEventsList(BaseJobEventsList):
    """Job events for a group."""
    parent_model = Group
class JobJobEventsList(BaseJobEventsList):
    """Job events belonging to a job, with host/children relations pre-fetched."""
    parent_model = Job

    def get_queryset(self):
        job = self.get_parent_object()
        self.check_parent_access(job)
        return (job.job_events
                   .select_related('host')
                   .prefetch_related('hosts', 'children')
                   .all())
class AdHocCommandList(ListCreateAPIView):
    """List ad hoc commands; creating one also starts it running."""
    model = AdHocCommand
    serializer_class = AdHocCommandListSerializer
    new_in_220 = True
    always_allow_superuser = False
    @csrf_exempt
    @transaction.non_atomic_requests
    def dispatch(self, *args, **kwargs):
        return super(AdHocCommandList, self).dispatch(*args, **kwargs)
    def update_raw_data(self, data):
        # Hide inventory and limit fields from raw data, since they will be set
        # automatically by sub list create view.
        parent_model = getattr(self, 'parent_model', None)
        if parent_model in (Host, Group):
            data.pop('inventory', None)
            data.pop('limit', None)
        return super(AdHocCommandList, self).update_raw_data(data)
    def create(self, request, *args, **kwargs):
        # Inject inventory ID and limit if parent objects is a host/group.
        if hasattr(self, 'get_parent_object') and not getattr(self, 'parent_key', None):
            data = request.data
            # HACK: Make request data mutable.
            if getattr(data, '_mutable', None) is False:
                data._mutable = True
            parent_obj = self.get_parent_object()
            if isinstance(parent_obj, (Host, Group)):
                data['inventory'] = parent_obj.inventory_id
                data['limit'] = parent_obj.name
        # Check for passwords needed before creating ad hoc command.
        credential_pk = get_pk_from_dict(request.data, 'credential')
        if credential_pk:
            credential = get_object_or_400(Credential, pk=credential_pk)
            needed = credential.passwords_needed
            provided = dict([(field, request.data.get(field, '')) for field in needed])
            if not all(provided.values()):
                data = dict(passwords_needed_to_start=needed)
                return Response(data, status=status.HTTP_400_BAD_REQUEST)
        response = super(AdHocCommandList, self).create(request, *args, **kwargs)
        if response.status_code != status.HTTP_201_CREATED:
            return response
        # Start ad hoc command running when created.
        ad_hoc_command = get_object_or_400(self.model, pk=response.data['id'])
        result = ad_hoc_command.signal_start(**request.data)
        if not result:
            data = dict(passwords_needed_to_start=ad_hoc_command.passwords_needed_to_start)
            return Response(data, status=status.HTTP_400_BAD_REQUEST)
        return response
class InventoryAdHocCommandsList(AdHocCommandList, SubListCreateAPIView):
    """Ad hoc commands scoped to an inventory."""
    parent_model = Inventory
    relationship = 'ad_hoc_commands'
    parent_key = 'inventory'
class GroupAdHocCommandsList(AdHocCommandList, SubListCreateAPIView):
    """Ad hoc commands scoped to a group."""
    parent_model = Group
    relationship = 'ad_hoc_commands'
class HostAdHocCommandsList(AdHocCommandList, SubListCreateAPIView):
    """Ad hoc commands scoped to a host."""
    parent_model = Host
    relationship = 'ad_hoc_commands'
class AdHocCommandDetail(UnifiedJobDeletionMixin, RetrieveDestroyAPIView):
    """Retrieve or delete an ad hoc command."""
    model = AdHocCommand
    serializer_class = AdHocCommandSerializer
    new_in_220 = True
class AdHocCommandCancel(RetrieveAPIView):
    """GET reports cancel state; POST cancels a running ad hoc command."""
    model = AdHocCommand
    serializer_class = AdHocCommandCancelSerializer
    is_job_cancel = True
    new_in_220 = True

    def post(self, request, *args, **kwargs):
        obj = self.get_object()
        if not obj.can_cancel:
            return self.http_method_not_allowed(request, *args, **kwargs)
        obj.cancel()
        return Response(status=status.HTTP_202_ACCEPTED)
class AdHocCommandRelaunch(GenericAPIView):
    """Relaunch an ad hoc command as a copy after re-validating its fields."""
    model = AdHocCommand
    serializer_class = AdHocCommandRelaunchSerializer
    is_job_start = True
    new_in_220 = True
    # FIXME: Figure out why OPTIONS request still shows all fields.
    @csrf_exempt
    @transaction.non_atomic_requests
    def dispatch(self, *args, **kwargs):
        return super(AdHocCommandRelaunch, self).dispatch(*args, **kwargs)
    def get(self, request, *args, **kwargs):
        obj = self.get_object()
        data = dict(passwords_needed_to_start=obj.passwords_needed_to_start)
        return Response(data)
    def post(self, request, *args, **kwargs):
        obj = self.get_object()
        # Re-validate ad hoc command against serializer to check if module is
        # still allowed.
        data = {}
        # '_id' fields map back to the serializer's non-suffixed field names.
        for field in ('job_type', 'inventory_id', 'limit', 'credential_id',
                      'module_name', 'module_args', 'forks', 'verbosity',
                      'extra_vars', 'become_enabled'):
            if field.endswith('_id'):
                data[field[:-3]] = getattr(obj, field)
            else:
                data[field] = getattr(obj, field)
        serializer = AdHocCommandSerializer(data=data, context=self.get_serializer_context())
        if not serializer.is_valid():
            return Response(serializer.errors,
                            status=status.HTTP_400_BAD_REQUEST)
        # Check for passwords needed before copying ad hoc command.
        needed = obj.passwords_needed_to_start
        provided = dict([(field, request.data.get(field, '')) for field in needed])
        if not all(provided.values()):
            data = dict(passwords_needed_to_start=needed)
            return Response(data, status=status.HTTP_400_BAD_REQUEST)
        # Copy and start the new ad hoc command.
        new_ad_hoc_command = obj.copy()
        result = new_ad_hoc_command.signal_start(**request.data)
        if not result:
            data = dict(passwords_needed_to_start=new_ad_hoc_command.passwords_needed_to_start)
            return Response(data, status=status.HTTP_400_BAD_REQUEST)
        else:
            data = AdHocCommandSerializer(new_ad_hoc_command, context=self.get_serializer_context()).data
            # Add ad_hoc_command key to match what was previously returned.
            data['ad_hoc_command'] = new_ad_hoc_command.id
            headers = {'Location': new_ad_hoc_command.get_absolute_url(request=request)}
            return Response(data, status=status.HTTP_201_CREATED, headers=headers)
class AdHocCommandEventList(ListAPIView):
    """List all ad hoc command events."""
    model = AdHocCommandEvent
    serializer_class = AdHocCommandEventSerializer
    new_in_220 = True
class AdHocCommandEventDetail(RetrieveAPIView):
    """Retrieve a single ad hoc command event."""
    model = AdHocCommandEvent
    serializer_class = AdHocCommandEventSerializer
    new_in_220 = True  # API version marker
class BaseAdHocCommandEventsList(SubListAPIView):
    """Base sub-list of ad hoc command events; subclasses set parent_model."""
    model = AdHocCommandEvent
    serializer_class = AdHocCommandEventSerializer
    parent_model = None # Subclasses must define this attribute.
    relationship = 'ad_hoc_command_events'
    view_name = _('Ad Hoc Command Events List')
    new_in_220 = True  # API version marker
class HostAdHocCommandEventsList(BaseAdHocCommandEventsList):
    """Ad hoc command events scoped to a single host."""
    parent_model = Host
    new_in_220 = True  # API version marker
#class GroupJobEventsList(BaseJobEventsList):
# parent_model = Group
class AdHocCommandAdHocCommandEventsList(BaseAdHocCommandEventsList):
    """Ad hoc command events scoped to a single ad hoc command."""
    parent_model = AdHocCommand
    new_in_220 = True  # API version marker
class AdHocCommandActivityStreamList(ActivityStreamEnforcementMixin, SubListAPIView):
    """Activity stream entries for an ad hoc command."""
    model = ActivityStream
    serializer_class = ActivityStreamSerializer
    parent_model = AdHocCommand
    relationship = 'activitystream_set'
    new_in_220 = True  # API version marker
class AdHocCommandNotificationsList(SubListAPIView):
    """Notifications emitted for an ad hoc command."""
    model = Notification
    serializer_class = NotificationSerializer
    parent_model = AdHocCommand
    relationship = 'notifications'
    new_in_300 = True  # API version marker
class SystemJobList(ListCreateAPIView):
    """List/create system jobs; restricted to superusers and system auditors."""
    model = SystemJob
    serializer_class = SystemJobListSerializer
    new_in_210 = True  # API version marker

    def get(self, request, *args, **kwargs):
        # Only superusers and system auditors may even list system jobs.
        if not request.user.is_superuser and not request.user.is_system_auditor:
            raise PermissionDenied(_("Superuser privileges needed."))
        return super(SystemJobList, self).get(request, *args, **kwargs)
class SystemJobDetail(UnifiedJobDeletionMixin, RetrieveDestroyAPIView):
    """Retrieve or delete a single system job."""
    model = SystemJob
    serializer_class = SystemJobSerializer
    new_in_210 = True  # API version marker
class SystemJobCancel(RetrieveAPIView):
    """Endpoint to cancel a running system job."""
    model = SystemJob
    serializer_class = SystemJobCancelSerializer
    is_job_cancel = True
    new_in_210 = True  # API version marker

    def post(self, request, *args, **kwargs):
        # 202 when the cancel request is accepted; 405 when the job can no
        # longer be cancelled.
        obj = self.get_object()
        if obj.can_cancel:
            obj.cancel()
            return Response(status=status.HTTP_202_ACCEPTED)
        else:
            return self.http_method_not_allowed(request, *args, **kwargs)
class SystemJobNotificationsList(SubListAPIView):
    """Notifications emitted for a system job."""
    model = Notification
    serializer_class = NotificationSerializer
    parent_model = SystemJob
    relationship = 'notifications'
    new_in_300 = True  # API version marker
class UnifiedJobTemplateList(ListAPIView):
    """List all unified job templates (job templates, projects, etc.)."""
    model = UnifiedJobTemplate
    serializer_class = UnifiedJobTemplateSerializer
    new_in_148 = True  # API version marker
    # Prefetch role capabilities so per-object permissions don't N+1 query.
    capabilities_prefetch = [
        'admin', 'execute',
        {'copy': ['jobtemplate.project.use', 'jobtemplate.inventory.use',
                  'jobtemplate.credential.use', 'jobtemplate.vault_credential.use',
                  'workflowjobtemplate.organization.admin']}
    ]
class UnifiedJobList(ListAPIView):
    """List all unified jobs regardless of concrete job type."""
    model = UnifiedJob
    serializer_class = UnifiedJobListSerializer
    new_in_148 = True  # API version marker
class StdoutANSIFilter(object):
    """File-like wrapper that strips ANSI escape sequences while reading.

    Wraps a text file object and filters each line on the fly so that a
    plain-text download contains no embedded job-event payloads or color
    codes.  Reads behave like the underlying file, minus the escapes.
    """

    def __init__(self, fileobj):
        self.fileobj = fileobj
        self.extra_data = ''  # filtered characters beyond the last read() size
        if hasattr(fileobj, 'close'):
            self.close = fileobj.close

    def read(self, size=-1):
        """Read up to ``size`` filtered characters from the wrapped file."""
        data = self.extra_data
        while size > 0 and len(data) < size:
            line = self.fileobj.readline(size)
            if not line:
                break
            # First drop embedded event-data sequences, then color codes.
            for pattern in (r'\x1b\[K(?:[A-Za-z0-9+/=]+\x1b\[\d+D)+\x1b\[K',
                            r'\x1b[^m]*m'):
                line = re.sub(pattern, '', line)
            data += line
        if size > 0 and len(data) > size:
            # Hold the overflow for the next call and return exactly `size`.
            self.extra_data = data[size:]
            return data[:size]
        self.extra_data = ''
        return data
class UnifiedJobStdout(RetrieveAPIView):
    """Serve a unified job's stdout as html/api/json, raw txt/ansi, or as a
    downloadable file, depending on the accepted renderer format."""
    authentication_classes = [TokenGetAuthentication] + api_settings.DEFAULT_AUTHENTICATION_CLASSES
    serializer_class = UnifiedJobStdoutSerializer
    renderer_classes = [BrowsableAPIRenderer, renderers.StaticHTMLRenderer,
                        PlainTextRenderer, AnsiTextRenderer,
                        renderers.JSONRenderer, DownloadTextRenderer, AnsiDownloadRenderer]
    filter_backends = ()
    new_in_148 = True  # API version marker

    def retrieve(self, request, *args, **kwargs):
        """Render job stdout according to ``request.accepted_renderer.format``."""
        unified_job = self.get_object()
        obj_size = unified_job.result_stdout_size
        # Oversized output may only be downloaded, never rendered inline.
        if request.accepted_renderer.format not in {'txt_download', 'ansi_download'} and obj_size > settings.STDOUT_MAX_BYTES_DISPLAY:
            response_message = _("Standard Output too large to display (%(text_size)d bytes), "
                                 "only download supported for sizes over %(supported_size)d bytes") % {
                'text_size': obj_size, 'supported_size': settings.STDOUT_MAX_BYTES_DISPLAY}
            if request.accepted_renderer.format == 'json':
                return Response({'range': {'start': 0, 'end': 1, 'absolute_end': 1}, 'content': response_message})
            else:
                return Response(response_message)
        if request.accepted_renderer.format in ('html', 'api', 'json'):
            content_format = request.query_params.get('content_format', 'html')
            content_encoding = request.query_params.get('content_encoding', None)
            start_line = request.query_params.get('start_line', 0)
            end_line = request.query_params.get('end_line', None)
            dark_val = request.query_params.get('dark', '')
            dark = bool(dark_val and dark_val[0].lower() in ('1', 't', 'y'))
            content_only = bool(request.accepted_renderer.format in ('api', 'json'))
            # Full html pages default to a dark background unless ?dark=0
            # was given explicitly; embedded (api/json) content does not.
            dark_bg = (content_only and dark) or (not content_only and (dark or not dark_val))
            content, start, end, absolute_end = unified_job.result_stdout_raw_limited(start_line, end_line)
            # Remove any ANSI escape sequences containing job event data.
            content = re.sub(r'\x1b\[K(?:[A-Za-z0-9+/=]+\x1b\[\d+D)+\x1b\[K', '', content)
            body = ansiconv.to_html(cgi.escape(content))
            context = {
                'title': get_view_name(self.__class__),
                'body': mark_safe(body),
                'dark': dark_bg,
                'content_only': content_only,
            }
            data = render_to_string('api/stdout.html', context).strip()
            if request.accepted_renderer.format == 'api':
                return Response(mark_safe(data))
            if request.accepted_renderer.format == 'json':
                if content_encoding == 'base64' and content_format == 'ansi':
                    return Response({'range': {'start': start, 'end': end, 'absolute_end': absolute_end}, 'content': b64encode(content)})
                elif content_format == 'html':
                    return Response({'range': {'start': start, 'end': end, 'absolute_end': absolute_end}, 'content': body})
            return Response(data)
        elif request.accepted_renderer.format == 'txt':
            return Response(unified_job.result_stdout)
        elif request.accepted_renderer.format == 'ansi':
            return Response(unified_job.result_stdout_raw)
        elif request.accepted_renderer.format in {'txt_download', 'ansi_download'}:
            # Lazily materialize the stdout file from the database the first
            # time a download is requested.
            if not os.path.exists(unified_job.result_stdout_file):
                write_fd = open(unified_job.result_stdout_file, 'w')
                with connection.cursor() as cursor:
                    try:
                        tablename, related_name = {
                            Job: ('main_jobevent', 'job_id'),
                            AdHocCommand: ('main_adhoccommandevent', 'ad_hoc_command_id'),
                        }.get(unified_job.__class__, (None, None))
                        if tablename is None:
                            # stdout job event reconstruction isn't supported
                            # for certain job types (such as inventory syncs),
                            # so just grab the raw stdout from the DB
                            write_fd.write(unified_job.result_stdout_text)
                            write_fd.close()
                        else:
                            # Stream event stdout straight from Postgres via
                            # COPY ... TO STDOUT into the file.
                            cursor.copy_expert(
                                "copy (select stdout from {} where {}={} order by start_line) to stdout".format(
                                    tablename,
                                    related_name,
                                    unified_job.id
                                ),
                                write_fd
                            )
                            write_fd.close()
                            # Normalize literal \r\n sequences to newlines.
                            subprocess.Popen("sed -i 's/\\\\r\\\\n/\\n/g' {}".format(unified_job.result_stdout_file),
                                             shell=True).wait()
                    except Exception as e:
                        return Response({"error": _("Error generating stdout download file: {}".format(e))})
            try:
                content_fd = open(unified_job.result_stdout_file, 'r')
                if request.accepted_renderer.format == 'txt_download':
                    # For txt downloads, filter out ANSI escape sequences.
                    content_fd = StdoutANSIFilter(content_fd)
                    suffix = ''
                else:
                    suffix = '_ansi'
                response = HttpResponse(FileWrapper(content_fd), content_type='text/plain')
                response["Content-Disposition"] = 'attachment; filename="job_%s%s.txt"' % (str(unified_job.id), suffix)
                return response
            except Exception as e:
                return Response({"error": _("Error generating stdout download file: %s") % str(e)}, status=status.HTTP_400_BAD_REQUEST)
        else:
            return super(UnifiedJobStdout, self).retrieve(request, *args, **kwargs)
class ProjectUpdateStdout(UnifiedJobStdout):
    """Stdout endpoint for project updates."""
    model = ProjectUpdate
    new_in_13 = True  # API version marker
class InventoryUpdateStdout(UnifiedJobStdout):
    """Stdout endpoint for inventory updates."""
    model = InventoryUpdate
class JobStdout(UnifiedJobStdout):
    """Stdout endpoint for playbook jobs."""
    model = Job
class AdHocCommandStdout(UnifiedJobStdout):
    """Stdout endpoint for ad hoc commands."""
    model = AdHocCommand
    new_in_220 = True  # API version marker
class NotificationTemplateList(ListCreateAPIView):
    """List and create notification templates."""
    model = NotificationTemplate
    serializer_class = NotificationTemplateSerializer
    new_in_300 = True  # API version marker
class NotificationTemplateDetail(RetrieveUpdateDestroyAPIView):
    """Retrieve, update or delete a notification template."""
    model = NotificationTemplate
    serializer_class = NotificationTemplateSerializer
    new_in_300 = True  # API version marker

    def delete(self, request, *args, **kwargs):
        # 404 (not 403) when the user lacks delete access, so the template's
        # existence is not leaked; 405 while notifications are still pending.
        obj = self.get_object()
        if not request.user.can_access(self.model, 'delete', obj):
            return Response(status=status.HTTP_404_NOT_FOUND)
        if obj.notifications.filter(status='pending').exists():
            return Response({"error": _("Delete not allowed while there are pending notifications")},
                            status=status.HTTP_405_METHOD_NOT_ALLOWED)
        return super(NotificationTemplateDetail, self).delete(request, *args, **kwargs)
class NotificationTemplateTest(GenericAPIView):
    """Send a test notification through a notification template."""
    view_name = _('Notification Template Test')
    model = NotificationTemplate
    serializer_class = EmptySerializer
    new_in_300 = True  # API version marker
    is_job_start = True

    def post(self, request, *args, **kwargs):
        # Generate a canned test notification and dispatch it asynchronously;
        # 202 with its id and Location header on success, 400 otherwise.
        obj = self.get_object()
        notification = obj.generate_notification("Tower Notification Test {} {}".format(obj.id, settings.TOWER_URL_BASE),
                                                 {"body": "Ansible Tower Test Notification {} {}".format(obj.id, settings.TOWER_URL_BASE)})
        if not notification:
            return Response({}, status=status.HTTP_400_BAD_REQUEST)
        else:
            send_notifications.delay([notification.id])
            headers = {'Location': notification.get_absolute_url(request=request)}
            return Response({"notification": notification.id},
                            headers=headers,
                            status=status.HTTP_202_ACCEPTED)
class NotificationTemplateNotificationList(SubListAPIView):
    """Notifications generated by a specific notification template."""
    model = Notification
    serializer_class = NotificationSerializer
    parent_model = NotificationTemplate
    relationship = 'notifications'
    parent_key = 'notification_template'
    new_in_300 = True  # API version marker
class NotificationList(ListAPIView):
    """List all notifications."""
    model = Notification
    serializer_class = NotificationSerializer
    new_in_300 = True  # API version marker
class NotificationDetail(RetrieveAPIView):
    """Retrieve a single notification."""
    model = Notification
    serializer_class = NotificationSerializer
    new_in_300 = True  # API version marker
class LabelList(ListCreateAPIView):
    """List and create labels."""
    model = Label
    serializer_class = LabelSerializer
    new_in_300 = True  # API version marker
class LabelDetail(RetrieveUpdateAPIView):
    """Retrieve or update a single label."""
    model = Label
    serializer_class = LabelSerializer
    new_in_300 = True  # API version marker
class ActivityStreamList(ActivityStreamEnforcementMixin, SimpleListAPIView):
    """List activity stream entries."""
    model = ActivityStream
    serializer_class = ActivityStreamSerializer
    new_in_145 = True  # API version marker
class ActivityStreamDetail(ActivityStreamEnforcementMixin, RetrieveAPIView):
    """Retrieve a single activity stream entry."""
    model = ActivityStream
    serializer_class = ActivityStreamSerializer
    new_in_145 = True  # API version marker
class RoleList(ListAPIView):
    """List the RBAC roles visible to the requesting user."""
    model = Role
    serializer_class = RoleSerializer
    permission_classes = (IsAuthenticated,)
    new_in_300 = True  # API version marker

    def get_queryset(self):
        result = Role.visible_roles(self.request.user)
        # Sanity check: is the requesting user an orphaned non-admin/auditor?
        # if yes, make system admin/auditor mandatorily visible.
        if not self.request.user.organizations.exists() and\
           not self.request.user.is_superuser and\
           not self.request.user.is_system_auditor:
            mandatories = ('system_administrator', 'system_auditor')
            super_qs = Role.objects.filter(singleton_name__in=mandatories)
            result = result | super_qs
        return result
class RoleDetail(RetrieveAPIView):
    """Retrieve a single RBAC role."""
    model = Role
    serializer_class = RoleSerializer
    new_in_300 = True  # API version marker
class RoleUsersList(SubListAttachDetachAPIView):
    """List the users holding a role, and attach/detach members."""
    model = User
    serializer_class = UserSerializer
    parent_model = Role
    relationship = 'members'
    new_in_300 = True  # API version marker

    def get_queryset(self):
        role = self.get_parent_object()
        self.check_parent_access(role)
        return role.members.all()

    def post(self, request, *args, **kwargs):
        # Forbid implicit user creation here
        sub_id = request.data.get('id', None)
        if not sub_id:
            return super(RoleUsersList, self).post(request)
        user = get_object_or_400(User, pk=sub_id)
        role = self.get_parent_object()
        # Users may not grant/revoke their own admin role.
        if role == self.request.user.admin_role:
            raise PermissionDenied(_('You may not perform any action with your own admin_role.'))
        user_content_type = ContentType.objects.get_for_model(User)
        if role.content_type == user_content_type:
            raise PermissionDenied(_('You may not change the membership of a users admin_role'))
        # Credential roles have extra organization-scoping restrictions.
        credential_content_type = ContentType.objects.get_for_model(Credential)
        if role.content_type == credential_content_type:
            if role.content_object.organization and user not in role.content_object.organization.member_role:
                data = dict(msg=_("You cannot grant credential access to a user not in the credentials' organization"))
                return Response(data, status=status.HTTP_400_BAD_REQUEST)
            if not role.content_object.organization and not request.user.is_superuser:
                data = dict(msg=_("You cannot grant private credential access to another user"))
                return Response(data, status=status.HTTP_400_BAD_REQUEST)
        return super(RoleUsersList, self).post(request, *args, **kwargs)
class RoleTeamsList(SubListAttachDetachAPIView):
    """List the teams holding a role, and attach/detach them."""
    model = Team
    serializer_class = TeamSerializer
    parent_model = Role
    relationship = 'member_role.parents'
    permission_classes = (IsAuthenticated,)
    new_in_300 = True  # API version marker

    def get_queryset(self):
        role = self.get_parent_object()
        self.check_parent_access(role)
        return Team.objects.filter(member_role__children=role)

    def post(self, request, pk, *args, **kwargs):
        # Forbid implicit team creation: an explicit team id is required.
        sub_id = request.data.get('id', None)
        if not sub_id:
            return super(RoleTeamsList, self).post(request)
        team = get_object_or_400(Team, pk=sub_id)
        role = Role.objects.get(pk=self.kwargs['pk'])
        # Organization roles may not be delegated to teams.
        organization_content_type = ContentType.objects.get_for_model(Organization)
        if role.content_type == organization_content_type:
            data = dict(msg=_("You cannot assign an Organization role as a child role for a Team."))
            return Response(data, status=status.HTTP_400_BAD_REQUEST)
        # Credential roles require matching organizations.
        credential_content_type = ContentType.objects.get_for_model(Credential)
        if role.content_type == credential_content_type:
            if not role.content_object.organization or role.content_object.organization.id != team.organization.id:
                data = dict(msg=_("You cannot grant credential access to a team when the Organization field isn't set, or belongs to a different organization"))
                return Response(data, status=status.HTTP_400_BAD_REQUEST)
        action = 'attach'
        if request.data.get('disassociate', None):
            action = 'unattach'
        if role.is_singleton() and action == 'attach':
            data = dict(msg=_("You cannot grant system-level permissions to a team."))
            return Response(data, status=status.HTTP_400_BAD_REQUEST)
        if not request.user.can_access(self.parent_model, action, role, team,
                                       self.relationship, request.data,
                                       skip_sub_obj_read_check=False):
            raise PermissionDenied()
        if request.data.get('disassociate', None):
            team.member_role.children.remove(role)
        else:
            team.member_role.children.add(role)
        return Response(status=status.HTTP_204_NO_CONTENT)
class RoleParentsList(SubListAPIView):
    """List the parent roles of a role, filtered to those visible to the user."""
    model = Role
    serializer_class = RoleSerializer
    parent_model = Role
    relationship = 'parents'
    permission_classes = (IsAuthenticated,)
    new_in_300 = True  # API version marker

    def get_queryset(self):
        role = Role.objects.get(pk=self.kwargs['pk'])
        return Role.filter_visible_roles(self.request.user, role.parents.all())
class RoleChildrenList(SubListAPIView):
    """List the child roles of a role, filtered to those visible to the user."""
    model = Role
    serializer_class = RoleSerializer
    parent_model = Role
    relationship = 'children'
    permission_classes = (IsAuthenticated,)
    new_in_300 = True  # API version marker

    def get_queryset(self):
        role = Role.objects.get(pk=self.kwargs['pk'])
        return Role.filter_visible_roles(self.request.user, role.children.all())
# Create view functions for all of the class-based views to simplify inclusion
# in URL patterns and reverse URL lookups, converting CamelCase names to
# lowercase_with_underscore (e.g. MyView.as_view() becomes my_view).
this_module = sys.modules[__name__]
# Snapshot locals() with list(): at module level locals() IS the module
# namespace, and setattr() on this_module below mutates it.  Iterating the
# live dict while inserting raises RuntimeError on Python 3.
for attr, value in list(locals().items()):
    if isinstance(value, type) and issubclass(value, APIView):
        name = camelcase_to_underscore(attr)
        view = value.as_view()
        setattr(this_module, name, view)
|
unknown
|
codeparrot/codeparrot-clean
| ||
'''
Created on 22 Apr 2013
@author: maxz
'''
import unittest
import numpy
import GPy
import itertools
from GPy.core import Model
from GPy.core.parameterization.param import Param
from GPy.core.parameterization.transformations import Logexp
from GPy.core.parameterization.variational import NormalPosterior
class PsiStatModel(Model):
    """GPy model wrapping a single kernel psi-statistic so that
    ``checkgrad`` can verify its gradients w.r.t. X, X_variance and Z.

    `which` selects the statistic ('psi0', 'psi1' or 'psi2') computed by
    `kernel` for inducing inputs Z and the variational posterior q(X).
    """

    def __init__(self, which, X, X_variance, Z, num_inducing, kernel):
        super(PsiStatModel, self).__init__(name='psi stat test')
        self.which = which
        self.X = Param("X", X)
        # Variances are kept positive via the Logexp transformation.
        self.X_variance = Param('X_variance', X_variance, Logexp())
        self.q = NormalPosterior(self.X, self.X_variance)
        self.Z = Param("Z", Z)
        self.N, self.input_dim = X.shape
        self.num_inducing, input_dim = Z.shape
        assert self.input_dim == input_dim, "shape missmatch: Z:{!s} X:{!s}".format(Z.shape, X.shape)
        self.kern = kernel
        self.psi_ = self.kern.__getattribute__(self.which)(self.Z, self.q)
        self.add_parameters(self.q, self.Z, self.kern)

    def log_likelihood(self):
        # NOTE(review): called here with (Z, X, X_variance) but with (Z, q)
        # in __init__ — confirm the kernel accepts both calling conventions.
        return self.kern.__getattribute__(self.which)(self.Z, self.X, self.X_variance).sum()

    def parameters_changed(self):
        """Propagate unit upstream gradients through the psi statistic."""
        psimu, psiS = self.kern.__getattribute__("d" + self.which + "_dmuS")(numpy.ones_like(self.psi_), self.Z, self.q)
        self.X.gradient = psimu
        self.X_variance.gradient = psiS
        #psimu, psiS = numpy.ones(self.N * self.input_dim), numpy.ones(self.N * self.input_dim)
        # Not every kernel implements dpsiN_dZ; treat missing ones as zero.
        try: psiZ = self.kern.__getattribute__("d" + self.which + "_dZ")(numpy.ones_like(self.psi_), self.Z, self.q)
        except AttributeError: psiZ = numpy.zeros_like(self.Z)
        self.Z.gradient = psiZ
        #psiZ = numpy.ones(self.num_inducing * self.input_dim)
        N,M = self.X.shape[0], self.Z.shape[0]
        dL_dpsi0, dL_dpsi1, dL_dpsi2 = numpy.zeros([N]), numpy.zeros([N,M]), numpy.zeros([N,M,M])
        # Unit gradient only for the statistic under test.
        if self.which == 'psi0': dL_dpsi0 += 1
        if self.which == 'psi1': dL_dpsi1 += 1
        if self.which == 'psi2': dL_dpsi2 += 1
        self.kern.update_gradients_variational(numpy.zeros([1,1]),
                                               dL_dpsi0,
                                               dL_dpsi1,
                                               dL_dpsi2, self.X, self.X_variance, self.Z)
class DPsiStatTest(unittest.TestCase):
    """Gradient checks for psi0/psi1/psi2 across several kernels.

    NOTE(review): ``kernels`` currently holds only two entries (Linear, RBF)
    because the Bias/compound kernels are commented out, so the tests below
    that index ``kernels[2]`` and ``kernels[3]`` will raise IndexError, and
    ``kernels[-1]`` is the RBF kernel, not RBF+Bias as the test name implies.
    """
    input_dim = 5  # NOTE(review): immediately overwritten by input_dim = 20 below
    N = 50
    num_inducing = 10
    input_dim = 20
    X = numpy.random.randn(N, input_dim)
    X_var = .5 * numpy.ones_like(X) + .4 * numpy.clip(numpy.random.randn(*X.shape), 0, 1)
    Z = numpy.random.permutation(X)[:num_inducing]
    Y = X.dot(numpy.random.randn(input_dim, input_dim))
    # kernels = [GPy.kern.Linear(input_dim, ARD=True, variances=numpy.random.rand(input_dim)), GPy.kern.RBF(input_dim, ARD=True), GPy.kern.Bias(input_dim)]
    kernels = [
        GPy.kern.Linear(input_dim),
        GPy.kern.RBF(input_dim),
        #GPy.kern.Bias(input_dim),
        #GPy.kern.Linear(input_dim) + GPy.kern.Bias(input_dim),
        #GPy.kern.RBF(input_dim) + GPy.kern.Bias(input_dim)
    ]

    def testPsi0(self):
        for k in self.kernels:
            m = PsiStatModel('psi0', X=self.X, X_variance=self.X_var, Z=self.Z,\
                             num_inducing=self.num_inducing, kernel=k)
            m.randomize()
            assert m.checkgrad(), "{} x psi0".format("+".join(map(lambda x: x.name, k._parameters_)))

    def testPsi1(self):
        for k in self.kernels:
            m = PsiStatModel('psi1', X=self.X, X_variance=self.X_var, Z=self.Z,
                             num_inducing=self.num_inducing, kernel=k)
            m.randomize()
            assert m.checkgrad(), "{} x psi1".format("+".join(map(lambda x: x.name, k._parameters_)))

    def testPsi2_lin(self):
        k = self.kernels[0]
        m = PsiStatModel('psi2', X=self.X, X_variance=self.X_var, Z=self.Z,
                         num_inducing=self.num_inducing, kernel=k)
        m.randomize()
        assert m.checkgrad(), "{} x psi2".format("+".join(map(lambda x: x.name, k._parameters_)))

    def testPsi2_lin_bia(self):
        # NOTE(review): kernels[3] is out of range for the 2-element list above.
        k = self.kernels[3]
        m = PsiStatModel('psi2', X=self.X, X_variance=self.X_var, Z=self.Z,
                         num_inducing=self.num_inducing, kernel=k)
        m.randomize()
        assert m.checkgrad(), "{} x psi2".format("+".join(map(lambda x: x.name, k._parameters_)))

    def testPsi2_rbf(self):
        k = self.kernels[1]
        m = PsiStatModel('psi2', X=self.X, X_variance=self.X_var, Z=self.Z,
                         num_inducing=self.num_inducing, kernel=k)
        m.randomize()
        assert m.checkgrad(), "{} x psi2".format("+".join(map(lambda x: x.name, k._parameters_)))

    def testPsi2_rbf_bia(self):
        # NOTE(review): kernels[-1] is currently plain RBF, not RBF+Bias.
        k = self.kernels[-1]
        m = PsiStatModel('psi2', X=self.X, X_variance=self.X_var, Z=self.Z,
                         num_inducing=self.num_inducing, kernel=k)
        m.randomize()
        assert m.checkgrad(), "{} x psi2".format("+".join(map(lambda x: x.name, k._parameters_)))

    def testPsi2_bia(self):
        # NOTE(review): kernels[2] is out of range for the 2-element list above.
        k = self.kernels[2]
        m = PsiStatModel('psi2', X=self.X, X_variance=self.X_var, Z=self.Z,
                         num_inducing=self.num_inducing, kernel=k)
        m.randomize()
        assert m.checkgrad(), "{} x psi2".format("+".join(map(lambda x: x.name, k._parameters_)))
if __name__ == "__main__":
    import sys
    # Pass 'i' on the command line to drop into an interactive/manual setup
    # instead of running the unittest suite.
    interactive = 'i' in sys.argv
    if interactive:
#        N, num_inducing, input_dim, input_dim = 30, 5, 4, 30
#        X = numpy.random.rand(N, input_dim)
#        k = GPy.kern.Linear(input_dim) + GPy.kern.Bias(input_dim) + GPy.kern.White(input_dim, 0.00001)
#        K = k.K(X)
#        Y = numpy.random.multivariate_normal(numpy.zeros(N), K, input_dim).T
#        Y -= Y.mean(axis=0)
#        k = GPy.kern.Linear(input_dim) + GPy.kern.Bias(input_dim) + GPy.kern.White(input_dim, 0.00001)
#        m = GPy.models.Bayesian_GPLVM(Y, input_dim, kernel=k, num_inducing=num_inducing)
#        m.randomize()
#        # self.assertTrue(m.checkgrad())
        # Small, seeded problem for manual gradient inspection.
        numpy.random.seed(0)
        input_dim = 3
        N = 3
        num_inducing = 2
        D = 15
        X = numpy.random.randn(N, input_dim)
        X_var = .5 * numpy.ones_like(X) + .1 * numpy.clip(numpy.random.randn(*X.shape), 0, 1)
        Z = numpy.random.permutation(X)[:num_inducing]
        Y = X.dot(numpy.random.randn(input_dim, D))
#        kernel = GPy.kern.Bias(input_dim)
#
#        kernels = [GPy.kern.Linear(input_dim), GPy.kern.RBF(input_dim), GPy.kern.Bias(input_dim),
#                   GPy.kern.Linear(input_dim) + GPy.kern.Bias(input_dim),
#                   GPy.kern.RBF(input_dim) + GPy.kern.Bias(input_dim)]
#        for k in kernels:
#            m = PsiStatModel('psi1', X=X, X_variance=X_var, Z=Z,
#                             num_inducing=num_inducing, kernel=k)
#            assert m.checkgrad(), "{} x psi1".format("+".join(map(lambda x: x.name, k.parts)))
#
        m0 = PsiStatModel('psi0', X=X, X_variance=X_var, Z=Z,
                          num_inducing=num_inducing, kernel=GPy.kern.RBF(input_dim)+GPy.kern.Bias(input_dim))
#        m1 = PsiStatModel('psi1', X=X, X_variance=X_var, Z=Z,
#                          num_inducing=num_inducing, kernel=kernel)
#        m1 = PsiStatModel('psi1', X=X, X_variance=X_var, Z=Z,
#                          num_inducing=num_inducing, kernel=kernel)
#        m2 = PsiStatModel('psi2', X=X, X_variance=X_var, Z=Z,
#                          num_inducing=num_inducing, kernel=GPy.kern.RBF(input_dim))
#        m3 = PsiStatModel('psi2', X=X, X_variance=X_var, Z=Z,
#                          num_inducing=num_inducing, kernel=GPy.kern.Linear(input_dim, ARD=True, variances=numpy.random.rand(input_dim)))
#                          + GPy.kern.Bias(input_dim))
#        m = PsiStatModel('psi2', X=X, X_variance=X_var, Z=Z,
#                         num_inducing=num_inducing,
#                         kernel=(
#                                 GPy.kern.RBF(input_dim, ARD=1)
#                                 +GPy.kern.Linear(input_dim, ARD=1)
#                                 +GPy.kern.Bias(input_dim))
#                                )
#        m.ensure_default_constraints()
        m2 = PsiStatModel('psi2', X=X, X_variance=X_var, Z=Z,
                          num_inducing=num_inducing, kernel=(
                              GPy.kern.RBF(input_dim, numpy.random.rand(), numpy.random.rand(input_dim), ARD=1)
                              #+GPy.kern.Linear(input_dim, numpy.random.rand(input_dim), ARD=1)
                              #+GPy.kern.RBF(input_dim, numpy.random.rand(), numpy.random.rand(input_dim), ARD=1)
                              #+GPy.kern.RBF(input_dim, numpy.random.rand(), numpy.random.rand(), ARD=0)
                              +GPy.kern.Bias(input_dim)
                              +GPy.kern.White(input_dim)
                          )
                          )
        #m2.ensure_default_constraints()
    else:
        unittest.main()
|
unknown
|
codeparrot/codeparrot-clean
| ||
import binascii
import math
import struct
import sys
import unittest
from ctypes import (Structure, Union, LittleEndianUnion, BigEndianUnion,
BigEndianStructure, LittleEndianStructure,
POINTER, sizeof, cast,
c_byte, c_ubyte, c_char, c_wchar, c_void_p,
c_short, c_ushort, c_int, c_uint,
c_long, c_ulong, c_longlong, c_ulonglong,
c_uint32, c_float, c_double)
from ._support import StructCheckMixin
def bin(s):
    """Return the contents of buffer *s* as an uppercase hex string."""
    return memoryview(s).hex().upper()
# Each *simple* type that supports different byte orders has an
# __ctype_be__ attribute that specifies the same type in BIG ENDIAN
# byte order, and a __ctype_le__ attribute that is the same type in
# LITTLE ENDIAN byte order.
#
# For Structures and Unions, these types are created on demand.
class Test(unittest.TestCase, StructCheckMixin):
def test_slots(self):
class BigPoint(BigEndianStructure):
__slots__ = ()
_fields_ = [("x", c_int), ("y", c_int)]
self.check_struct(BigPoint)
class LowPoint(LittleEndianStructure):
__slots__ = ()
_fields_ = [("x", c_int), ("y", c_int)]
self.check_struct(LowPoint)
big = BigPoint()
little = LowPoint()
big.x = 4
big.y = 2
little.x = 2
little.y = 4
with self.assertRaises(AttributeError):
big.z = 42
with self.assertRaises(AttributeError):
little.z = 24
def test_endian_short(self):
if sys.byteorder == "little":
self.assertIs(c_short.__ctype_le__, c_short)
self.assertIs(c_short.__ctype_be__.__ctype_le__, c_short)
else:
self.assertIs(c_short.__ctype_be__, c_short)
self.assertIs(c_short.__ctype_le__.__ctype_be__, c_short)
s = c_short.__ctype_be__(0x1234)
self.assertEqual(bin(struct.pack(">h", 0x1234)), "1234")
self.assertEqual(bin(s), "1234")
self.assertEqual(s.value, 0x1234)
s = c_short.__ctype_le__(0x1234)
self.assertEqual(bin(struct.pack("<h", 0x1234)), "3412")
self.assertEqual(bin(s), "3412")
self.assertEqual(s.value, 0x1234)
s = c_ushort.__ctype_be__(0x1234)
self.assertEqual(bin(struct.pack(">h", 0x1234)), "1234")
self.assertEqual(bin(s), "1234")
self.assertEqual(s.value, 0x1234)
s = c_ushort.__ctype_le__(0x1234)
self.assertEqual(bin(struct.pack("<h", 0x1234)), "3412")
self.assertEqual(bin(s), "3412")
self.assertEqual(s.value, 0x1234)
def test_endian_int(self):
if sys.byteorder == "little":
self.assertIs(c_int.__ctype_le__, c_int)
self.assertIs(c_int.__ctype_be__.__ctype_le__, c_int)
else:
self.assertIs(c_int.__ctype_be__, c_int)
self.assertIs(c_int.__ctype_le__.__ctype_be__, c_int)
s = c_int.__ctype_be__(0x12345678)
self.assertEqual(bin(struct.pack(">i", 0x12345678)), "12345678")
self.assertEqual(bin(s), "12345678")
self.assertEqual(s.value, 0x12345678)
s = c_int.__ctype_le__(0x12345678)
self.assertEqual(bin(struct.pack("<i", 0x12345678)), "78563412")
self.assertEqual(bin(s), "78563412")
self.assertEqual(s.value, 0x12345678)
s = c_uint.__ctype_be__(0x12345678)
self.assertEqual(bin(struct.pack(">I", 0x12345678)), "12345678")
self.assertEqual(bin(s), "12345678")
self.assertEqual(s.value, 0x12345678)
s = c_uint.__ctype_le__(0x12345678)
self.assertEqual(bin(struct.pack("<I", 0x12345678)), "78563412")
self.assertEqual(bin(s), "78563412")
self.assertEqual(s.value, 0x12345678)
def test_endian_longlong(self):
if sys.byteorder == "little":
self.assertIs(c_longlong.__ctype_le__, c_longlong)
self.assertIs(c_longlong.__ctype_be__.__ctype_le__, c_longlong)
else:
self.assertIs(c_longlong.__ctype_be__, c_longlong)
self.assertIs(c_longlong.__ctype_le__.__ctype_be__, c_longlong)
s = c_longlong.__ctype_be__(0x1234567890ABCDEF)
self.assertEqual(bin(struct.pack(">q", 0x1234567890ABCDEF)), "1234567890ABCDEF")
self.assertEqual(bin(s), "1234567890ABCDEF")
self.assertEqual(s.value, 0x1234567890ABCDEF)
s = c_longlong.__ctype_le__(0x1234567890ABCDEF)
self.assertEqual(bin(struct.pack("<q", 0x1234567890ABCDEF)), "EFCDAB9078563412")
self.assertEqual(bin(s), "EFCDAB9078563412")
self.assertEqual(s.value, 0x1234567890ABCDEF)
s = c_ulonglong.__ctype_be__(0x1234567890ABCDEF)
self.assertEqual(bin(struct.pack(">Q", 0x1234567890ABCDEF)), "1234567890ABCDEF")
self.assertEqual(bin(s), "1234567890ABCDEF")
self.assertEqual(s.value, 0x1234567890ABCDEF)
s = c_ulonglong.__ctype_le__(0x1234567890ABCDEF)
self.assertEqual(bin(struct.pack("<Q", 0x1234567890ABCDEF)), "EFCDAB9078563412")
self.assertEqual(bin(s), "EFCDAB9078563412")
self.assertEqual(s.value, 0x1234567890ABCDEF)
def test_endian_float(self):
if sys.byteorder == "little":
self.assertIs(c_float.__ctype_le__, c_float)
self.assertIs(c_float.__ctype_be__.__ctype_le__, c_float)
else:
self.assertIs(c_float.__ctype_be__, c_float)
self.assertIs(c_float.__ctype_le__.__ctype_be__, c_float)
s = c_float(math.pi)
self.assertEqual(bin(struct.pack("f", math.pi)), bin(s))
# Hm, what's the precision of a float compared to a double?
self.assertAlmostEqual(s.value, math.pi, places=6)
s = c_float.__ctype_le__(math.pi)
self.assertAlmostEqual(s.value, math.pi, places=6)
self.assertEqual(bin(struct.pack("<f", math.pi)), bin(s))
s = c_float.__ctype_be__(math.pi)
self.assertAlmostEqual(s.value, math.pi, places=6)
self.assertEqual(bin(struct.pack(">f", math.pi)), bin(s))
def test_endian_double(self):
if sys.byteorder == "little":
self.assertIs(c_double.__ctype_le__, c_double)
self.assertIs(c_double.__ctype_be__.__ctype_le__, c_double)
else:
self.assertIs(c_double.__ctype_be__, c_double)
self.assertIs(c_double.__ctype_le__.__ctype_be__, c_double)
s = c_double(math.pi)
self.assertEqual(s.value, math.pi)
self.assertEqual(bin(struct.pack("d", math.pi)), bin(s))
s = c_double.__ctype_le__(math.pi)
self.assertEqual(s.value, math.pi)
self.assertEqual(bin(struct.pack("<d", math.pi)), bin(s))
s = c_double.__ctype_be__(math.pi)
self.assertEqual(s.value, math.pi)
self.assertEqual(bin(struct.pack(">d", math.pi)), bin(s))
def test_endian_other(self):
self.assertIs(c_byte.__ctype_le__, c_byte)
self.assertIs(c_byte.__ctype_be__, c_byte)
self.assertIs(c_ubyte.__ctype_le__, c_ubyte)
self.assertIs(c_ubyte.__ctype_be__, c_ubyte)
self.assertIs(c_char.__ctype_le__, c_char)
self.assertIs(c_char.__ctype_be__, c_char)
def test_struct_fields_unsupported_byte_order(self):
fields = [
("a", c_ubyte),
("b", c_byte),
("c", c_short),
("d", c_ushort),
("e", c_int),
("f", c_uint),
("g", c_long),
("h", c_ulong),
("i", c_longlong),
("k", c_ulonglong),
("l", c_float),
("m", c_double),
("n", c_char),
("b1", c_byte, 3),
("b2", c_byte, 3),
("b3", c_byte, 2),
("a", c_int * 3 * 3 * 3)
]
# these fields do not support different byte order:
for typ in c_wchar, c_void_p, POINTER(c_int):
with self.assertRaises(TypeError):
class T(BigEndianStructure if sys.byteorder == "little" else LittleEndianStructure):
_fields_ = fields + [("x", typ)]
self.check_struct(T)
def test_struct_struct(self):
# nested structures with different byteorders
# create nested structures with given byteorders and set memory to data
for nested, data in (
(BigEndianStructure, b'\0\0\0\1\0\0\0\2'),
(LittleEndianStructure, b'\1\0\0\0\2\0\0\0'),
):
for parent in (
BigEndianStructure,
LittleEndianStructure,
Structure,
):
class NestedStructure(nested):
_fields_ = [("x", c_uint32),
("y", c_uint32)]
self.check_struct(NestedStructure)
class TestStructure(parent):
_fields_ = [("point", NestedStructure)]
self.check_struct(TestStructure)
self.assertEqual(len(data), sizeof(TestStructure))
ptr = POINTER(TestStructure)
s = cast(data, ptr)[0]
self.assertEqual(s.point.x, 1)
self.assertEqual(s.point.y, 2)
def test_struct_field_alignment(self):
# standard packing in struct uses no alignment.
# So, we have to align using pad bytes.
#
# Unaligned accesses will crash Python (on those platforms that
# don't allow it, like sparc solaris).
if sys.byteorder == "little":
base = BigEndianStructure
fmt = ">bxhid"
else:
base = LittleEndianStructure
fmt = "<bxhid"
class S(base):
_fields_ = [("b", c_byte),
("h", c_short),
("i", c_int),
("d", c_double)]
self.check_struct(S)
s1 = S(0x12, 0x1234, 0x12345678, 3.14)
s2 = struct.pack(fmt, 0x12, 0x1234, 0x12345678, 3.14)
self.assertEqual(bin(s1), bin(s2))
def test_unaligned_nonnative_struct_fields(self):
if sys.byteorder == "little":
base = BigEndianStructure
fmt = ">b h xi xd"
else:
base = LittleEndianStructure
fmt = "<b h xi xd"
class S(base):
_pack_ = 1
_layout_ = "ms"
_fields_ = [("b", c_byte),
("h", c_short),
("_1", c_byte),
("i", c_int),
("_2", c_byte),
("d", c_double)]
self.check_struct(S)
s1 = S()
s1.b = 0x12
s1.h = 0x1234
s1.i = 0x12345678
s1.d = 3.14
s2 = struct.pack(fmt, 0x12, 0x1234, 0x12345678, 3.14)
self.assertEqual(bin(s1), bin(s2))
def test_unaligned_native_struct_fields(self):
if sys.byteorder == "little":
fmt = "<b h xi xd"
else:
base = LittleEndianStructure
fmt = ">b h xi xd"
class S(Structure):
_pack_ = 1
_layout_ = "ms"
_fields_ = [("b", c_byte),
("h", c_short),
("_1", c_byte),
("i", c_int),
("_2", c_byte),
("d", c_double)]
self.check_struct(S)
s1 = S()
s1.b = 0x12
s1.h = 0x1234
s1.i = 0x12345678
s1.d = 3.14
s2 = struct.pack(fmt, 0x12, 0x1234, 0x12345678, 3.14)
self.assertEqual(bin(s1), bin(s2))
def test_union_fields_unsupported_byte_order(self):
    """Swapped unions must reject field types without byte-order support.

    Bug fix: the original fields list declared two entries named "a"
    (the leading c_ubyte and the trailing c_int array), so one field
    silently shadowed the other; the array entry is renamed "arr".
    """
    fields = [
        ("a", c_ubyte),
        ("b", c_byte),
        ("c", c_short),
        ("d", c_ushort),
        ("e", c_int),
        ("f", c_uint),
        ("g", c_long),
        ("h", c_ulong),
        ("i", c_longlong),
        ("k", c_ulonglong),
        ("l", c_float),
        ("m", c_double),
        ("n", c_char),
        ("b1", c_byte, 3),
        ("b2", c_byte, 3),
        ("b3", c_byte, 2),
        ("arr", c_int * 3 * 3 * 3)
    ]
    # these fields do not support different byte order:
    for typ in c_wchar, c_void_p, POINTER(c_int):
        with self.assertRaises(TypeError):
            class T(BigEndianUnion if sys.byteorder == "little" else LittleEndianUnion):
                _fields_ = fields + [("x", typ)]
            self.check_union(T)
def test_union_struct(self):
    """Nested structures inside unions honor their declared byte order.

    Builds a nested structure per byte order, wraps it in unions of
    every byte order, and checks the union reads back the expected
    field values from raw memory.
    """
    cases = (
        (BigEndianStructure, b'\0\0\0\1\0\0\0\2'),
        (LittleEndianStructure, b'\1\0\0\0\2\0\0\0'),
    )
    parents = (BigEndianUnion, LittleEndianUnion, Union)
    for nested, data in cases:
        for parent in parents:
            class NestedStructure(nested):
                _fields_ = [("x", c_uint32),
                            ("y", c_uint32)]
            self.check_struct(NestedStructure)

            class TestUnion(parent):
                _fields_ = [("point", NestedStructure)]
            self.check_union(TestUnion)

            self.assertEqual(len(data), sizeof(TestUnion))
            view = cast(data, POINTER(TestUnion))[0]
            self.assertEqual(view.point.x, 1)
            self.assertEqual(view.point.y, 2)
def test_build_struct_union_opposite_system_byteorder(self):
    """gh-105102: nesting swapped structs and unions must not fail."""
    # Always pick the byte order opposite to the native one.
    if sys.byteorder == "little":
        _Structure, _Union = BigEndianStructure, BigEndianUnion
    else:
        _Structure, _Union = LittleEndianStructure, LittleEndianUnion

    class S1(_Structure):
        _fields_ = [("a", c_byte), ("b", c_byte)]
    self.check_struct(S1)

    class U1(_Union):
        _fields_ = [("s1", S1), ("ab", c_short)]
    self.check_union(U1)

    class S2(_Structure):
        _fields_ = [("u1", U1), ("c", c_byte)]
    self.check_struct(S2)
# Run this test module's suite when executed directly.
if __name__ == "__main__":
    unittest.main()
|
python
|
github
|
https://github.com/python/cpython
|
Lib/test/test_ctypes/test_byteswap.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for TF-GAN's head.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.gan.python import namedtuples as tfgan_tuples
from tensorflow.contrib.gan.python.estimator.python import head
from tensorflow.python.estimator import model_fn as model_fn_lib
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import test
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.training import training
_DEFAULT_SERVING_KEY = signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY
def dummy_loss(gan_model, add_summaries=True):  # pylint:disable=unused-argument
  """Trivial GAN loss: sum of real-minus-generated discriminator outputs."""
  difference = (gan_model.discriminator_real_outputs -
                gan_model.discriminator_gen_outputs)
  return math_ops.reduce_sum(difference)
def get_gan_model():
  """Builds a minimal GANModel with one dummy variable per network."""
  # TODO(joelshor): Find a better way of creating a variable scope.
  with variable_scope.variable_scope('generator') as gen_scope:
    generator_var = variable_scope.get_variable('dummy_var', initializer=0.0)
  with variable_scope.variable_scope('discriminator') as dis_scope:
    discriminator_var = variable_scope.get_variable(
        'dummy_var', initializer=0.0)
  return tfgan_tuples.GANModel(
      generator_inputs=None,
      generated_data=array_ops.ones([3, 4]),
      generator_variables=[generator_var],
      generator_scope=gen_scope,
      generator_fn=None,
      real_data=None,
      discriminator_real_outputs=array_ops.ones([1, 2, 3]) * discriminator_var,
      discriminator_gen_outputs=(array_ops.ones([1, 2, 3]) * generator_var *
                                 discriminator_var),
      discriminator_variables=[discriminator_var],
      discriminator_scope=dis_scope,
      discriminator_fn=None)
class GANHeadTest(test.TestCase):
  """Smoke tests for head.gan_head across estimator modes."""

  def setUp(self):
    super(GANHeadTest, self).setUp()
    self.gan_head = head.gan_head(
        generator_loss_fn=dummy_loss,
        discriminator_loss_fn=dummy_loss,
        generator_optimizer=training.GradientDescentOptimizer(1.0),
        discriminator_optimizer=training.GradientDescentOptimizer(1.0),
        get_eval_metric_ops_fn=self.get_metrics)
    self.assertIsInstance(self.gan_head, head.GANHead)

  def get_metrics(self, gan_model):
    """Eval-metric callback; checks it receives a GANModel namedtuple."""
    self.assertTrue(isinstance(gan_model, tfgan_tuples.GANModel))
    return {}

  def _test_modes_helper(self, mode):
    """Creates an EstimatorSpec for `mode` from a fresh dummy GANModel."""
    return self.gan_head.create_estimator_spec(
        features=None, mode=mode, logits=get_gan_model())

  def test_modes_predict(self):
    spec = self._test_modes_helper(model_fn_lib.ModeKeys.PREDICT)
    self.assertItemsEqual((_DEFAULT_SERVING_KEY, 'predict'),
                          spec.export_outputs.keys())

  def test_modes_eval(self):
    self._test_modes_helper(model_fn_lib.ModeKeys.EVAL)

  def test_modes_train(self):
    self._test_modes_helper(model_fn_lib.ModeKeys.TRAIN)
# Run the test suite when executed directly.
if __name__ == '__main__':
  test.main()
|
unknown
|
codeparrot/codeparrot-clean
| ||
import postcss, { type ChildNode, type Plugin, type Root } from 'postcss'
import { format, type Options } from 'prettier'
import { walk } from '../../utils/walk'
// Prettier options applied to every piece of CSS this codemod re-prints.
const FORMAT_OPTIONS: Options = {
  parser: 'css',
  semi: true,
  singleQuote: true,
}
// Prettier is used to generate cleaner output, but it's only used on the nodes
// that were marked as `pretty` during the migration.
export function formatNodes(): Plugin {
  async function migrate(root: Root) {
    // Collect the nodes that need re-printing: `@tw-bucket` at-rules in
    // document order, and `pretty`-marked nodes ahead of them.
    let pending: ChildNode[] = []
    walk(root, (node, _idx, parent) => {
      // Always print semicolons after at-rules
      if (node.type === 'atrule') {
        node.raws.semicolon = true
      }

      if (node.type === 'atrule' && node.name === 'tw-bucket') {
        pending.push(node)
      } else if (node.raws.tailwind_pretty) {
        // @ts-expect-error We might not have a parent
        node.parent ??= parent
        pending.unshift(node)
      }
    })

    let output: string[] = []

    for (let node of pending) {
      let isBucket = node.type === 'atrule' && node.name === 'tw-bucket'

      let contents: string
      if (node.type === 'atrule' && node.name === 'tw-bucket') {
        // Remove the `@tw-bucket` wrapping, and use the contents directly.
        contents = node
          .toString()
          .trim()
          .replace(/@tw-bucket(.*?){([\s\S]*)}/, '$2')
      } else {
        contents = node.toString()
      }

      // Do not format the user bucket to ensure we keep the user's formatting
      // intact.
      if (node.type === 'atrule' && node.name === 'tw-bucket' && node.params === 'user') {
        output.push(contents)
        continue
      }

      // Format buckets
      if (isBucket) {
        output.push(await format(contents, FORMAT_OPTIONS))
        continue
      }

      // Format any other nodes in place
      node.replaceWith(
        postcss.parse(
          `${node.raws.before ?? ''}${(await format(contents, FORMAT_OPTIONS)).trim()}`,
        ),
      )
    }

    // Rebuild the stylesheet from the formatted buckets.
    root.removeAll()
    root.append(
      postcss.parse(
        output
          .map((bucket) => bucket.trim())
          .filter(Boolean)
          .join('\n\n'),
      ),
    )
  }

  return {
    postcssPlugin: '@tailwindcss/upgrade/format-nodes',
    OnceExit: migrate,
  }
}
|
typescript
|
github
|
https://github.com/tailwindlabs/tailwindcss
|
packages/@tailwindcss-upgrade/src/codemods/css/format-nodes.ts
|
from django import template
from django.template import Node, Token, TemplateSyntaxError
from django.template import resolve_variable
from django.template.defaultfilters import stringfilter
from django.templatetags import future
from localeurlcustom import utils
register = template.Library()
def chlocale(url, locale):
    """
    Changes the URL's locale prefix if the path is not locale-independent.
    Otherwise removes locale prefix.
    """
    # Drop the script prefix and any existing locale prefix, then re-prefix.
    _, remainder = utils.strip_script_prefix(url)
    _, remainder = utils.strip_path(remainder)
    return utils.locale_url(remainder, locale)
chlocale = stringfilter(chlocale)
register.filter('chlocale', chlocale)
def rmlocale(url):
    """Removes the locale prefix from the URL."""
    script_prefix, remainder = utils.strip_script_prefix(url)
    _, remainder = utils.strip_path(remainder)
    return script_prefix + remainder
rmlocale = stringfilter(rmlocale)
register.filter('rmlocale', rmlocale)
def locale_url(parser, token):
    """
    Renders the url for the view with another locale prefix. The syntax is
    like the 'url' tag, only with a locale before the view.
    Examples:
    {% locale_url "de" cal.views.day day %}
    {% locale_url "nl" cal.views.home %}
    {% locale_url "en-gb" cal.views.month month as month_url %}
    """
    pieces = token.split_contents()
    if len(pieces) < 3:
        raise TemplateSyntaxError("'%s' takes at least two arguments:"
                                  " the locale and a view" % pieces[0])
    # Re-tokenize without the locale argument and delegate to the stock
    # `url` tag compiler.
    rewritten = ' '.join([pieces[0]] + pieces[2:])
    urlnode = future.url(parser, Token(token.token_type, rewritten))
    return LocaleURLNode(pieces[1], urlnode)
class LocaleURLNode(Node):
    """Renders the wrapped `url` node's result under another locale prefix.

    Args:
        locale: template variable resolving to a language code.
        urlnode: the compiled stock `url` node to delegate to.

    Raises:
        ValueError: when the resolved locale is not in settings.LANGUAGES.
    """
    def __init__(self, locale, urlnode):
        self.locale = locale
        self.urlnode = urlnode
    def render(self, context):
        locale = resolve_variable(self.locale, context)
        if utils.supported_language(locale) is None:
            raise ValueError("locale not in settings.LANGUAGES: %s" % locale)
        # Rendering the wrapped node either returns the URL directly or, in
        # the "as var" form, stores it in the context and returns ''.
        path = self.urlnode.render(context)
        if self.urlnode.asvar:
            # Bug fix: the original rendered the url node a second time
            # here, doubling the URL-reversal work; the render() above has
            # already populated context[asvar].
            context[self.urlnode.asvar] = chlocale(context[self.urlnode.asvar],
                                                   locale)
            return ''
        else:
            return chlocale(path, locale)
register.tag('locale_url', locale_url)
|
unknown
|
codeparrot/codeparrot-clean
| ||
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
# YAML blob Ansible parses to document this connection plugin's options.
# (String content left untouched: it is runtime data, not a comment.)
DOCUMENTATION = """
author: Ansible Core Team
connection: delegation_connection
short_description: Test connection for delegated host check
description:
- Some further description that you don't care about.
options:
remote_password:
description: The remote password
type: str
vars:
- name: ansible_password
# Tests that an aliased key gets the -k option which hardcodes the value to password
aliases:
- password
"""
from ansible.plugins.connection import ConnectionBase
class Connection(ConnectionBase):
    """Minimal pass-through connection plugin used by delegation tests.

    Every method simply defers to ConnectionBase; no real transport is
    opened or used.
    """
    # Identifier Ansible uses to select this plugin.
    transport = 'delegation_connection'
    has_pipelining = True
    def __init__(self, *args, **kwargs):
        super(Connection, self).__init__(*args, **kwargs)
    def _connect(self):
        super(Connection, self)._connect()
    def exec_command(self, cmd, in_data=None, sudoable=True):
        # NOTE(review): returns None rather than the base method's result;
        # presumably fine for this test fixture — confirm callers ignore it.
        super(Connection, self).exec_command(cmd, in_data, sudoable)
    def put_file(self, in_path, out_path):
        super(Connection, self).put_file(in_path, out_path)
    def fetch_file(self, in_path, out_path):
        super(Connection, self).fetch_file(in_path, out_path)
    def close(self):
        super(Connection, self).close()
|
unknown
|
codeparrot/codeparrot-clean
| ||
# datatasks\destinations\DatabaseDestination.py
import os
from datatasks.db.SQLBuilderMixin import SQLBuilderMixin
from datatasks.db.DatabaseFactory import DatabaseFactory
from datatasks import settings
from .FileDestination import FileDestination
class DatabaseDestination(FileDestination, SQLBuilderMixin):
    """Capable of writing records to a database table. Inherits from FileDestination.

    Records are first spooled to a CSV file (preferably on a share visible
    to the database host) and then bulk-loaded into the target table.
    """
    def __init__(self, name, db_name, tablename_str, **kwargs):
        """
        Args:
            name: logical destination name; also names the CSV spool file.
            db_name: key handed to DatabaseFactory to obtain a DB handle.
            tablename_str: target table name.
        """
        self.filepath_str = '{}{}.csv'.format(settings.SHARE_PATH, name)
        super().__init__(name, self.filepath_str, write_headers=False, **kwargs)
        self._db = DatabaseFactory().get_db(db_name)
        self.tablename_str = tablename_str
        self.sql_table_exists = False
        self.set_columns()
    def set_columns(self):
        """
        Queries the database table for its column names and sets the key list
        of the instance. Falls back to the source's keys when the table does
        not exist yet.
        """
        # Consistency fix: use the same zero-argument super() style as
        # __init__ (behavior identical).
        super().set_keys(self._db.get_column_list(self.tablename_str))
        # Idiom fix: truthiness instead of `!= []`.
        if self._key_list:
            self.sql_table_exists = True
        else:
            self.set_keys(self.source.keys)
    def write_out(self):
        """
        Writes records to the database table: clears (or creates) the target
        table, truncates over-long string fields, then bulk-inserts the
        source records.
        """
        if self.sql_table_exists:
            sql_str = 'delete from {};'.format(self.tablename_str)
        else:
            sql_str = self.build_create_table_sql()
        self._db.execute_command(sql_str)
        self.truncate_fields()
        self._db.insert_into(self.tablename_str, self.source)
    def truncate_fields(self):
        """
        Truncates string fields to fit auto-assigned varchar(x) datatypes
        that could otherwise make the database load fail. No-op when the
        table already exists (its real column types are authoritative).

        Raises:
            Exception: if `sql_datatype_map` is not set on the instance.
        """
        if self.sql_table_exists:
            return
        # Idiom fix: `not hasattr(...)` instead of `hasattr(...) is False`.
        if not hasattr(self, 'sql_datatype_map'):
            raise Exception('attribute sql_datatype_map is required to be set on this instance')
        for col, datatype in self.sql_datatype_map.items():
            # Idiom fix: startswith instead of slice comparison.
            if datatype.startswith('varchar'):
                # e.g. 'varchar(255)' -> 255
                max_length = int(datatype.split('(')[1].rstrip(')'))
                for record in self.source:
                    if isinstance(record[col], str):
                        record[col] = record[col][:max_length]
|
unknown
|
codeparrot/codeparrot-clean
| ||
{
"kind": "Dashboard",
"apiVersion": "dashboard.grafana.app/v2alpha1",
"metadata": {
"name": "v0alpha1.xychart-migrations.v42"
},
"spec": {
"annotations": [
{
"kind": "AnnotationQuery",
"spec": {
"datasource": {
"type": "grafana",
"uid": "-- Grafana --"
},
"query": {
"kind": "grafana",
"spec": {
"limit": 100,
"matchAny": false,
"tags": [],
"type": "dashboard"
}
},
"enable": true,
"hide": true,
"iconColor": "rgba(0, 211, 255, 1)",
"name": "Annotations \u0026 Alerts",
"builtIn": true,
"legacyOptions": {
"type": "dashboard"
}
}
}
],
"cursorSync": "Off",
"editable": true,
"elements": {
"panel-10": {
"kind": "Panel",
"spec": {
"id": 10,
"title": "",
"description": "",
"links": [],
"data": {
"kind": "QueryGroup",
"spec": {
"queries": [],
"transformations": [],
"queryOptions": {}
}
},
"vizConfig": {
"kind": "text",
"spec": {
"pluginVersion": "10.3.0-pre",
"options": {
"code": {
"language": "plaintext",
"showLineNumbers": false,
"showMiniMap": false
},
"content": "---\n### XYChart\n\n- Multi-dimensional, non-timeseries data (scientific, financial, engineering)\n- Scatter plots, bubble charts, function graphs, and etch-a-sketch!\n",
"mode": "markdown"
},
"fieldConfig": {
"defaults": {},
"overrides": []
}
}
}
}
},
"panel-11": {
"kind": "Panel",
"spec": {
"id": 11,
"title": "Multi-series Temperature vs Humidity",
"description": "",
"links": [],
"data": {
"kind": "QueryGroup",
"spec": {
"queries": [
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "grafana",
"spec": {
"queryType": "snapshot",
"snapshot": [
{
"data": {
"values": [
[
1686898151000,
1686898161000,
1686898171000,
1686898181000,
1686898191000,
1686898201000,
1686898211000,
1686898221000,
1686898231000,
1686898241000
],
[
0.4860747509084685,
0.4790778553981873,
0.4597786615711249,
0.4478751241705323,
0.44020548182152397,
0.4569020188814074,
0.4655250429581913,
0.4819032762634117,
0.491365303224968,
0.5092239049148886
],
[
35.084960222673146,
35.089101898649055,
35.130020139289115,
35.10251705413486,
35.08184755996438,
35.04582600108574,
35.09077388700681,
35.04096375315356,
35.05647864027338,
35.091800720894916
],
[
71.2187151336704,
71.24617135003214,
71.21726829714834,
71.26528218716341,
71.26084702358875,
71.24054378677393,
71.27884802991244,
71.29783163616926,
71.33230999794793,
71.34975514559837
]
]
},
"schema": {
"fields": [
{
"config": {},
"name": "_time",
"type": "time",
"typeInfo": {
"frame": "time.Time",
"nullable": true
}
},
{
"config": {},
"labels": {
"sensor_id": "TLM0100"
},
"name": "co",
"type": "number",
"typeInfo": {
"frame": "float64",
"nullable": true
}
},
{
"config": {},
"labels": {
"sensor_id": "TLM0100"
},
"name": "humidity",
"type": "number",
"typeInfo": {
"frame": "float64",
"nullable": true
}
},
{
"config": {},
"labels": {
"sensor_id": "TLM0100"
},
"name": "temperature",
"type": "number",
"typeInfo": {
"frame": "float64",
"nullable": true
}
}
],
"meta": {
"executedQueryString": "import \"influxdata/influxdb/sample\"\nimport \"influxdata/influxdb/schema\"\n\nsample.data(set: \"airSensor\")\n |\u003e limit(n: 10)\n |\u003e group(columns: [\"sensor_id\"])\n |\u003e schema.fieldsAsCols()\n",
"typeVersion": [
0,
0
]
},
"refId": "A"
}
},
{
"data": {
"values": [
[
1686898151000,
1686898161000,
1686898171000,
1686898181000,
1686898191000,
1686898201000,
1686898211000,
1686898221000,
1686898231000,
1686898241000
],
[
0.4964171323208575,
0.48257922637952133,
0.46567087322576145,
0.4467650568596856,
0.4417772514767169,
0.4341379534638706,
0.439345038257859,
0.43452737707188627,
0.42417500678639164,
0.40704613738954887
],
[
34.92662088456207,
34.91464944732493,
34.95985887959429,
34.929238294894844,
34.954103486412336,
34.98895514808448,
35.03476240219413,
35.028863365125844,
35.03717014192905,
35.00775919996651
],
[
71.83521495327129,
71.83370684393908,
71.83712160725877,
71.88299650060345,
71.90526594972503,
71.8631768515712,
71.85577028152356,
71.89245169322045,
71.93606971457449,
71.96164771829956
]
]
},
"schema": {
"fields": [
{
"config": {},
"name": "_time",
"type": "time",
"typeInfo": {
"frame": "time.Time",
"nullable": true
}
},
{
"config": {},
"labels": {
"sensor_id": "TLM0101"
},
"name": "co",
"type": "number",
"typeInfo": {
"frame": "float64",
"nullable": true
}
},
{
"config": {},
"labels": {
"sensor_id": "TLM0101"
},
"name": "humidity",
"type": "number",
"typeInfo": {
"frame": "float64",
"nullable": true
}
},
{
"config": {},
"labels": {
"sensor_id": "TLM0101"
},
"name": "temperature",
"type": "number",
"typeInfo": {
"frame": "float64",
"nullable": true
}
}
],
"refId": "A"
}
},
{
"data": {
"values": [
[
1686898151000,
1686898161000,
1686898171000,
1686898181000,
1686898191000,
1686898201000,
1686898211000,
1686898221000,
1686898231000,
1686898241000
],
[
0.4870365157212326,
0.5032215395360725,
0.5035259485542879,
0.4841798164121083,
0.483812619615984,
0.4882993499867786,
0.4719009580435705,
0.4700689455810135,
0.48112413053169706,
0.4691106107162724
],
[
34.86359615237462,
34.84629630959152,
34.81771460191974,
34.854640808896036,
34.86699022367547,
34.864319179513096,
34.86915527122888,
34.89889649251399,
34.86576388906259,
34.88622805723735
],
[
72.03972207916735,
72.05566134520713,
72.08473708469143,
72.07306942754916,
72.0427409958616,
72.04536459775873,
72.09400368933838,
72.14293243585941,
72.18840859469728,
72.15524663568557
]
]
},
"schema": {
"fields": [
{
"config": {},
"name": "_time",
"type": "time",
"typeInfo": {
"frame": "time.Time",
"nullable": true
}
},
{
"config": {},
"labels": {
"sensor_id": "TLM0102"
},
"name": "co",
"type": "number",
"typeInfo": {
"frame": "float64",
"nullable": true
}
},
{
"config": {},
"labels": {
"sensor_id": "TLM0102"
},
"name": "humidity",
"type": "number",
"typeInfo": {
"frame": "float64",
"nullable": true
}
},
{
"config": {},
"labels": {
"sensor_id": "TLM0102"
},
"name": "temperature",
"type": "number",
"typeInfo": {
"frame": "float64",
"nullable": true
}
}
],
"refId": "A"
}
},
{
"data": {
"values": [
[
1686898151000,
1686898161000,
1686898171000,
1686898181000,
1686898191000,
1686898201000,
1686898211000,
1686898221000,
1686898231000,
1686898241000
],
[
0.3979338437168368,
0.4117117830425588,
0.4000376537804249,
0.40114553332960723,
0.3909759394807085,
0.38213452854680874,
0.37339826793170855,
0.3624713623472977,
0.35640734768856297,
0.35690557816119645
],
[
35.16192242281515,
35.19891757080395,
35.23754373785834,
35.25270355198698,
35.29525947947623,
35.272026086051184,
35.275233467451635,
35.23834593104291,
35.25233833634368,
35.22053598631417
],
[
71.29169927438586,
71.32114142326272,
71.33632903748085,
71.3064635464553,
71.26436580075855,
71.30431517798449,
71.28861822950174,
71.26778760430453,
71.23481860950403,
71.23084180913762
]
]
},
"schema": {
"fields": [
{
"config": {},
"name": "_time",
"type": "time",
"typeInfo": {
"frame": "time.Time",
"nullable": true
}
},
{
"config": {},
"labels": {
"sensor_id": "TLM0103"
},
"name": "co",
"type": "number",
"typeInfo": {
"frame": "float64",
"nullable": true
}
},
{
"config": {},
"labels": {
"sensor_id": "TLM0103"
},
"name": "humidity",
"type": "number",
"typeInfo": {
"frame": "float64",
"nullable": true
}
},
{
"config": {},
"labels": {
"sensor_id": "TLM0103"
},
"name": "temperature",
"type": "number",
"typeInfo": {
"frame": "float64",
"nullable": true
}
}
],
"refId": "A"
}
},
{
"data": {
"values": [
[
1686898151000,
1686898161000,
1686898171000,
1686898181000,
1686898191000,
1686898201000,
1686898211000,
1686898221000,
1686898231000,
1686898241000
],
[
0.5129015632374951,
0.552968955740271,
0.5690444135565038,
0.6050221760178951,
0.5943904374498042,
0.6153588301241533,
0.6164375823908576,
0.6059635952981327,
0.6470283318343141,
0.6861075098169984
],
[
35.75385592664215,
35.753218714107504,
35.72547059379115,
35.74553237372676,
35.73378268235891,
35.747560005048086,
35.769867599816735,
35.73929899563808,
35.705317880681875,
35.65611547721327
],
[
73.64806558624832,
73.65728412124716,
73.68805520135102,
73.65185281162863,
73.63499402146681,
73.68359308763364,
73.64682180062367,
73.63298265031713,
73.59096788790947,
73.6381949597034
]
]
},
"schema": {
"fields": [
{
"config": {},
"name": "_time",
"type": "time",
"typeInfo": {
"frame": "time.Time",
"nullable": true
}
},
{
"config": {},
"labels": {
"sensor_id": "TLM0200"
},
"name": "co",
"type": "number",
"typeInfo": {
"frame": "float64",
"nullable": true
}
},
{
"config": {},
"labels": {
"sensor_id": "TLM0200"
},
"name": "humidity",
"type": "number",
"typeInfo": {
"frame": "float64",
"nullable": true
}
},
{
"config": {},
"labels": {
"sensor_id": "TLM0200"
},
"name": "temperature",
"type": "number",
"typeInfo": {
"frame": "float64",
"nullable": true
}
}
],
"refId": "A"
}
},
{
"data": {
"values": [
[
1686898151000,
1686898161000,
1686898171000,
1686898181000,
1686898191000,
1686898201000,
1686898211000,
1686898221000,
1686898231000,
1686898241000
],
[
0.5082489903097206,
0.525490999229553,
0.5300688563597754,
0.5351282770519872,
0.533437724470923,
0.5164188260958164,
0.5134188770545406,
0.4949993424337248,
0.4960387472164697,
0.5046934441184395
],
[
35.24972613768055,
35.238916979727335,
35.28758958750148,
35.26413172906637,
35.2875488141577,
35.32420268624064,
35.2932945023372,
35.277959228995584,
35.27901195680498,
35.27056468599647
],
[
73.99618150289902,
74.01973867729899,
73.97488048626532,
73.97027021897547,
73.96055098802299,
73.94544160099603,
73.91010608315078,
73.9594600253564,
73.93323000030807,
73.94265737589377
]
]
},
"schema": {
"fields": [
{
"config": {},
"name": "_time",
"type": "time",
"typeInfo": {
"frame": "time.Time",
"nullable": true
}
},
{
"config": {},
"labels": {
"sensor_id": "TLM0201"
},
"name": "co",
"type": "number",
"typeInfo": {
"frame": "float64",
"nullable": true
}
},
{
"config": {},
"labels": {
"sensor_id": "TLM0201"
},
"name": "humidity",
"type": "number",
"typeInfo": {
"frame": "float64",
"nullable": true
}
},
{
"config": {},
"labels": {
"sensor_id": "TLM0201"
},
"name": "temperature",
"type": "number",
"typeInfo": {
"frame": "float64",
"nullable": true
}
}
],
"refId": "A"
}
},
{
"data": {
"values": [
[
1686898151000,
1686898161000,
1686898171000,
1686898181000,
1686898191000,
1686898201000,
1686898211000,
1686898221000,
1686898231000,
1686898241000
],
[
0.4843506884938821,
0.48132272563110234,
0.4738268718199932,
0.4929629619836042,
0.4730642763470896,
0.4654986284178103,
0.47939137148137106,
0.4729232244284411,
0.48941836624955776,
0.49438537058971027
],
[
35.6843507637763,
35.67052290561625,
35.651191124511904,
35.682694567957775,
35.71754463951193,
35.73271359197846,
35.76201128140823,
35.72031962293351,
35.76508898680512,
35.77376488207177
],
[
75.28536533620796,
75.3203884403274,
75.32915092826639,
75.3260291876276,
75.29613126188146,
75.31098782530198,
75.27434784939884,
75.30929861509146,
75.32818662869892,
75.33650169518009
]
]
},
"schema": {
"fields": [
{
"config": {},
"name": "_time",
"type": "time",
"typeInfo": {
"frame": "time.Time",
"nullable": true
}
},
{
"config": {},
"labels": {
"sensor_id": "TLM0202"
},
"name": "co",
"type": "number",
"typeInfo": {
"frame": "float64",
"nullable": true
}
},
{
"config": {},
"labels": {
"sensor_id": "TLM0202"
},
"name": "humidity",
"type": "number",
"typeInfo": {
"frame": "float64",
"nullable": true
}
},
{
"config": {},
"labels": {
"sensor_id": "TLM0202"
},
"name": "temperature",
"type": "number",
"typeInfo": {
"frame": "float64",
"nullable": true
}
}
],
"refId": "A"
}
},
{
"data": {
"values": [
[
1686898151000,
1686898161000,
1686898171000,
1686898181000,
1686898191000,
1686898201000,
1686898211000,
1686898221000,
1686898231000,
1686898241000
],
[
0.39373421338928505,
0.40929905142175416,
0.39727569840202215,
0.415834035946844,
0.413865826090914,
0.4271278288285897,
0.435277474574255,
0.4480042029285831,
0.462641109361364,
0.4566678920783633
],
[
35.86322585633077,
35.88732568460068,
35.87553827435961,
35.84063967239316,
35.813642046057005,
35.8569165461967,
35.80924651587166,
35.80599199551315,
35.75906423980926,
35.79948361256034
],
[
74.78112365229704,
74.73821283157658,
74.73019922701285,
74.73280967321202,
74.74885460260536,
74.70880911705714,
74.73346687605624,
74.77682413662681,
74.80562351134131,
74.82881664528719
]
]
},
"schema": {
"fields": [
{
"config": {},
"name": "_time",
"type": "time",
"typeInfo": {
"frame": "time.Time",
"nullable": true
}
},
{
"config": {},
"labels": {
"sensor_id": "TLM0203"
},
"name": "co",
"type": "number",
"typeInfo": {
"frame": "float64",
"nullable": true
}
},
{
"config": {},
"labels": {
"sensor_id": "TLM0203"
},
"name": "humidity",
"type": "number",
"typeInfo": {
"frame": "float64",
"nullable": true
}
},
{
"config": {},
"labels": {
"sensor_id": "TLM0203"
},
"name": "temperature",
"type": "number",
"typeInfo": {
"frame": "float64",
"nullable": true
}
}
],
"refId": "A"
}
}
]
}
},
"datasource": {
"type": "grafana",
"uid": "grafana"
},
"refId": "A",
"hidden": false
}
}
],
"transformations": [],
"queryOptions": {}
}
},
"vizConfig": {
"kind": "xychart",
"spec": {
"pluginVersion": "",
"options": {
"dims": {
"exclude": [
"co TLM0100"
],
"x": "humidity TLM0100"
},
"legend": {
"calcs": [],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"series": [
{
"pointColor": {},
"x": "humidity",
"y": "temperature"
}
],
"seriesMapping": "manual",
"tooltip": {
"mode": "single",
"sort": "none"
}
},
"fieldConfig": {
"defaults": {
"thresholds": {
"mode": "absolute",
"steps": [
{
"value": null,
"color": "green"
},
{
"value": 80,
"color": "red"
}
]
},
"color": {
"mode": "palette-classic"
},
"custom": {
"axisBorderShow": false,
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"pointSize": {
"fixed": 5
},
"scaleDistribution": {
"type": "linear"
},
"show": "points"
}
},
"overrides": []
}
}
}
}
},
"panel-12": {
"kind": "Panel",
"spec": {
"id": 12,
"title": "Color by field (gradient)",
"description": "",
"links": [],
"data": {
"kind": "QueryGroup",
"spec": {
"queries": [
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "grafana-testdata-datasource",
"spec": {
"csvFileName": "flight_info_by_state.csv",
"scenarioId": "csv_file"
}
},
"refId": "A",
"hidden": false
}
}
],
"transformations": [],
"queryOptions": {}
}
},
"vizConfig": {
"kind": "xychart",
"spec": {
"pluginVersion": "",
"options": {
"dims": {
"frame": 0
},
"legend": {
"calcs": [],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"series": [
{
"name": "Price",
"pointColor": {
"field": "Price",
"fixed": "#fade2a40"
},
"pointSize": {
"fixed": 10,
"max": 50,
"min": 1
},
"x": "Lat",
"y": "Lng"
}
],
"seriesMapping": "manual",
"tooltip": {
"mode": "single",
"sort": "none"
}
},
"fieldConfig": {
"defaults": {
"thresholds": {
"mode": "absolute",
"steps": [
{
"value": null,
"color": "green"
},
{
"value": 80,
"color": "red"
}
]
},
"color": {
"mode": "continuous-BlYlRd",
"fixedColor": "red"
},
"custom": {
"axisBorderShow": false,
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"lineWidth": 1,
"pointSize": {
"fixed": 5
},
"scaleDistribution": {
"type": "linear"
},
"show": "points"
}
},
"overrides": []
}
}
}
}
},
"panel-13": {
"kind": "Panel",
"spec": {
"id": 13,
"title": "Color by field (threshold)",
"description": "",
"links": [],
"data": {
"kind": "QueryGroup",
"spec": {
"queries": [
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "grafana-testdata-datasource",
"spec": {
"csvFileName": "flight_info_by_state.csv",
"scenarioId": "csv_file"
}
},
"refId": "A",
"hidden": false
}
}
],
"transformations": [],
"queryOptions": {}
}
},
"vizConfig": {
"kind": "xychart",
"spec": {
"pluginVersion": "",
"options": {
"dims": {
"frame": 0
},
"legend": {
"calcs": [],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"series": [
{
"name": "Price",
"pointColor": {
"field": "Price",
"fixed": "#fade2a40"
},
"pointSize": {
"fixed": 10,
"max": 50,
"min": 1
},
"x": "Lat",
"y": "Lng"
}
],
"seriesMapping": "manual",
"tooltip": {
"mode": "single",
"sort": "none"
}
},
"fieldConfig": {
"defaults": {
"thresholds": {
"mode": "absolute",
"steps": [
{
"value": null,
"color": "green"
},
{
"value": 500,
"color": "red"
}
]
},
"color": {
"mode": "thresholds",
"fixedColor": "red"
},
"custom": {
"axisBorderShow": false,
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"lineWidth": 1,
"pointSize": {
"fixed": 5
},
"scaleDistribution": {
"type": "linear"
},
"show": "points"
}
},
"overrides": []
}
}
}
}
},
"panel-14": {
"kind": "Panel",
"spec": {
"id": 14,
"title": "Color by field (value mappings)",
"description": "",
"links": [],
"data": {
"kind": "QueryGroup",
"spec": {
"queries": [
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "grafana-testdata-datasource",
"spec": {
"csvFileName": "flight_info_by_state.csv",
"scenarioId": "csv_file"
}
},
"refId": "A",
"hidden": false
}
}
],
"transformations": [],
"queryOptions": {}
}
},
"vizConfig": {
"kind": "xychart",
"spec": {
"pluginVersion": "",
"options": {
"dims": {
"frame": 0
},
"legend": {
"calcs": [],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"series": [
{
"name": "Price",
"pointColor": {
"field": "Price",
"fixed": "#fade2a40"
},
"pointSize": {
"fixed": 10,
"max": 50,
"min": 1
},
"x": "Lat",
"y": "Lng"
}
],
"seriesMapping": "manual",
"tooltip": {
"mode": "single",
"sort": "none"
}
},
"fieldConfig": {
"defaults": {
"mappings": [
{
"type": "value",
"options": {
"700": {
"color": "purple",
"index": 0
}
}
}
],
"thresholds": {
"mode": "absolute",
"steps": [
{
"value": null,
"color": "green"
}
]
},
"color": {
"mode": "thresholds",
"fixedColor": "red"
},
"custom": {
"axisBorderShow": false,
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"lineWidth": 1,
"pointSize": {
"fixed": 5
},
"scaleDistribution": {
"type": "linear"
},
"show": "points"
}
},
"overrides": []
}
}
}
}
},
"panel-2": {
"kind": "Panel",
"spec": {
"id": 2,
"title": "CNC/Routing \"Etch-A-Sketch\"",
"description": "",
"links": [],
"data": {
"kind": "QueryGroup",
"spec": {
"queries": [
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "grafana-testdata-datasource",
"spec": {
"csvContent": "X,Y\n725.1,435.6\n734.1,497.2\n714.3,527.7\n683.5,548.7\n601.8,594.0\n598.5,621.7\n573.9,644.7\n525.7,695.7\n477.2,732.8\n411.8,755.3\n353.6,758.3\n422.6,736.5\n455.3,724.1\n479.2,699.2\n474.0,673.8\n434.5,662.1\n362.2,679.8\n311.2,698.8\n260.1,728.9\n213.4,771.1\n176.2,818.0\n211.2,742.6\n253.9,707.9\n309.9,668.8\n374.7,643.2\n322.8,629.9\n277.1,607.1\n237.0,616.8\n188.9,613.9\n143.0,594.1\n101.8,566.4\n178.1,590.2\n222.2,575.9\n187.9,549.1\n161.5,517.5\n128.6,506.8\n97.3,488.3\n62.4,436.0\n99.6,473.8\n138.3,477.0\n125.0,396.7\n95.6,359.2\n83.6,322.1\n81.0,289.7\n104.0,343.8\n129.3,358.4\n151.2,291.1\n124.0,242.6\n126.3,170.2\n133.7,212.8\n148.4,243.3\n167.9,262.7\n209.1,205.7\n230.1,150.3\n231.4,120.1\n316.0,120.1\n400.6,120.1\n485.2,120.1\n569.8,120.1\n569.3,166.4\n553.0,205.5\n489.2,265.7\n422.2,309.1\n353.7,343.1\n328.2,386.3\n321.6,432.6\n334.1,473.1\n357.6,500.3\n389.9,508.5\n418.8,479.9\n447.9,413.3\n480.0,379.0\n521.6,354.2\n583.9,351.6\n549.7,357.7\n571.6,376.0\n517.1,380.8\n550.3,393.2\n504.3,402.3\n489.5,425.8\n527.5,425.8\n472.7,457.2\n447.1,523.8\n538.6,435.7\n598.4,403.7\n697.8,349.1\n645.3,390.4\n712.3,373.8\n586.0,424.1\n526.9,463.3\n469.5,538.0\n540.6,477.5\n531.2,528.4\n598.6,460.1\n594.5,509.0\n651.2,460.1\n649.9,502.6\n699.4,446.1\n707.6,477.1\n722.0,442.9",
"scenarioId": "csv_content"
}
},
"refId": "A",
"hidden": false
}
}
],
"transformations": [],
"queryOptions": {}
}
},
"vizConfig": {
"kind": "xychart",
"spec": {
"pluginVersion": "",
"options": {
"legend": {
"calcs": [],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"series": [],
"seriesMapping": "auto",
"tooltip": {
"mode": "single",
"sort": "none"
}
},
"fieldConfig": {
"defaults": {
"thresholds": {
"mode": "absolute",
"steps": [
{
"value": null,
"color": "green"
},
{
"value": 80,
"color": "red"
}
]
},
"color": {
"mode": "fixed",
"fixedColor": "red"
},
"custom": {
"axisBorderShow": false,
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"lineWidth": 1,
"pointSize": {
"fixed": 5
},
"scaleDistribution": {
"type": "linear"
},
"show": "points+lines"
}
},
"overrides": []
}
}
}
}
},
"panel-3": {
"kind": "Panel",
"spec": {
"id": 3,
"title": "Function Plots",
"description": "",
"links": [],
"data": {
"kind": "QueryGroup",
"spec": {
"queries": [
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "grafana-testdata-datasource",
"spec": {
"alias": "",
"csvContent": "x,cos(x)\n-7.5,3.4663531783502584\n-7.45,3.9308257356494076\n-7.4,4.385473275743903\n-7.35,4.829159416559378\n-7.3,5.260775173811053\n-7.25,5.679241732886949\n-7.2,6.083513145322545\n-7.15,6.472578943127236\n-7.1,6.845466664428066\n-7.05,7.201244284117942\n-7,7.539022543433046\n-6.95,7.857957172636611\n-6.9,8.157251001253568\n-6.85,8.436155950581597\n-6.8,8.693974903498253\n-6.75,8.930063446890767\n-6.7,9.143831482353194\n-6.65,9.334744701125118\n-6.6,9.502325919585296\n-6.55,9.64615627196218\n-6.5,9.765876257280235\n-6.45,9.861186637925126\n-6.4,9.931849187581927\n-6.35,9.97768728667684\n-6.3,9.998586363834152\n-6.25,9.994494182244994\n-6.2,9.965420970232175\n-6.15,9.91143939568469\n-6.1,9.832684384425844\n-6.05,9.729352782968974\n-6,9.601702866503661\n-5.95,9.450053693342275\n-5.9,9.27478430744036\n-5.85,9.076332790984132\n-5.8,8.85519516941319\n-5.75,8.611924171615208\n-5.7,8.347127848391597\n-5.65,8.061468052647157\n-5.6,7.755658785102496\n-5.55,7.430464409664099\n-5.5,7.0866977429126\n-5.45,6.725218022484659\n-5.4,6.346928759426347\n-5.35,5.952775479886061\n-5.3,5.543743361791607\n-5.25,5.120854772418407\n-5.2,4.685166713003771\n-5.15,4.237768176794282\n-5.1,3.7797774271298024\n-5.05,3.3123392023675367\n-5,2.8366218546322624\n-4.95,2.353814429544512\n-4.9,1.8651236942257576\n-4.85,1.371771121009073\n-4.8,0.874989834394464\n-4.75,0.37602152887976553\n-4.7,-0.1238866346289056\n-4.65,-0.6234851460699166\n-4.6,-1.1215252693505486\n-4.55,-1.616762163536865\n-4.5,-2.107957994307797\n-4.45,-2.593885027896261\n-4.4,-3.0733286997841933\n-4.35,-3.5450906504813195\n-4.3,-4.007991720799755\n-4.25,-4.460874899137928\n-4.2,-4.902608213406994\n-4.15,-5.332087560371543\n-4.1,-5.748239465332691\n-4.05,-6.150023765255744\n-4,-6.536436208636119\n-3.95,-6.906510965605075\n-3.9,-7.259323042001402\n-3.85,-7.593990591375079\n-3.8,-7.909677119144169\n-3.75,-8.205593573395607\n-3.7,-8.48100031710408\n-3.65,-8.73520897683938\n-3.6,-8.96758416334147\n-3.55,-9.1775450596627
59\n-3.5,-9.364566872907963\n-3.45,-9.528182145943047\n-3.4,-9.66798192579461\n-3.35,-9.78361678581934\n-3.3,-9.87479769908865\n-3.25,-9.941296760805463\n-3.2,-9.982947757947532\n-3.15,-9.99964658471342\n-3.1,-9.991351502732794\n-3.05,-9.958083245390611\n-3,-9.899924966004454\n-2.95,-9.81702202998454\n-2.9,-9.709581651495906\n-2.85,-9.577872375530903\n-2.8,-9.42222340668658\n-2.75,-9.243023786324635\n-2.7,-9.040721420170613\n-2.65,-8.815821958782859\n-2.6,-8.568887533689473\n-2.55,-8.30053535235222\n-2.5,-8.011436155469337\n-2.45,-7.702312540473074\n-2.4,-7.373937155412454\n-2.35,-7.027130767735539\n-2.3,-6.66276021279824\n-2.25,-6.281736227227391\n-2.2,-5.885011172553458\n-2.15,-5.473576654802709\n-2.1,-5.048461045998575\n-2.05,-4.610726913767127\n-2,-4.161468365471424\n-1.95,-3.7018083135128688\n-1.9,-3.2328956686350336\n-1.85,-2.7559024682451296\n-1.8,-2.272020946930871\n-1.75,-1.7824605564949207\n-1.7,-1.2884449429552465\n-1.65,-0.7912088880673386\n-1.6,-0.29199522301288816\n-1.55,0.20794827803092428\n-1.5,0.7073720166770291\n-1.45,1.2050276936736661\n-1.4,1.6996714290024104\n-1.35,2.1900668709304147\n-1.3,2.6749882862458736\n-1.25,3.1532236239526865\n-1.2,3.623577544766736\n-1.15,4.084874408841574\n-1.1,4.5359612142557735\n-1.05,4.97571047891727\n-1,5.403023058681398\n-0.95,5.8168308946388345\n-0.9,6.216099682706644\n-0.85,6.599831458849822\n-0.8,6.967067093471654\n-0.75,7.316888688738209\n-0.7,7.648421872844885\n-0.65,7.960837985490558\n-0.6,8.253356149096783\n-0.55,8.525245220595057\n-0.5,8.775825618903728\n-0.45,9.004471023526769\n-0.4,9.210609940028851\n-0.35,9.393727128473788\n-0.3,9.55336489125606\n-0.25,9.689124217106448\n-0.2,9.800665778412416\n-0.15,9.887710779360422\n-0.1,9.950041652780257\n-0.05,9.987502603949663\n0,10\n0.05,9.987502603949663\n0.1,9.950041652780257\n0.15,9.887710779360422\n0.2,9.800665778412416\n0.25,9.689124217106448\n0.3,9.55336489125606\n0.35,9.393727128473788\n0.4,9.210609940028851\n0.45,9.004471023526769\n0.5,8.775825618903728\n
0.55,8.525245220595057\n0.6,8.253356149096783\n0.65,7.960837985490558\n0.7,7.648421872844885\n0.75,7.316888688738209\n0.8,6.967067093471654\n0.85,6.599831458849822\n0.9,6.216099682706644\n0.95,5.8168308946388345\n1,5.403023058681398\n1.05,4.97571047891727\n1.1,4.5359612142557735\n1.15,4.084874408841574\n1.2,3.623577544766736\n1.25,3.1532236239526865\n1.3,2.6749882862458736\n1.35,2.1900668709304147\n1.4,1.6996714290024104\n1.45,1.2050276936736661\n1.5,0.7073720166770291\n1.55,0.20794827803092428\n1.6,-0.29199522301288816\n1.65,-0.7912088880673386\n1.7,-1.2884449429552465\n1.75,-1.7824605564949207\n1.8,-2.272020946930871\n1.85,-2.7559024682451296\n1.9,-3.2328956686350336\n1.95,-3.7018083135128688\n2,-4.161468365471424\n2.05,-4.610726913767127\n2.1,-5.048461045998575\n2.15,-5.473576654802709\n2.2,-5.885011172553458\n2.25,-6.281736227227391\n2.3,-6.66276021279824\n2.35,-7.027130767735539\n2.4,-7.373937155412454\n2.45,-7.702312540473074\n2.5,-8.011436155469337\n2.55,-8.30053535235222\n2.6,-8.568887533689473\n2.65,-8.815821958782859\n2.7,-9.040721420170613\n2.75,-9.243023786324635\n2.8,-9.42222340668658\n2.85,-9.577872375530903\n2.9,-9.709581651495906\n2.95,-9.81702202998454\n3,-9.899924966004454\n3.05,-9.958083245390611\n3.1,-9.991351502732794\n3.15,-9.99964658471342\n3.2,-9.982947757947532\n3.25,-9.941296760805463\n3.3,-9.87479769908865\n3.35,-9.78361678581934\n3.4,-9.66798192579461\n3.45,-9.528182145943047\n3.5,-9.364566872907963\n3.55,-9.177545059662759\n3.6,-8.96758416334147\n3.65,-8.73520897683938\n3.7,-8.48100031710408\n3.75,-8.205593573395607\n3.8,-7.909677119144169\n3.85,-7.593990591375079\n3.9,-7.259323042001402\n3.95,-6.906510965605075\n4,-6.536436208636119\n4.05,-6.150023765255744\n4.1,-5.748239465332691\n4.15,-5.332087560371543\n4.2,-4.902608213406994\n4.25,-4.460874899137928\n4.3,-4.007991720799755\n4.35,-3.5450906504813195\n4.4,-3.0733286997841933\n4.45,-2.593885027896261\n4.5,-2.107957994307797\n4.55,-1.616762163536865\n4.6,-1.1215252693505486\n4.65,-0.623
4851460699166\n4.7,-0.1238866346289056\n4.75,0.37602152887976553\n4.8,0.874989834394464\n4.85,1.371771121009073\n4.9,1.8651236942257576\n4.95,2.353814429544512\n5,2.8366218546322624\n5.05,3.3123392023675367\n5.1,3.7797774271298024\n5.15,4.237768176794282\n5.2,4.685166713003771\n5.25,5.120854772418407\n5.3,5.543743361791607\n5.35,5.952775479886061\n5.4,6.346928759426347\n5.45,6.725218022484659\n5.5,7.0866977429126\n5.55,7.430464409664099\n5.6,7.755658785102496\n5.65,8.061468052647157\n5.7,8.347127848391597\n5.75,8.611924171615208\n5.8,8.85519516941319\n5.85,9.076332790984132\n5.9,9.27478430744036\n5.95,9.450053693342275\n6,9.601702866503661\n6.05,9.729352782968974\n6.1,9.832684384425844\n6.15,9.91143939568469\n6.2,9.965420970232175\n6.25,9.994494182244994\n6.3,9.998586363834152\n6.35,9.97768728667684\n6.4,9.931849187581927\n6.45,9.861186637925126\n6.5,9.765876257280235\n6.55,9.64615627196218\n6.6,9.502325919585296\n6.65,9.334744701125118\n6.7,9.143831482353194\n6.75,8.930063446890767\n6.8,8.693974903498253\n6.85,8.436155950581597\n6.9,8.157251001253568\n6.95,7.857957172636611\n7,7.539022543433046\n7.05,7.201244284117942\n7.1,6.845466664428066\n7.15,6.472578943127236\n7.2,6.083513145322545\n7.25,5.679241732886949\n7.3,5.260775173811053\n7.35,4.829159416559378\n7.4,4.385473275743903\n7.45,3.9308257356494076",
"scenarioId": "csv_content"
}
},
"refId": "A",
"hidden": false
}
},
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "grafana-testdata-datasource",
"spec": {
"csvContent": "x2,x^2\n-7.5,56.25\n-7.45,55.502500000000005\n-7.4,54.760000000000005\n-7.35,54.022499999999994\n-7.3,53.29\n-7.25,52.5625\n-7.2,51.84\n-7.15,51.1225\n-7.1,50.41\n-7.05,49.7025\n-7,49\n-6.95,48.3025\n-6.9,47.61000000000001\n-6.85,46.92249999999999\n-6.8,46.239999999999995\n-6.75,45.5625\n-6.7,44.89\n-6.65,44.222500000000004\n-6.6,43.559999999999995\n-6.55,42.902499999999996\n-6.5,42.25\n-6.45,41.6025\n-6.4,40.96000000000001\n-6.35,40.3225\n-6.3,39.69\n-6.25,39.0625\n-6.2,38.440000000000005\n-6.15,37.822500000000005\n-6.1,37.209999999999994\n-6.05,36.6025\n-6,36\n-5.95,35.4025\n-5.9,34.81\n-5.85,34.2225\n-5.8,33.64\n-5.75,33.0625\n-5.7,32.49\n-5.65,31.922500000000003\n-5.6,31.359999999999996\n-5.55,30.8025\n-5.5,30.25\n-5.45,29.7025\n-5.4,29.160000000000004\n-5.35,28.622499999999995\n-5.3,28.09\n-5.25,27.5625\n-5.2,27.040000000000003\n-5.15,26.522500000000004\n-5.1,26.009999999999998\n-5.05,25.502499999999998\n-5,25\n-4.95,24.5025\n-4.9,24.010000000000005\n-4.85,23.522499999999997\n-4.8,23.04\n-4.75,22.5625\n-4.7,22.090000000000003\n-4.65,21.622500000000002\n-4.6,21.159999999999997\n-4.55,20.702499999999997\n-4.5,20.25\n-4.45,19.802500000000002\n-4.4,19.360000000000003\n-4.35,18.922499999999996\n-4.3,18.49\n-4.25,18.0625\n-4.2,17.64\n-4.15,17.222500000000004\n-4.1,16.81\n-4.05,16.4025\n-4,16\n-3.95,15.602500000000001\n-3.9,15.209999999999999\n-3.85,14.822500000000002\n-3.8,14.44\n-3.75,14.0625\n-3.7,13.690000000000001\n-3.65,13.3225\n-3.6,12.96\n-3.55,12.6025\n-3.5,12.25\n-3.45,11.902500000000002\n-3.4,11.559999999999999\n-3.35,11.2225\n-3.3,10.889999999999999\n-3.25,10.5625\n-3.2,10.240000000000002\n-3.15,9.9225\n-3.1,9.610000000000001\n-3.05,9.302499999999998\n-3,9\n-2.95,8.7025\n-2.9,8.41\n-2.85,8.1225\n-2.8,7.839999999999999\n-2.75,7.5625\n-2.7,7.290000000000001\n-2.65,7.0225\n-2.6,6.760000000000001\n-2.55,6.5024999999999995\n-2.5,6.25\n-2.45,6.002500000000001\n-2.4,5.76\n-2.35,5.522500000000001\n-2.3,5.289999999999999\n-2.25,5.0625\n-2.2,4.8400000
00000001\n-2.15,4.6225\n-2.1,4.41\n-2.05,4.2025\n-2,4\n-1.95,3.8024999999999998\n-1.9,3.61\n-1.85,3.4225000000000003\n-1.8,3.24\n-1.75,3.0625\n-1.7,2.8899999999999997\n-1.65,2.7224999999999997\n-1.6,2.5600000000000005\n-1.55,2.4025000000000003\n-1.5,2.25\n-1.45,2.1025\n-1.4,1.9599999999999997\n-1.35,1.8225000000000002\n-1.3,1.6900000000000002\n-1.25,1.5625\n-1.2,1.44\n-1.15,1.3224999999999998\n-1.1,1.2100000000000002\n-1.05,1.1025\n-1,1\n-0.95,0.9025\n-0.9,0.81\n-0.85,0.7224999999999999\n-0.8,0.6400000000000001\n-0.75,0.5625\n-0.7,0.48999999999999994\n-0.65,0.42250000000000004\n-0.6,0.36\n-0.55,0.30250000000000005\n-0.5,0.25\n-0.45,0.2025\n-0.4,0.16000000000000003\n-0.35,0.12249999999999998\n-0.3,0.09\n-0.25,0.0625\n-0.2,0.04000000000000001\n-0.15,0.0225\n-0.1,0.010000000000000002\n-0.05,0.0025000000000000005\n0,0\n0.05,0.0025000000000000005\n0.1,0.010000000000000002\n0.15,0.0225\n0.2,0.04000000000000001\n0.25,0.0625\n0.3,0.09\n0.35,0.12249999999999998\n0.4,0.16000000000000003\n0.45,0.2025\n0.5,0.25\n0.55,0.30250000000000005\n0.6,0.36\n0.65,0.42250000000000004\n0.7,0.48999999999999994\n0.75,0.5625\n0.8,0.6400000000000001\n0.85,0.7224999999999999\n0.9,0.81\n0.95,0.9025\n1,1\n1.05,1.1025\n1.1,1.2100000000000002\n1.15,1.3224999999999998\n1.2,1.44\n1.25,1.5625\n1.3,1.6900000000000002\n1.35,1.8225000000000002\n1.4,1.9599999999999997\n1.45,2.1025\n1.5,2.25\n1.55,2.4025000000000003\n1.6,2.5600000000000005\n1.65,2.7224999999999997\n1.7,2.8899999999999997\n1.75,3.0625\n1.8,3.24\n1.85,3.4225000000000003\n1.9,3.61\n1.95,3.8024999999999998\n2,4\n2.05,4.2025\n2.1,4.41\n2.15,4.6225\n2.2,4.840000000000001\n2.25,5.0625\n2.3,5.289999999999999\n2.35,5.522500000000001\n2.4,5.76\n2.45,6.002500000000001\n2.5,6.25\n2.55,6.5024999999999995\n2.6,6.760000000000001\n2.65,7.0225\n2.7,7.290000000000001\n2.75,7.5625\n2.8,7.839999999999999\n2.85,8.1225\n2.9,8.41\n2.95,8.7025\n3,9\n3.05,9.302499999999998\n3.1,9.610000000000001\n3.15,9.9225\n3.2,10.240000000000002\n3.25,10.5625\n3.3,10.88999999999
9999\n3.35,11.2225\n3.4,11.559999999999999\n3.45,11.902500000000002\n3.5,12.25\n3.55,12.6025\n3.6,12.96\n3.65,13.3225\n3.7,13.690000000000001\n3.75,14.0625\n3.8,14.44\n3.85,14.822500000000002\n3.9,15.209999999999999\n3.95,15.602500000000001\n4,16\n4.05,16.4025\n4.1,16.81\n4.15,17.222500000000004\n4.2,17.64\n4.25,18.0625\n4.3,18.49\n4.35,18.922499999999996\n4.4,19.360000000000003\n4.45,19.802500000000002\n4.5,20.25\n4.55,20.702499999999997\n4.6,21.159999999999997\n4.65,21.622500000000002\n4.7,22.090000000000003\n4.75,22.5625\n4.8,23.04\n4.85,23.522499999999997\n4.9,24.010000000000005\n4.95,24.5025\n5,25\n5.05,25.502499999999998\n5.1,26.009999999999998\n5.15,26.522500000000004\n5.2,27.040000000000003\n5.25,27.5625\n5.3,28.09\n5.35,28.622499999999995\n5.4,29.160000000000004\n5.45,29.7025\n5.5,30.25\n5.55,30.8025\n5.6,31.359999999999996\n5.65,31.922500000000003\n5.7,32.49\n5.75,33.0625\n5.8,33.64\n5.85,34.2225\n5.9,34.81\n5.95,35.4025\n6,36\n6.05,36.6025\n6.1,37.209999999999994\n6.15,37.822500000000005\n6.2,38.440000000000005\n6.25,39.0625\n6.3,39.69\n6.35,40.3225\n6.4,40.96000000000001\n6.45,41.6025\n6.5,42.25\n6.55,42.902499999999996\n6.6,43.559999999999995\n6.65,44.222500000000004\n6.7,44.89\n6.75,45.5625\n6.8,46.239999999999995\n6.85,46.92249999999999\n6.9,47.61000000000001\n6.95,48.3025\n7,49\n7.05,49.7025\n7.1,50.41\n7.15,51.1225\n7.2,51.84\n7.25,52.5625\n7.3,53.29\n7.35,54.022499999999994\n7.4,54.760000000000005\n7.45,55.502500000000005",
"scenarioId": "csv_content"
}
},
"refId": "B",
"hidden": false
}
},
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "grafana-testdata-datasource",
"spec": {
"csvContent": "x3,sqrt(x)\n0,0\n0.05,2.23606797749979\n0.1,3.1622776601683795\n0.15,3.872983346207417\n0.2,4.47213595499958\n0.25,5\n0.3,5.47722557505166\n0.35,5.916079783099616\n0.4,6.324555320336759\n0.45,6.708203932499369\n0.5,7.0710678118654755\n0.55,7.416198487095663\n0.6,7.745966692414834\n0.65,8.062257748298551\n0.7,8.366600265340756\n0.75,8.660254037844386\n0.8,8.94427190999916\n0.85,9.219544457292887\n0.9,9.486832980505138\n0.95,9.746794344808963\n1,10\n1.05,10.2469507659596\n1.1,10.488088481701517\n1.15,10.723805294763608\n1.2,10.95445115010332\n1.25,11.180339887498949\n1.3,11.401754250991381\n1.35,11.618950038622252\n1.4,11.832159566199232\n1.45,12.041594578792296\n1.5,12.24744871391589\n1.55,12.449899597988733\n1.6,12.649110640673518\n1.65,12.84523257866513\n1.7,13.038404810405297\n1.75,13.228756555322953\n1.8,13.416407864998739\n1.85,13.601470508735442\n1.9,13.784048752090222\n1.95,13.96424004376894\n2,14.142135623730951\n2.05,14.317821063276352\n2.1,14.49137674618944\n2.15,14.66287829861518\n2.2,14.832396974191326\n2.25,15\n2.3,15.1657508881031\n2.35,15.329709716755893\n2.4,15.491933384829668\n2.45,15.652475842498529\n2.5,15.811388300841898\n2.55,15.968719422671311\n2.6,16.124515496597102\n2.65,16.278820596099706\n2.7,16.431676725154986\n2.75,16.583123951776997\n2.8,16.73320053068151\n2.85,16.881943016134134\n2.9,17.0293863659264\n2.95,17.175564037317667\n3,17.32050807568877\n3.05,17.46424919657298\n3.1,17.60681686165901\n3.15,17.74823934929885\n3.2,17.88854381999832\n3.25,18.027756377319946\n3.3,18.16590212458495\n3.35,18.303005217723125\n3.4,18.439088914585774\n3.45,18.57417562100671\n3.5,18.708286933869708\n3.55,18.84144368141677\n3.6,18.973665961010276\n3.65,19.1049731745428\n3.7,19.235384061671347\n3.75,19.364916731037084\n3.8,19.493588689617926\n3.85,19.621416870348586\n3.9,19.748417658131498\n3.95,19.87460691435179\n4,20\n4.05,20.124611797498105\n4.1,20.248456731316583\n4.15,20.37154878746336\n4.2,20.4939015319192\n4.25,20.615528128088304\n4.3,2
0.73644135332772\n4.35,20.85665361461421\n4.4,20.976176963403034\n4.45,21.095023109728984\n4.5,21.213203435596423\n4.55,21.330729007701542\n4.6,21.447610589527216\n4.65,21.563858652847827\n4.7,21.6794833886788\n4.75,21.79449471770337\n4.8,21.90890230020664\n4.85,22.022715545545243\n4.9,22.135943621178654\n4.95,22.24859546128699\n5,22.360679774997898\n5.05,22.47220505424423\n5.1,22.58317958127243\n5.15,22.693611435820436\n5.2,22.803508501982762\n5.25,22.9128784747792\n5.3,23.021728866442675\n5.35,23.130067012440755\n5.4,23.237900077244504\n5.45,23.345235059857504\n5.5,23.45207879911715\n5.55,23.55843797877949\n5.6,23.664319132398465\n5.65,23.769728648009426\n5.7,23.874672772626646\n5.75,23.979157616563597\n5.8,24.08318915758459\n5.85,24.186773244895647\n5.9,24.289915602982237\n5.95,24.392621835300936\n6,24.49489742783178\n6.05,24.596747752497684\n6.1,24.698178070456937\n6.15,24.79919353527449\n6.2,24.899799195977465\n6.25,25\n6.3,25.099800796022265\n6.35,25.199206336708304\n6.4,25.298221281347036\n6.45,25.39685019840059\n6.5,25.495097567963924\n6.55,25.592967784139454\n6.6,25.69046515733026\n6.65,25.787593916455258\n6.7,25.88435821108957\n6.75,25.98076211353316\n6.8,26.076809620810593\n6.85,26.1725046566048\n6.9,26.267851073127396\n6.95,26.362852652928133\n7,26.457513110645905\n7.05,26.551836094703507\n7.1,26.645825188948457\n7.15,26.739483914241873\n7.2,26.832815729997478\n7.25,26.92582403567252\n7.3,27.018512172212592\n7.35,27.11088342345192\n7.4,27.202941017470884\n7.45,27.294688127912362",
"scenarioId": "csv_content"
}
},
"refId": "C",
"hidden": false
}
},
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "grafana-testdata-datasource",
"spec": {
"csvContent": "x4,-sqrt(x)\n0,0\n0.05,-2.23606797749979\n0.1,-3.1622776601683795\n0.15,-3.872983346207417\n0.2,-4.47213595499958\n0.25,-5\n0.3,-5.47722557505166\n0.35,-5.916079783099616\n0.4,-6.324555320336759\n0.45,-6.708203932499369\n0.5,-7.0710678118654755\n0.55,-7.416198487095663\n0.6,-7.745966692414834\n0.65,-8.062257748298551\n0.7,-8.366600265340756\n0.75,-8.660254037844386\n0.8,-8.94427190999916\n0.85,-9.219544457292887\n0.9,-9.486832980505138\n0.95,-9.746794344808963\n1,-10\n1.05,-10.2469507659596\n1.1,-10.488088481701517\n1.15,-10.723805294763608\n1.2,-10.95445115010332\n1.25,-11.180339887498949\n1.3,-11.401754250991381\n1.35,-11.618950038622252\n1.4,-11.832159566199232\n1.45,-12.041594578792296\n1.5,-12.24744871391589\n1.55,-12.449899597988733\n1.6,-12.649110640673518\n1.65,-12.84523257866513\n1.7,-13.038404810405297\n1.75,-13.228756555322953\n1.8,-13.416407864998739\n1.85,-13.601470508735442\n1.9,-13.784048752090222\n1.95,-13.96424004376894\n2,-14.142135623730951\n2.05,-14.317821063276352\n2.1,-14.49137674618944\n2.15,-14.66287829861518\n2.2,-14.832396974191326\n2.25,-15\n2.3,-15.1657508881031\n2.35,-15.329709716755893\n2.4,-15.491933384829668\n2.45,-15.652475842498529\n2.5,-15.811388300841898\n2.55,-15.968719422671311\n2.6,-16.124515496597102\n2.65,-16.278820596099706\n2.7,-16.431676725154986\n2.75,-16.583123951776997\n2.8,-16.73320053068151\n2.85,-16.881943016134134\n2.9,-17.0293863659264\n2.95,-17.175564037317667\n3,-17.32050807568877\n3.05,-17.46424919657298\n3.1,-17.60681686165901\n3.15,-17.74823934929885\n3.2,-17.88854381999832\n3.25,-18.027756377319946\n3.3,-18.16590212458495\n3.35,-18.303005217723125\n3.4,-18.439088914585774\n3.45,-18.57417562100671\n3.5,-18.708286933869708\n3.55,-18.84144368141677\n3.6,-18.973665961010276\n3.65,-19.1049731745428\n3.7,-19.235384061671347\n3.75,-19.364916731037084\n3.8,-19.493588689617926\n3.85,-19.621416870348586\n3.9,-19.748417658131498\n3.95,-19.87460691435179\n4,-20\n4.05,-20.124611797498105\n4.1,-20.2484567313
16583\n4.15,-20.37154878746336\n4.2,-20.4939015319192\n4.25,-20.615528128088304\n4.3,-20.73644135332772\n4.35,-20.85665361461421\n4.4,-20.976176963403034\n4.45,-21.095023109728984\n4.5,-21.213203435596423\n4.55,-21.330729007701542\n4.6,-21.447610589527216\n4.65,-21.563858652847827\n4.7,-21.6794833886788\n4.75,-21.79449471770337\n4.8,-21.90890230020664\n4.85,-22.022715545545243\n4.9,-22.135943621178654\n4.95,-22.24859546128699\n5,-22.360679774997898\n5.05,-22.47220505424423\n5.1,-22.58317958127243\n5.15,-22.693611435820436\n5.2,-22.803508501982762\n5.25,-22.9128784747792\n5.3,-23.021728866442675\n5.35,-23.130067012440755\n5.4,-23.237900077244504\n5.45,-23.345235059857504\n5.5,-23.45207879911715\n5.55,-23.55843797877949\n5.6,-23.664319132398465\n5.65,-23.769728648009426\n5.7,-23.874672772626646\n5.75,-23.979157616563597\n5.8,-24.08318915758459\n5.85,-24.186773244895647\n5.9,-24.289915602982237\n5.95,-24.392621835300936\n6,-24.49489742783178\n6.05,-24.596747752497684\n6.1,-24.698178070456937\n6.15,-24.79919353527449\n6.2,-24.899799195977465\n6.25,-25\n6.3,-25.099800796022265\n6.35,-25.199206336708304\n6.4,-25.298221281347036\n6.45,-25.39685019840059\n6.5,-25.495097567963924\n6.55,-25.592967784139454\n6.6,-25.69046515733026\n6.65,-25.787593916455258\n6.7,-25.88435821108957\n6.75,-25.98076211353316\n6.8,-26.076809620810593\n6.85,-26.1725046566048\n6.9,-26.267851073127396\n6.95,-26.362852652928133\n7,-26.457513110645905\n7.05,-26.551836094703507\n7.1,-26.645825188948457\n7.15,-26.739483914241873\n7.2,-26.832815729997478\n7.25,-26.92582403567252\n7.3,-27.018512172212592\n7.35,-27.11088342345192\n7.4,-27.202941017470884\n7.45,-27.294688127912362",
"scenarioId": "csv_content"
}
},
"refId": "D",
"hidden": false
}
}
],
"transformations": [],
"queryOptions": {}
}
},
"vizConfig": {
"kind": "xychart",
"spec": {
"pluginVersion": "",
"options": {
"dims": {
"frame": 0
},
"legend": {
"calcs": [],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"series": [
{
"name": "cos(x)",
"pointColor": {
"fixed": "green"
},
"pointSize": {
"fixed": 5,
"max": 20,
"min": 1
},
"x": "x",
"y": "cos(x)"
},
{
"name": "x^2",
"pointColor": {
"fixed": "orange"
},
"pointSize": {
"fixed": 5,
"max": 20,
"min": 1
},
"x": "x2",
"y": "x^2"
},
{
"name": "sqrt(x)",
"pointColor": {
"fixed": "blue"
},
"pointSize": {
"fixed": 5,
"max": 20,
"min": 1
},
"x": "x3",
"y": "sqrt(x)"
},
{
"name": "-sqrt(x)",
"pointColor": {
"fixed": "purple"
},
"pointSize": {
"fixed": 5,
"max": 20,
"min": 1
},
"x": "x4",
"y": "-sqrt(x)"
}
],
"seriesMapping": "manual",
"tooltip": {
"mode": "single",
"sort": "none"
}
},
"fieldConfig": {
"defaults": {
"thresholds": {
"mode": "absolute",
"steps": [
{
"value": null,
"color": "green"
},
{
"value": 80,
"color": "red"
}
]
},
"color": {
"mode": "fixed",
"fixedColor": "red"
},
"custom": {
"axisBorderShow": false,
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"lineWidth": 1,
"pointSize": {
"fixed": 5
},
"scaleDistribution": {
"type": "linear"
},
"show": "lines"
}
},
"overrides": [
{
"matcher": {
"id": "byName",
"options": "x^2"
},
"properties": [
{
"id": "custom.lineStyle",
"value": {
"dash": [
10,
15
],
"fill": "dash"
}
}
]
},
{
"matcher": {
"id": "byName",
"options": "cos(x)"
},
"properties": [
{
"id": "custom.axisLabel",
"value": "y"
}
]
}
]
}
}
}
}
},
"panel-4": {
"kind": "Panel",
"spec": {
"id": 4,
"title": "Height vs Weight Samples",
"description": "",
"links": [],
"data": {
"kind": "QueryGroup",
"spec": {
"queries": [
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "grafana-testdata-datasource",
"spec": {
"csvFileName": "weight_height.csv",
"scenarioId": "csv_file"
}
},
"refId": "A",
"hidden": false
}
}
],
"transformations": [
{
"kind": "partitionByValues",
"spec": {
"id": "partitionByValues",
"options": {
"fields": [
"Gender"
]
}
}
}
],
"queryOptions": {}
}
},
"vizConfig": {
"kind": "xychart",
"spec": {
"pluginVersion": "",
"options": {
"dims": {
"exclude": [],
"frame": 0,
"x": "A-series"
},
"legend": {
"calcs": [],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"series": [
{
"name": "Male",
"pointColor": {
"fixed": "#5795f2"
},
"pointSize": {
"fixed": 5,
"max": 20,
"min": 1
},
"x": "Height Male",
"y": "Weight Male"
},
{
"name": "Female",
"pointColor": {
"fixed": "#ff9830"
},
"pointSize": {
"fixed": 5,
"max": 20,
"min": 1
},
"x": "Height Female",
"y": "Weight Female"
}
],
"seriesMapping": "manual",
"tooltip": {
"mode": "single",
"sort": "none"
}
},
"fieldConfig": {
"defaults": {
"thresholds": {
"mode": "absolute",
"steps": [
{
"value": null,
"color": "green"
},
{
"value": 80,
"color": "red"
}
]
},
"color": {
"mode": "fixed",
"fixedColor": "red"
},
"custom": {
"axisBorderShow": false,
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"lineWidth": 1,
"pointSize": {
"fixed": 5
},
"scaleDistribution": {
"type": "linear"
},
"show": "points"
}
},
"overrides": [
{
"matcher": {
"id": "byName",
"options": "Weight Male"
},
"properties": [
{
"id": "custom.axisLabel",
"value": "Weight"
}
]
},
{
"matcher": {
"id": "byName",
"options": "Height Male"
},
"properties": [
{
"id": "custom.axisLabel",
"value": "Height"
}
]
}
]
}
}
}
}
},
"panel-5": {
"kind": "Panel",
"spec": {
"id": 5,
"title": "Bubble Charts",
"description": "",
"links": [],
"data": {
"kind": "QueryGroup",
"spec": {
"queries": [
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "grafana-testdata-datasource",
"spec": {
"csvFileName": "flight_info_by_state.csv",
"scenarioId": "csv_file"
}
},
"refId": "A",
"hidden": false
}
}
],
"transformations": [],
"queryOptions": {}
}
},
"vizConfig": {
"kind": "xychart",
"spec": {
"pluginVersion": "",
"options": {
"dims": {
"frame": 0
},
"legend": {
"calcs": [],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"series": [
{
"name": "Price",
"pointColor": {
"fixed": "#fade2a40"
},
"pointSize": {
"field": "Price",
"fixed": 5,
"max": 50,
"min": 1
},
"x": "Lat",
"y": "Lng"
}
],
"seriesMapping": "manual",
"tooltip": {
"mode": "single",
"sort": "none"
}
},
"fieldConfig": {
"defaults": {
"thresholds": {
"mode": "absolute",
"steps": [
{
"value": null,
"color": "green"
},
{
"value": 80,
"color": "red"
}
]
},
"color": {
"mode": "fixed",
"fixedColor": "red"
},
"custom": {
"axisBorderShow": false,
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"lineWidth": 1,
"pointSize": {
"fixed": 5
},
"scaleDistribution": {
"type": "linear"
},
"show": "points"
}
},
"overrides": []
}
}
}
}
},
"panel-7": {
"kind": "Panel",
"spec": {
"id": 7,
"title": "MPG vs HP (by Country)",
"description": "",
"links": [],
"data": {
"kind": "QueryGroup",
"spec": {
"queries": [
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "grafana-testdata-datasource",
"spec": {
"csvFileName": "automobiles.csv",
"scenarioId": "csv_file"
}
},
"refId": "A",
"hidden": false
}
}
],
"transformations": [
{
"kind": "partitionByValues",
"spec": {
"id": "partitionByValues",
"options": {
"fields": [
"Origin"
]
}
}
}
],
"queryOptions": {}
}
},
"vizConfig": {
"kind": "xychart",
"spec": {
"pluginVersion": "",
"options": {
"legend": {
"calcs": [],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"series": [
{
"name": "USA",
"pointColor": {
"fixed": "#f2495c80"
},
"pointSize": {
"fixed": 5,
"max": 20,
"min": 1
},
"x": "Horsepower USA",
"y": "Miles_per_Gallon USA"
},
{
"name": "Europe",
"pointColor": {
"fixed": "#5795f280"
},
"pointSize": {
"fixed": 5,
"max": 20,
"min": 1
},
"x": "Horsepower Europe",
"y": "Miles_per_Gallon Europe"
},
{
"name": "Japan",
"pointColor": {
"fixed": "#ff983080"
},
"pointSize": {
"fixed": 5,
"max": 20,
"min": 1
},
"x": "Horsepower Japan",
"y": "Miles_per_Gallon Japan"
}
],
"seriesMapping": "manual",
"tooltip": {
"mode": "single",
"sort": "none"
}
},
"fieldConfig": {
"defaults": {
"thresholds": {
"mode": "absolute",
"steps": [
{
"value": null,
"color": "green"
},
{
"value": 80,
"color": "red"
}
]
},
"color": {
"mode": "palette-classic"
},
"custom": {
"axisBorderShow": false,
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"lineWidth": 1,
"pointSize": {
"fixed": 5
},
"scaleDistribution": {
"type": "linear"
},
"show": "points"
}
},
"overrides": [
{
"matcher": {
"id": "byName",
"options": "Miles_per_Gallon USA"
},
"properties": [
{
"id": "custom.axisLabel",
"value": "Miles_per_Gallon"
}
]
},
{
"matcher": {
"id": "byName",
"options": "Horsepower USA"
},
"properties": [
{
"id": "custom.axisLabel",
"value": "Horsepower"
}
]
}
]
}
}
}
}
},
"panel-8": {
"kind": "Panel",
"spec": {
"id": 8,
"title": "MPG vs Acceleration (by Country)",
"description": "",
"links": [],
"data": {
"kind": "QueryGroup",
"spec": {
"queries": [
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "grafana-testdata-datasource",
"spec": {
"csvFileName": "automobiles.csv",
"scenarioId": "csv_file"
}
},
"refId": "A",
"hidden": false
}
}
],
"transformations": [
{
"kind": "partitionByValues",
"spec": {
"id": "partitionByValues",
"options": {
"fields": [
"Origin"
]
}
}
}
],
"queryOptions": {}
}
},
"vizConfig": {
"kind": "xychart",
"spec": {
"pluginVersion": "",
"options": {
"legend": {
"calcs": [],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"series": [
{
"name": "USA",
"pointColor": {
"fixed": "#f2495c80"
},
"pointSize": {
"fixed": 5,
"max": 20,
"min": 1
},
"x": "Acceleration USA",
"y": "Miles_per_Gallon USA"
},
{
"name": "Europe",
"pointColor": {
"fixed": "#5795f280"
},
"pointSize": {
"fixed": 5,
"max": 20,
"min": 1
},
"x": "Acceleration Europe",
"y": "Miles_per_Gallon Europe"
},
{
"name": "Japan",
"pointColor": {
"fixed": "#ff983080"
},
"pointSize": {
"fixed": 5,
"max": 20,
"min": 1
},
"x": "Acceleration Japan",
"y": "Miles_per_Gallon Japan"
}
],
"seriesMapping": "manual",
"tooltip": {
"mode": "single",
"sort": "none"
}
},
"fieldConfig": {
"defaults": {
"thresholds": {
"mode": "absolute",
"steps": [
{
"value": null,
"color": "green"
},
{
"value": 80,
"color": "red"
}
]
},
"color": {
"mode": "palette-classic"
},
"custom": {
"axisBorderShow": false,
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"lineWidth": 1,
"pointSize": {
"fixed": 5
},
"scaleDistribution": {
"type": "linear"
},
"show": "points"
}
},
"overrides": [
{
"matcher": {
"id": "byName",
"options": "Miles_per_Gallon USA"
},
"properties": [
{
"id": "custom.axisLabel",
"value": "Miles_per_Gallon"
}
]
},
{
"matcher": {
"id": "byName",
"options": "Acceleration USA"
},
"properties": [
{
"id": "custom.axisLabel",
"value": "Acceleration"
}
]
}
]
}
}
}
}
}
},
"layout": {
"kind": "GridLayout",
"spec": {
"items": [
{
"kind": "GridLayoutItem",
"spec": {
"x": 0,
"y": 0,
"width": 6,
"height": 20,
"element": {
"kind": "ElementReference",
"name": "panel-2"
}
}
},
{
"kind": "GridLayoutItem",
"spec": {
"x": 6,
"y": 0,
"width": 6,
"height": 20,
"element": {
"kind": "ElementReference",
"name": "panel-4"
}
}
},
{
"kind": "GridLayoutItem",
"spec": {
"x": 12,
"y": 0,
"width": 9,
"height": 17,
"element": {
"kind": "ElementReference",
"name": "panel-3"
}
}
},
{
"kind": "GridLayoutItem",
"spec": {
"x": 21,
"y": 0,
"width": 3,
"height": 7,
"element": {
"kind": "ElementReference",
"name": "panel-10"
}
}
},
{
"kind": "GridLayoutItem",
"spec": {
"x": 12,
"y": 17,
"width": 9,
"height": 15,
"element": {
"kind": "ElementReference",
"name": "panel-5"
}
}
},
{
"kind": "GridLayoutItem",
"spec": {
"x": 0,
"y": 20,
"width": 6,
"height": 12,
"element": {
"kind": "ElementReference",
"name": "panel-7"
}
}
},
{
"kind": "GridLayoutItem",
"spec": {
"x": 6,
"y": 20,
"width": 6,
"height": 12,
"element": {
"kind": "ElementReference",
"name": "panel-8"
}
}
},
{
"kind": "GridLayoutItem",
"spec": {
"x": 0,
"y": 32,
"width": 6,
"height": 12,
"element": {
"kind": "ElementReference",
"name": "panel-11"
}
}
},
{
"kind": "GridLayoutItem",
"spec": {
"x": 6,
"y": 32,
"width": 6,
"height": 12,
"element": {
"kind": "ElementReference",
"name": "panel-12"
}
}
},
{
"kind": "GridLayoutItem",
"spec": {
"x": 12,
"y": 32,
"width": 6,
"height": 12,
"element": {
"kind": "ElementReference",
"name": "panel-13"
}
}
},
{
"kind": "GridLayoutItem",
"spec": {
"x": 18,
"y": 32,
"width": 6,
"height": 12,
"element": {
"kind": "ElementReference",
"name": "panel-14"
}
}
}
]
}
},
"links": [],
"liveNow": false,
"preload": false,
"tags": [
"gdev",
"panel-tests",
"graph-ng"
],
"timeSettings": {
"timezone": "",
"from": "2022-10-07T05:04:04.516Z",
"to": "2022-10-07T17:04:04.516Z",
"autoRefresh": "",
"autoRefreshIntervals": [
"5s",
"10s",
"30s",
"1m",
"5m",
"15m",
"30m",
"1h",
"2h",
"1d"
],
"hideTimepicker": false,
"fiscalYearStartMonth": 0
},
"title": "Panel Tests - XY Chart migrations",
"variables": []
},
"status": {
"conversion": {
"failed": false,
"storedVersion": "v0alpha1"
}
}
}
|
json
|
github
|
https://github.com/grafana/grafana
|
apps/dashboard/pkg/migration/conversion/testdata/output/migrated_dev_dashboards/panel-xychart/v0alpha1.xychart-migrations.v42.v2alpha1.json
|
# Generated by Django 2.0.7 on 2018-07-12 17:10
import django.contrib.auth.validators
from django.db import migrations, models
import jsonfield.fields
class Migration(migrations.Migration):
    """Auto-generated schema migration for the ``ui`` app (Django 2.0.7).

    Adds the django-simple-history ``history_change_reason`` column to each
    historical table and re-declares field options (choices, help texts,
    verbose names, lengths) so the recorded schema matches the current
    model definitions. Do not edit the operations by hand; generate a new
    migration for any follow-up change.
    """
    dependencies = [
        ('ui', '0020_auto_20180608_1144'),
    ]
    operations = [
        # django-simple-history bookkeeping: free-text reason for the
        # recorded change, one column per historical model.
        migrations.AddField(
            model_name='historicalcollection',
            name='history_change_reason',
            field=models.CharField(max_length=100, null=True),
        ),
        migrations.AddField(
            model_name='historicalcollectionset',
            name='history_change_reason',
            field=models.CharField(max_length=100, null=True),
        ),
        migrations.AddField(
            model_name='historicalcredential',
            name='history_change_reason',
            field=models.CharField(max_length=100, null=True),
        ),
        migrations.AddField(
            model_name='historicalseed',
            name='history_change_reason',
            field=models.CharField(max_length=100, null=True),
        ),
        # The remaining operations only re-declare field metadata
        # (choices, help texts, etc.) to match the models at this point.
        migrations.AlterField(
            model_name='collection',
            name='end_date',
            field=models.DateTimeField(blank=True, help_text='If blank, will continue until stopped.', null=True),
        ),
        migrations.AlterField(
            model_name='collection',
            name='harvest_type',
            field=models.CharField(choices=[('twitter_user_timeline', 'Twitter user timeline'), ('twitter_search', 'Twitter search'), ('twitter_filter', 'Twitter filter'), ('twitter_sample', 'Twitter sample'), ('tumblr_blog_posts', 'Tumblr blog posts'), ('flickr_user', 'Flickr user'), ('weibo_timeline', 'Weibo timeline')], max_length=255),
        ),
        migrations.AlterField(
            model_name='collection',
            name='link',
            field=models.CharField(blank=True, max_length=512, verbose_name='Public link'),
        ),
        migrations.AlterField(
            model_name='collection',
            name='name',
            field=models.CharField(max_length=255, verbose_name='Collection name'),
        ),
        migrations.AlterField(
            model_name='collection',
            name='schedule_minutes',
            field=models.PositiveIntegerField(choices=[(1, 'One time harvest'), (30, 'Every 30 minutes'), (60, 'Every hour'), (240, 'Every 4 hours'), (720, 'Every 12 hours'), (1440, 'Every day'), (10080, 'Every week'), (40320, 'Every 4 weeks'), (5, 'Every 5 minutes')], default=10080, null=True, verbose_name='schedule'),
        ),
        migrations.AlterField(
            model_name='collection',
            name='visibility',
            field=models.CharField(choices=[('default', 'Group only'), ('local', 'All other users')], default='default', help_text='Who else can view and export from this collection. Select "All other users" to share with all Social Feed Manager users.', max_length=255),
        ),
        migrations.AlterField(
            model_name='collectionset',
            name='name',
            field=models.CharField(max_length=255, verbose_name='Collection set name'),
        ),
        migrations.AlterField(
            model_name='credential',
            name='name',
            field=models.CharField(max_length=255, verbose_name='Credential name'),
        ),
        migrations.AlterField(
            model_name='credential',
            name='platform',
            field=models.CharField(choices=[('twitter', 'Twitter'), ('flickr', 'Flickr'), ('weibo', 'Weibo'), ('tumblr', 'Tumblr')], help_text='Platform name', max_length=255),
        ),
        # NOTE(review): jsonfield defaults of [] / {} below are mutable
        # defaults shared at the model level; harmless inside this frozen
        # migration snapshot, but worth fixing on the live models.
        migrations.AlterField(
            model_name='export',
            name='errors',
            field=jsonfield.fields.JSONField(blank=True, default=[]),
        ),
        migrations.AlterField(
            model_name='export',
            name='export_format',
            field=models.CharField(choices=[('xlsx', 'Excel (XLSX)'), ('csv', 'Comma separated values (CSV)'), ('tsv', 'Tab separated values (TSV)'), ('json_full', 'Full JSON'), ('json', 'JSON of limited fields'), ('dehydrate', 'Text file of identifiers (dehydrate)')], default='xlsx', max_length=10),
        ),
        # NOTE(review): the choices below contain (100000, '1,000,000') —
        # the label suggests the value was meant to be 1000000, and 100000
        # now appears twice. This is a historical record; correct it with a
        # follow-up migration rather than by editing this one.
        migrations.AlterField(
            model_name='export',
            name='export_segment_size',
            field=models.BigIntegerField(blank=True, choices=[(100000, '100,000'), (250000, '250,000'), (500000, '500,000'), (100000, '1,000,000'), (None, 'Single file'), (100, '100')], default=250000, null=True),
        ),
        migrations.AlterField(
            model_name='export',
            name='infos',
            field=jsonfield.fields.JSONField(blank=True, default=[]),
        ),
        migrations.AlterField(
            model_name='export',
            name='status',
            field=models.CharField(choices=[('not requested', 'Not requested'), ('requested', 'Requested'), ('running', 'Running'), ('completed success', 'Success'), ('completed failure', 'Failure')], default='not requested', max_length=20),
        ),
        migrations.AlterField(
            model_name='export',
            name='warnings',
            field=jsonfield.fields.JSONField(blank=True, default=[]),
        ),
        migrations.AlterField(
            model_name='harvest',
            name='errors',
            field=jsonfield.fields.JSONField(blank=True, default=[]),
        ),
        migrations.AlterField(
            model_name='harvest',
            name='infos',
            field=jsonfield.fields.JSONField(blank=True, default=[]),
        ),
        migrations.AlterField(
            model_name='harvest',
            name='status',
            field=models.CharField(choices=[('requested', 'Requested'), ('completed success', 'Success'), ('completed failure', 'Completed with errors'), ('running', 'Running'), ('stop requested', 'Stop requested'), ('stopping', 'Stopping'), ('voided', 'Voided'), ('skipped', 'Skipped'), ('paused', 'Paused')], default='requested', max_length=20),
        ),
        migrations.AlterField(
            model_name='harvest',
            name='token_updates',
            field=jsonfield.fields.JSONField(blank=True, default={}),
        ),
        migrations.AlterField(
            model_name='harvest',
            name='uids',
            field=jsonfield.fields.JSONField(blank=True, default={}),
        ),
        migrations.AlterField(
            model_name='harvest',
            name='warnings',
            field=jsonfield.fields.JSONField(blank=True, default=[]),
        ),
        migrations.AlterField(
            model_name='historicalcollection',
            name='end_date',
            field=models.DateTimeField(blank=True, help_text='If blank, will continue until stopped.', null=True),
        ),
        migrations.AlterField(
            model_name='historicalcollection',
            name='harvest_type',
            field=models.CharField(choices=[('twitter_user_timeline', 'Twitter user timeline'), ('twitter_search', 'Twitter search'), ('twitter_filter', 'Twitter filter'), ('twitter_sample', 'Twitter sample'), ('tumblr_blog_posts', 'Tumblr blog posts'), ('flickr_user', 'Flickr user'), ('weibo_timeline', 'Weibo timeline')], max_length=255),
        ),
        migrations.AlterField(
            model_name='historicalcollection',
            name='link',
            field=models.CharField(blank=True, max_length=512, verbose_name='Public link'),
        ),
        migrations.AlterField(
            model_name='historicalcollection',
            name='name',
            field=models.CharField(max_length=255, verbose_name='Collection name'),
        ),
        migrations.AlterField(
            model_name='historicalcollection',
            name='schedule_minutes',
            field=models.PositiveIntegerField(choices=[(1, 'One time harvest'), (30, 'Every 30 minutes'), (60, 'Every hour'), (240, 'Every 4 hours'), (720, 'Every 12 hours'), (1440, 'Every day'), (10080, 'Every week'), (40320, 'Every 4 weeks'), (5, 'Every 5 minutes')], default=10080, null=True, verbose_name='schedule'),
        ),
        migrations.AlterField(
            model_name='historicalcollection',
            name='visibility',
            field=models.CharField(choices=[('default', 'Group only'), ('local', 'All other users')], default='default', help_text='Who else can view and export from this collection. Select "All other users" to share with all Social Feed Manager users.', max_length=255),
        ),
        migrations.AlterField(
            model_name='historicalcollectionset',
            name='name',
            field=models.CharField(max_length=255, verbose_name='Collection set name'),
        ),
        migrations.AlterField(
            model_name='historicalcredential',
            name='name',
            field=models.CharField(max_length=255, verbose_name='Credential name'),
        ),
        migrations.AlterField(
            model_name='historicalcredential',
            name='platform',
            field=models.CharField(choices=[('twitter', 'Twitter'), ('flickr', 'Flickr'), ('weibo', 'Weibo'), ('tumblr', 'Tumblr')], help_text='Platform name', max_length=255),
        ),
        migrations.AlterField(
            model_name='user',
            name='email_frequency',
            field=models.CharField(choices=[('daily', 'Daily'), ('weekly', 'Weekly'), ('monthly', 'Monthly'), ('none', 'None')], default='daily', max_length=10),
        ),
        migrations.AlterField(
            model_name='user',
            name='last_name',
            field=models.CharField(blank=True, max_length=150, verbose_name='last name'),
        ),
        migrations.AlterField(
            model_name='user',
            name='local_id',
            field=models.CharField(blank=True, default='', help_text='Local identifier', max_length=255),
        ),
        migrations.AlterField(
            model_name='user',
            name='username',
            field=models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username'),
        ),
    ]
|
unknown
|
codeparrot/codeparrot-clean
| ||
# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from boto.compat import six
class Rule(object):
    """
    A single rule in an S3 bucket Lifecycle configuration.

    :ivar id: Optional unique identifier for the rule (at most 255
        characters); the server generates one when no value is given.
    :ivar prefix: Key prefix selecting the objects the rule applies to.
        Defaults to the empty string, which matches every object.
    :ivar status: 'Enabled' if the rule is currently being applied,
        'Disabled' otherwise.
    :ivar expiration: An `Expiration` instance describing the lifetime
        of the matching objects.
    :ivar transition: A `Transition` instance describing when objects
        move to a different storage class.
    """

    # XML element names that map straight onto simple attributes.
    _SIMPLE_ELEMENTS = {'ID': 'id', 'Prefix': 'prefix', 'Status': 'status'}

    def __init__(self, id=None, prefix=None, status=None, expiration=None,
                 transition=None):
        self.id = id
        self.prefix = prefix if prefix is not None else ''
        self.status = status
        # Historically a bare integer day count was accepted here; wrap it
        # in an Expiration so older callers keep working.
        if isinstance(expiration, six.integer_types):
            self.expiration = Expiration(days=expiration)
        else:
            self.expiration = expiration
        self.transition = transition

    def __repr__(self):
        return '<Rule: %s>' % self.id

    def startElement(self, name, attrs, connection):
        # Hand nested elements off to a fresh child object so the SAX
        # driver delivers their sub-elements directly to it.
        if name == 'Transition':
            self.transition = Transition()
            return self.transition
        if name == 'Expiration':
            self.expiration = Expiration()
            return self.expiration
        return None

    def endElement(self, name, value, connection):
        attr = self._SIMPLE_ELEMENTS.get(name)
        if attr is None:
            # Unknown scalar elements are retained verbatim as attributes.
            setattr(self, name, value)
        else:
            setattr(self, attr, value)

    def to_xml(self):
        """Serialize this rule to its S3 XML representation."""
        parts = ['<Rule>']
        if self.id is not None:
            parts.append('<ID>%s</ID>' % self.id)
        parts.append('<Prefix>%s</Prefix>' % self.prefix)
        parts.append('<Status>%s</Status>' % self.status)
        if self.expiration is not None:
            parts.append(self.expiration.to_xml())
        if self.transition is not None:
            parts.append(self.transition.to_xml())
        parts.append('</Rule>')
        return ''.join(parts)
class Expiration(object):
    """
    The point at which objects governed by a rule expire.

    Exactly one of the two attributes is normally set.

    :ivar days: Number of days until the object expires.
    :ivar date: Absolute expiry date, as an ISO 8601 string.
    """

    def __init__(self, days=None, date=None):
        self.days = days
        self.date = date

    def startElement(self, name, attrs, connection):
        # No nested XML elements to delegate to.
        return None

    def endElement(self, name, value, connection):
        if name == 'Date':
            self.date = value
        elif name == 'Days':
            # Day counts arrive as text; store them numerically.
            self.days = int(value)

    def __repr__(self):
        when = "on: %s" % self.date if self.days is None else "in: %s days" % self.days
        return '<Expiration: %s>' % when

    def to_xml(self):
        """Serialize to the S3 <Expiration> XML element."""
        inner = ''
        if self.days is not None:
            inner = '<Days>%s</Days>' % self.days
        elif self.date is not None:
            inner = '<Date>%s</Date>' % self.date
        return '<Expiration>' + inner + '</Expiration>'
class Transition(object):
    """
    A transition of objects to a different storage class.

    :ivar days: Number of days until the object should be moved.
    :ivar date: Absolute move date, as an ISO 8601 string.
    :ivar storage_class: Target storage class (valid value: GLACIER).
    """

    def __init__(self, days=None, date=None, storage_class=None):
        self.days = days
        self.date = date
        self.storage_class = storage_class

    def startElement(self, name, attrs, connection):
        # No nested XML elements to delegate to.
        return None

    def endElement(self, name, value, connection):
        if name == 'StorageClass':
            self.storage_class = value
        elif name == 'Date':
            self.date = value
        elif name == 'Days':
            # Day counts arrive as text; store them numerically.
            self.days = int(value)

    def __repr__(self):
        when = "on: %s" % self.date if self.days is None else "in: %s days" % self.days
        return '<Transition: %s, %s>' % (when, self.storage_class)

    def to_xml(self):
        """Serialize to the S3 <Transition> XML element."""
        parts = ['<Transition>',
                 '<StorageClass>%s</StorageClass>' % self.storage_class]
        if self.days is not None:
            parts.append('<Days>%s</Days>' % self.days)
        elif self.date is not None:
            parts.append('<Date>%s</Date>' % self.date)
        parts.append('</Transition>')
        return ''.join(parts)
class Lifecycle(list):
    """
    Container for the `Rule` objects of an S3 Lifecycle configuration.

    Behaves as a plain list of rules, with the SAX hooks and XML
    serialization S3 expects layered on top.
    """

    def startElement(self, name, attrs, connection):
        if name != 'Rule':
            return None
        # Create the child rule up front so the SAX driver can feed its
        # sub-elements to it directly.
        rule = Rule()
        self.append(rule)
        return rule

    def endElement(self, name, value, connection):
        # Keep any top-level scalar elements around as attributes.
        setattr(self, name, value)

    def to_xml(self):
        """
        Returns a string containing the XML version of the Lifecycle
        configuration as defined by S3.
        """
        body = ''.join(rule.to_xml() for rule in self)
        return ('<?xml version="1.0" encoding="UTF-8"?>'
                '<LifecycleConfiguration>' + body + '</LifecycleConfiguration>')

    def add_rule(self, id=None, prefix='', status='Enabled',
                 expiration=None, transition=None):
        """
        Append a rule to this Lifecycle configuration. Only the local
        copy is modified; to install the new rule(s) on the bucket, pass
        this Lifecycle object to the configure_lifecycle method of the
        Bucket object.

        :type id: str
        :param id: Unique identifier for the rule. The value cannot be
            longer than 255 characters. This value is optional; the
            server will generate a unique value when none is provided.

        :type prefix: str
        :param prefix: Prefix identifying one or more objects to which
            the rule applies.

        :type status: str
        :param status: If 'Enabled', the rule is currently being
            applied. If 'Disabled', the rule is not currently being
            applied.

        :type expiration: int
        :param expiration: Lifetime, in days, of the objects subject to
            the rule (a non-zero positive integer). An Expiration
            instance is also accepted.

        :type transition: Transition
        :param transition: Indicates when an object transitions to a
            different storage class.
        """
        self.append(Rule(id, prefix, status, expiration, transition))
|
unknown
|
codeparrot/codeparrot-clean
| ||
# Hackerland is a one-dimensional city with n houses, where each house i is
# located at some xi on the x-axis. The Mayor wants to install radio
# transmitters on the roofs of the city's houses. Each transmitter has a range,
# k, meaning it can transmit a signal to all houses <=k units of distance away.
#
# Given a map of Hackerland and the value of k, can you find and print the
# minimum number of transmitters needed to cover every house in the city?
# (Every house must be covered by at least one transmitter) Each transmitter
# must be installed on top of an existing house.
#
# Input Format:
# The first line contains two space-separated integers describing the
# respective values of n (the number of houses in Hackerland) and k (the range
# of each transmitter).
# The second line contains n space-separated integers describing the
# respective locations of each house (i.e., x1, x2, ..., xn).
#
# Constraints:
# 1 <= n,k <= 10^5
# 1 <= xi <= 10^5
# There may be more than one house at the same location.
#
# Subtasks:
# 1 <= n <= 1000 for 50% of the maximum score.
#
# Output Format:
# Print a single integer denoting the minimum number of transmitters needed to
# cover all the houses.
#
# Sample Input 0
# 5 1
# 1 2 3 4 5
#
# Sample Output 0
# 2
#
# Explanation 0:
# We can cover the entire city by installing transmitters on houses at
# locations 2 and 4. Thus, we print 2 on a new line.
def min_transmitters(k, locations):
    """Return the minimum number of range-``k`` transmitters that cover
    every house.

    Greedy strategy: the leftmost uncovered house must be covered, so the
    best move is to place a transmitter on the rightmost house within
    ``k`` of it, then skip every house that transmitter reaches.

    :param k: transmission range of each transmitter (units on the x-axis).
    :param locations: iterable of house x-coordinates (need not be sorted;
        duplicates allowed).
    :return: minimum transmitter count (0 for no houses).
    """
    xs = sorted(locations)
    n = len(xs)
    count = 0
    i = 0
    while i < n:
        # Rightmost house within k of the leftmost uncovered house xs[i].
        j = i
        while j + 1 < n and xs[j + 1] - xs[i] <= k:
            j += 1
        placed = xs[j]
        count += 1
        # Skip every house the newly placed transmitter covers.
        i = j + 1
        while i < n and xs[i] - placed <= k:
            i += 1
    return count


def main():
    """Read ``n k`` and the house locations from stdin, print the answer."""
    # The house count on the first line is implied by the second line's
    # length, so it is read and discarded.
    _, k = map(int, input().split())
    locations = list(map(int, input().split()))
    print(min_transmitters(k, locations))


# Original script used Python-2-only raw_input()/print-statement syntax and
# ran at import time; this version is Python 3 and guarded.
if __name__ == '__main__':
    main()
|
unknown
|
codeparrot/codeparrot-clean
| ||
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """Auto-generated South schema migration for the ``facebook_ads`` app.

    Creates the ``Targeting`` model's table and links each ``AdGroup`` to
    it via a one-to-one field. The ``models`` dict below is South's frozen
    snapshot of the full app ORM state at this migration; do not edit it
    by hand.
    """
    def forwards(self, orm):
        """Apply the migration: create the table and add the FK column."""
        # Adding model 'Targeting'
        db.create_table('facebook_ads_targeting', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('countries', self.gf('facebook_ads.fields.CommaSeparatedCharField')(max_length=100, blank=True)),
            ('cities', self.gf('django.db.models.fields.CharField')(max_length=100)),
            ('zips', self.gf('django.db.models.fields.CharField')(max_length=100)),
            ('regions', self.gf('django.db.models.fields.CharField')(max_length=100)),
            ('radius', self.gf('django.db.models.fields.PositiveSmallIntegerField')()),
            ('locales', self.gf('django.db.models.fields.CharField')(max_length=100)),
            ('keywords', self.gf('facebook_ads.fields.CommaSeparatedCharField')(max_length=100)),
            ('user_adclusters', self.gf('django.db.models.fields.CharField')(max_length=100)),
            ('interested_in', self.gf('django.db.models.fields.CharField')(max_length=100)),
            ('genders', self.gf('facebook_ads.fields.CommaSeparatedCharField')(max_length=5, null=True, blank=True)),
            ('age_min', self.gf('facebook_ads.fields.PositiveSmallIntegerRangeField')(null=True, blank=True)),
            ('age_max', self.gf('facebook_ads.fields.PositiveSmallIntegerRangeField')(null=True, blank=True)),
            ('broad_age', self.gf('django.db.models.fields.SmallIntegerField')(default=0)),
            ('relationship_statuses', self.gf('django.db.models.fields.CommaSeparatedIntegerField')(max_length=100, null=True, blank=True)),
            ('user_event', self.gf('django.db.models.fields.CommaSeparatedIntegerField')(max_length=100)),
            ('connections', self.gf('django.db.models.fields.CommaSeparatedIntegerField')(max_length=100)),
            ('excluded_connections', self.gf('django.db.models.fields.CommaSeparatedIntegerField')(max_length=100)),
            ('friends_of_connections', self.gf('django.db.models.fields.CommaSeparatedIntegerField')(max_length=100)),
            ('college_networks', self.gf('django.db.models.fields.CharField')(max_length=100)),
            ('work_networks', self.gf('django.db.models.fields.CharField')(max_length=100)),
            ('education_statuses', self.gf('facebook_ads.fields.CommaSeparatedCharField')(max_length=100)),
            ('college_years', self.gf('django.db.models.fields.CommaSeparatedIntegerField')(max_length=100)),
            ('college_majors', self.gf('facebook_ads.fields.CommaSeparatedCharField')(max_length=100)),
        ))
        db.send_create_signal('facebook_ads', ['Targeting'])
        # Adding field 'AdGroup.targeting'
        # NOTE(review): default=0 here is a placeholder so existing rows can
        # satisfy the new NOT NULL column (keep_default=False drops it again);
        # rows present before this migration will point at Targeting pk 0.
        db.add_column('facebook_ads_adgroup', 'targeting', self.gf('django.db.models.fields.related.OneToOneField')(default=0, to=orm['facebook_ads.Targeting'], unique=True), keep_default=False)
    def backwards(self, orm):
        """Reverse the migration: drop the column, then the table."""
        # Deleting model 'Targeting'
        db.delete_table('facebook_ads_targeting')
        # Deleting field 'AdGroup.targeting'
        db.delete_column('facebook_ads_adgroup', 'targeting_id')
    # Frozen ORM snapshot used by South to build the ``orm`` object passed
    # to forwards()/backwards(). Machine-generated; never edit manually.
    models = {
        'facebook_ads.adaccount': {
            'Meta': {'object_name': 'AdAccount'},
            'account_id': ('django.db.models.fields.BigIntegerField', [], {'unique': 'True'}),
            'account_status': ('django.db.models.fields.SmallIntegerField', [], {}),
            'business_city': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'business_country_code': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'business_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'business_state': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'business_street': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'business_street2': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'business_zip': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'currency': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'daily_spend_limit': ('django.db.models.fields.IntegerField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_personal': ('django.db.models.fields.IntegerField', [], {}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'timezone_id': ('django.db.models.fields.IntegerField', [], {}),
            'timezone_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'vat_status': ('django.db.models.fields.IntegerField', [], {})
        },
        'facebook_ads.adcampaign': {
            'Meta': {'object_name': 'AdCampaign'},
            'account': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['facebook_ads.AdAccount']"}),
            'campaign_id': ('django.db.models.fields.BigIntegerField', [], {'unique': 'True'}),
            'campaign_status': ('django.db.models.fields.SmallIntegerField', [], {}),
            'daily_budget': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'daily_imps': ('django.db.models.fields.IntegerField', [], {}),
            'end_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'lifetime_budget': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'start_time': ('django.db.models.fields.DateTimeField', [], {}),
            'updated_time': ('django.db.models.fields.DateTimeField', [], {})
        },
        'facebook_ads.adcreative': {
            'Meta': {'object_name': 'AdCreative'},
            'auto_update': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'body': ('django.db.models.fields.CharField', [], {'max_length': '135'}),
            'count_current_adgroups': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'creative_id': ('django.db.models.fields.BigIntegerField', [], {'unique': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'image_hash': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'image_url': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'link_url': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'object_id': ('django.db.models.fields.BigIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'preview_url': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'related_fan_page': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'run_status': ('django.db.models.fields.SmallIntegerField', [], {}),
            'story_id': ('django.db.models.fields.BigIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '25'}),
            'type': ('django.db.models.fields.IntegerField', [], {}),
            'view_tag': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'facebook_ads.adgroup': {
            'Meta': {'object_name': 'AdGroup'},
            'account': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['facebook_ads.AdAccount']"}),
            'ad_id': ('django.db.models.fields.BigIntegerField', [], {}),
            'ad_status': ('django.db.models.fields.IntegerField', [], {}),
            'adgroup_id': ('django.db.models.fields.BigIntegerField', [], {'unique': 'True'}),
            'adgroup_status': ('django.db.models.fields.IntegerField', [], {}),
            'bid_type': ('django.db.models.fields.IntegerField', [], {}),
            'campaign': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['facebook_ads.AdCampaign']"}),
            'end_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'max_bid': ('django.db.models.fields.IntegerField', [], {}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'start_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'targeting': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['facebook_ads.Targeting']", 'unique': 'True'}),
            'updated_time': ('django.db.models.fields.DateTimeField', [], {})
        },
        'facebook_ads.targeting': {
            'Meta': {'object_name': 'Targeting'},
            'age_max': ('facebook_ads.fields.PositiveSmallIntegerRangeField', [], {'null': 'True', 'blank': 'True'}),
            'age_min': ('facebook_ads.fields.PositiveSmallIntegerRangeField', [], {'null': 'True', 'blank': 'True'}),
            'broad_age': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
            'cities': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'college_majors': ('facebook_ads.fields.CommaSeparatedCharField', [], {'max_length': '100'}),
            'college_networks': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'college_years': ('django.db.models.fields.CommaSeparatedIntegerField', [], {'max_length': '100'}),
            'connections': ('django.db.models.fields.CommaSeparatedIntegerField', [], {'max_length': '100'}),
            'countries': ('facebook_ads.fields.CommaSeparatedCharField', [], {'max_length': '100', 'blank': 'True'}),
            'education_statuses': ('facebook_ads.fields.CommaSeparatedCharField', [], {'max_length': '100'}),
            'excluded_connections': ('django.db.models.fields.CommaSeparatedIntegerField', [], {'max_length': '100'}),
            'friends_of_connections': ('django.db.models.fields.CommaSeparatedIntegerField', [], {'max_length': '100'}),
            'genders': ('facebook_ads.fields.CommaSeparatedCharField', [], {'max_length': '5', 'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'interested_in': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'keywords': ('facebook_ads.fields.CommaSeparatedCharField', [], {'max_length': '100'}),
            'locales': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'radius': ('django.db.models.fields.PositiveSmallIntegerField', [], {}),
            'regions': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'relationship_statuses': ('django.db.models.fields.CommaSeparatedIntegerField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'user_adclusters': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'user_event': ('django.db.models.fields.CommaSeparatedIntegerField', [], {'max_length': '100'}),
            'work_networks': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'zips': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        }
    }
    complete_apps = ['facebook_ads']
|
unknown
|
codeparrot/codeparrot-clean
| ||
#!/bin/bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Delegate to kafka-run-class.sh (located next to this script) with the
# GroupsCommand tool's main class; all CLI arguments are forwarded as-is.
# Quoting "$(dirname "$0")" keeps installation paths containing spaces or
# glob characters intact (the original unquoted form was word-split).
exec "$(dirname "$0")"/kafka-run-class.sh org.apache.kafka.tools.GroupsCommand "$@"
|
unknown
|
github
|
https://github.com/apache/kafka
|
bin/kafka-groups.sh
|
# Copyright 2012 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import threading
import eventlet
from eventlet import greenpool
from neutron.openstack.common import loopingcall
LOG = logging.getLogger(__name__)
def _thread_done(gt, *args, **kwargs):
"""Callback function to be passed to GreenThread.link() when we spawn()
Calls the :class:`ThreadGroup` to notify if.
"""
kwargs['group'].thread_done(kwargs['thread'])
class Thread(object):
"""Wrapper around a greenthread, that holds a reference to the
:class:`ThreadGroup`. The Thread will notify the :class:`ThreadGroup` when
it has done so it can be removed from the threads list.
"""
def __init__(self, thread, group):
self.thread = thread
self.thread.link(_thread_done, group=group, thread=self)
def stop(self):
self.thread.kill()
def wait(self):
return self.thread.wait()
def link(self, func, *args, **kwargs):
self.thread.link(func, *args, **kwargs)
class ThreadGroup(object):
"""The point of the ThreadGroup class is to:
* keep track of timers and greenthreads (making it easier to stop them
when need be).
* provide an easy API to add timers.
"""
def __init__(self, thread_pool_size=10):
self.pool = greenpool.GreenPool(thread_pool_size)
self.threads = []
self.timers = []
def add_dynamic_timer(self, callback, initial_delay=None,
periodic_interval_max=None, *args, **kwargs):
timer = loopingcall.DynamicLoopingCall(callback, *args, **kwargs)
timer.start(initial_delay=initial_delay,
periodic_interval_max=periodic_interval_max)
self.timers.append(timer)
def add_timer(self, interval, callback, initial_delay=None,
*args, **kwargs):
pulse = loopingcall.FixedIntervalLoopingCall(callback, *args, **kwargs)
pulse.start(interval=interval,
initial_delay=initial_delay)
self.timers.append(pulse)
def add_thread(self, callback, *args, **kwargs):
gt = self.pool.spawn(callback, *args, **kwargs)
th = Thread(gt, self)
self.threads.append(th)
return th
def thread_done(self, thread):
self.threads.remove(thread)
def _stop_threads(self):
current = threading.current_thread()
# Iterate over a copy of self.threads so thread_done doesn't
# modify the list while we're iterating
for x in self.threads[:]:
if x is current:
# don't kill the current thread.
continue
try:
x.stop()
except eventlet.greenlet.GreenletExit:
pass
except Exception as ex:
LOG.exception(ex)
def stop_timers(self):
for x in self.timers:
try:
x.stop()
except Exception as ex:
LOG.exception(ex)
self.timers = []
def stop(self, graceful=False):
"""stop function has the option of graceful=True/False.
* In case of graceful=True, wait for all threads to be finished.
Never kill threads.
* In case of graceful=False, kill threads immediately.
"""
self.stop_timers()
if graceful:
# In case of graceful=True, wait for all threads to be
# finished, never kill threads
self.wait()
else:
# In case of graceful=False(Default), kill threads
# immediately
self._stop_threads()
def wait(self):
for x in self.timers:
try:
x.wait()
except eventlet.greenlet.GreenletExit:
pass
except Exception as ex:
LOG.exception(ex)
current = threading.current_thread()
# Iterate over a copy of self.threads so thread_done doesn't
# modify the list while we're iterating
for x in self.threads[:]:
if x is current:
continue
try:
x.wait()
except eventlet.greenlet.GreenletExit:
pass
except Exception as ex:
LOG.exception(ex)
|
unknown
|
codeparrot/codeparrot-clean
| ||
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import datetime as dt
import mock
import httplib as http
from dateutil.parser import parse as parse_date
from nose.tools import * # noqa PEP8 asserts
from modularodm import Q
from framework.exceptions import HTTPError
from framework.auth import Auth
from website.models import Node, MetaSchema, DraftRegistration
from website.project.metadata.schemas import ACTIVE_META_SCHEMAS, _name_to_id
from website.util import permissions, api_url_for
from website.project.views import drafts as draft_views
from tests.factories import (
NodeFactory, AuthUserFactory, DraftRegistrationFactory, RegistrationFactory
)
from tests.test_registrations.base import RegistrationsTestBase
from tests.base import get_default_metaschema
class TestRegistrationViews(RegistrationsTestBase):
    """View tests for registration pages: access control and redirects."""

    def test_node_register_page_not_registration_redirects(self):
        # A plain (non-registration) node redirects away from the register page.
        url = self.node.web_url_for('node_register_page')
        res = self.app.get(url, auth=self.user.auth)
        assert_equal(res.status_code, http.FOUND)

    @mock.patch('website.archiver.tasks.archive')
    def test_node_register_page_registration(self, mock_archive):
        reg = self.node.register_node(get_default_metaschema(), self.auth, '', None)
        url = reg.web_url_for('node_register_page')
        res = self.app.get(url, auth=self.user.auth)
        assert_equal(res.status_code, http.OK)

    def test_non_admin_can_view_node_register_page(self):
        non_admin = AuthUserFactory()
        self.node.add_contributor(
            non_admin,
            permissions.DEFAULT_CONTRIBUTOR_PERMISSIONS,
            auth=self.auth,
            save=True
        )
        reg = RegistrationFactory(project=self.node)
        url = reg.web_url_for('node_register_page')
        res = self.app.get(url, auth=non_admin.auth)
        assert_equal(res.status_code, http.OK)

    def test_is_public_node_register_page(self):
        self.node.is_public = True
        self.node.save()
        reg = RegistrationFactory(project=self.node)
        reg.is_public = True
        reg.save()
        url = reg.web_url_for('node_register_page')
        # Anonymous request (auth=None): public registrations are viewable.
        res = self.app.get(url, auth=None)
        assert_equal(res.status_code, http.OK)

    @mock.patch('framework.tasks.handlers.enqueue_task', mock.Mock())
    def test_register_template_page_backwards_comptability(self):
        # Historically metaschema's were referenced by a slugified version
        # of their name.
        reg = self.draft.register(
            auth=self.auth,
            save=True
        )
        url = reg.web_url_for(
            'node_register_template_page',
            metaschema_id=_name_to_id(self.meta_schema.name),
        )
        res = self.app.get(url, auth=self.user.auth)
        assert_equal(res.status_code, http.OK)

    def test_register_template_page_redirects_if_not_registration(self):
        url = self.node.web_url_for(
            'node_register_template_page',
            metaschema_id=self.meta_schema._id,
        )
        res = self.app.get(url, auth=self.user.auth)
        assert_equal(res.status_code, http.FOUND)
class TestDraftRegistrationViews(RegistrationsTestBase):
    """View tests for draft registrations: submit/register/CRUD endpoints,
    embargo handling, permission checks, and the draft_views helpers."""

    def tearDown(self):
        super(TestDraftRegistrationViews, self).tearDown()
        # Drop all drafts so per-test counts start from a clean slate.
        DraftRegistration.remove()

    def test_submit_draft_for_review(self):
        url = self.draft_api_url('submit_draft_for_review')
        res = self.app.post_json(
            url,
            self.embargo_payload,
            auth=self.user.auth
        )
        assert_equal(res.status_code, http.ACCEPTED)
        data = res.json
        assert_in('status', data)
        assert_equal(data['status'], 'initiated')
        self.draft.reload()
        assert_is_not_none(self.draft.approval)
        assert_equal(self.draft.approval.meta, {
            u'registration_choice': unicode(self.embargo_payload['registrationChoice']),
            u'embargo_end_date': unicode(self.embargo_payload['embargoEndDate'])
        })

    def test_submit_draft_for_review_invalid_registrationChoice(self):
        url = self.draft_api_url('submit_draft_for_review')
        res = self.app.post_json(
            url,
            self.invalid_payload,
            auth=self.user.auth,
            expect_errors=True
        )
        assert_equal(res.status_code, http.BAD_REQUEST)

    def test_submit_draft_for_review_already_registered(self):
        reg = RegistrationFactory(user=self.user)
        res = self.app.post_json(
            reg.api_url_for('submit_draft_for_review', draft_id=self.draft._id),
            self.invalid_payload,
            auth=self.user.auth,
            expect_errors=True
        )
        assert_equal(res.status_code, http.BAD_REQUEST)

    def test_draft_before_register_page(self):
        url = self.draft_url('draft_before_register_page')
        res = self.app.get(url, auth=self.user.auth)
        assert_equal(res.status_code, http.OK)

    def test_submit_draft_for_review_non_admin(self):
        url = self.draft_api_url('submit_draft_for_review')
        res = self.app.post_json(
            url,
            self.embargo_payload,
            auth=self.non_admin.auth,
            expect_errors=True
        )
        assert_equal(res.status_code, http.FORBIDDEN)

    @mock.patch('website.project.model.DraftRegistration.register', autospec=True)
    def test_register_draft_registration(self, mock_register_draft):
        url = self.node.api_url_for('register_draft_registration', draft_id=self.draft._id)
        res = self.app.post_json(url, {
            'registrationChoice': 'immediate'
        }, auth=self.user.auth)
        assert_equal(res.status_code, http.ACCEPTED)
        # register() must have been invoked on this exact draft.
        assert_equal(mock_register_draft.call_args[0][0]._id, self.draft._id)

    @mock.patch('framework.tasks.handlers.enqueue_task')
    def test_register_template_make_public_creates_pending_registration(self, mock_enquque):
        url = self.node.api_url_for('register_draft_registration', draft_id=self.draft._id)
        res = self.app.post_json(url, self.immediate_payload, auth=self.user.auth)
        assert_equal(res.status_code, http.ACCEPTED)
        self.node.reload()
        # Most recent node is a registration
        reg = Node.load(self.node.node__registrations[-1])
        assert_true(reg.is_registration)
        # The registration created is public
        assert_true(reg.is_pending_registration)

    @mock.patch('framework.tasks.handlers.enqueue_task')
    def test_register_template_make_public_makes_children_pending_registration(self, mock_enqueue):
        comp1 = NodeFactory(parent=self.node)
        NodeFactory(parent=comp1)
        url = self.node.api_url_for('register_draft_registration', draft_id=self.draft._id)
        res = self.app.post_json(url, self.immediate_payload, auth=self.user.auth)
        assert_equal(res.status_code, http.ACCEPTED)
        self.node.reload()
        # Most recent node is a registration
        reg = Node.load(self.node.node__registrations[-1])
        for node in reg.get_descendants_recursive():
            assert_true(node.is_registration)
            assert_true(node.is_pending_registration)

    @mock.patch('framework.tasks.handlers.enqueue_task')
    def test_register_draft_registration_with_embargo_creates_embargo(self, mock_enquque):
        url = self.node.api_url_for('register_draft_registration', draft_id=self.draft._id)
        end_date = dt.datetime.utcnow() + dt.timedelta(days=3)
        res = self.app.post_json(
            url,
            {
                'registrationChoice': 'embargo',
                'embargoEndDate': end_date.strftime('%c'),
            },
            auth=self.user.auth)
        assert_equal(res.status_code, http.ACCEPTED)
        self.node.reload()
        # Most recent node is a registration
        reg = Node.load(self.node.node__registrations[-1])
        assert_true(reg.is_registration)
        # The registration created is not public
        assert_false(reg.is_public)
        # The registration is pending an embargo that has not been approved
        assert_true(reg.is_pending_embargo)
        assert_false(reg.embargo_end_date)

    @mock.patch('framework.tasks.handlers.enqueue_task')
    def test_register_draft_registration_with_embargo_adds_to_parent_project_logs(self, mock_enquque):
        initial_project_logs = len(self.node.logs)
        res = self.app.post_json(
            self.node.api_url_for('register_draft_registration', draft_id=self.draft._id),
            self.embargo_payload,
            auth=self.user.auth
        )
        assert_equal(res.status_code, http.ACCEPTED)
        self.node.reload()
        # Logs: Created, registered, embargo initiated
        # NOTE(review): comment lists three events but only one new log is
        # asserted on the parent project -- confirm against logging behavior.
        assert_equal(len(self.node.logs), initial_project_logs + 1)

    @mock.patch('framework.tasks.handlers.enqueue_task')
    def test_register_draft_registration_with_embargo_is_not_public(self, mock_enqueue):
        res = self.app.post_json(
            self.node.api_url_for('register_draft_registration', draft_id=self.draft._id),
            self.embargo_payload,
            auth=self.user.auth
        )
        assert_equal(res.status_code, http.ACCEPTED)
        registration = Node.find().sort('-registered_date')[0]
        assert_true(registration.is_registration)
        assert_false(registration.is_public)
        assert_true(registration.is_pending_embargo)
        assert_is_not_none(registration.embargo)

    @mock.patch('framework.tasks.handlers.enqueue_task')
    def test_register_draft_registration_invalid_embargo_end_date_raises_HTTPError(self, mock_enqueue):
        res = self.app.post_json(
            self.node.api_url_for('register_draft_registration', draft_id=self.draft._id),
            self.invalid_embargo_date_payload,
            auth=self.user.auth,
            expect_errors=True
        )
        assert_equal(res.status_code, http.BAD_REQUEST)

    def test_register_draft_registration_invalid_registrationChoice(self):
        res = self.app.post_json(
            self.node.api_url_for('register_draft_registration', draft_id=self.draft._id),
            self.invalid_payload,
            auth=self.user.auth,
            expect_errors=True
        )
        assert_equal(res.status_code, http.BAD_REQUEST)

    def test_register_draft_registration_already_registered(self):
        reg = RegistrationFactory(user=self.user)
        res = self.app.post_json(
            reg.api_url_for('register_draft_registration', draft_id=self.draft._id),
            self.invalid_payload,
            auth=self.user.auth,
            expect_errors=True
        )
        assert_equal(res.status_code, http.BAD_REQUEST)

    def test_get_draft_registration(self):
        url = self.draft_api_url('get_draft_registration')
        res = self.app.get(url, auth=self.user.auth)
        assert_equal(res.status_code, http.OK)
        assert_equal(res.json['pk'], self.draft._id)

    def test_get_draft_registration_invalid(self):
        url = self.node.api_url_for('get_draft_registration', draft_id='13123123')
        res = self.app.get(url, auth=self.user.auth, expect_errors=True)
        assert_equal(res.status_code, http.NOT_FOUND)

    def test_get_draft_registration_not_admin(self):
        url = self.draft_api_url('get_draft_registration')
        res = self.app.get(url, auth=self.non_admin.auth, expect_errors=True)
        assert_equal(res.status_code, http.FORBIDDEN)

    def test_get_draft_registrations_only_gets_drafts_for_that_node(self):
        dummy = NodeFactory()
        # Drafts for dummy node
        for i in range(5):
            d = DraftRegistrationFactory(
                initiator=self.user,
                branched_from=dummy,
                meta_schema=self.meta_schema,
                schema_data={}
            )
        found = [self.draft]
        # Drafts for self.node
        for i in range(3):
            d = DraftRegistrationFactory(
                initiator=self.user,
                branched_from=self.node,
                meta_schema=self.meta_schema,
                schema_data={}
            )
            found.append(d)
        url = self.node.api_url_for('get_draft_registrations')
        res = self.app.get(url, auth=self.user.auth)
        assert_equal(res.status_code, http.OK)
        # 3 new, 1 from setUp
        assert_equal(len(res.json['drafts']), 4)
        for draft in res.json['drafts']:
            assert_in(draft['pk'], [f._id for f in found])

    def test_new_draft_registration_POST(self):
        target = NodeFactory(creator=self.user)
        payload = {
            'schema_name': self.meta_schema.name,
            'schema_version': self.meta_schema.schema_version
        }
        url = target.web_url_for('new_draft_registration')
        res = self.app.post(url, payload, auth=self.user.auth)
        assert_equal(res.status_code, http.FOUND)
        target.reload()
        draft = DraftRegistration.find_one(Q('branched_from', 'eq', target))
        assert_equal(draft.registration_schema, self.meta_schema)

    def test_new_draft_registration_on_registration(self):
        # Cannot branch a new draft off an existing registration.
        target = RegistrationFactory(user=self.user)
        payload = {
            'schema_name': self.meta_schema.name,
            'schema_version': self.meta_schema.schema_version
        }
        url = target.web_url_for('new_draft_registration')
        res = self.app.post(url, payload, auth=self.user.auth, expect_errors=True)
        assert_equal(res.status_code, http.FORBIDDEN)

    def test_update_draft_registration_cant_update_registered(self):
        metadata = {
            'summary': {'value': 'updated'}
        }
        assert_not_equal(metadata, self.draft.registration_metadata)
        payload = {
            'schema_data': metadata,
            'schema_name': 'OSF-Standard Pre-Data Collection Registration',
            'schema_version': 1
        }
        self.draft.register(self.auth, save=True)
        url = self.node.api_url_for('update_draft_registration', draft_id=self.draft._id)
        res = self.app.put_json(url, payload, auth=self.user.auth, expect_errors=True)
        assert_equal(res.status_code, http.FORBIDDEN)

    def test_edit_draft_registration_page_already_registered(self):
        self.draft.register(self.auth, save=True)
        url = self.node.web_url_for('edit_draft_registration_page', draft_id=self.draft._id)
        res = self.app.get(url, auth=self.user.auth, expect_errors=True)
        assert_equal(res.status_code, http.FORBIDDEN)

    def test_update_draft_registration(self):
        metadata = {
            'summary': {
                'value': 'updated',
                'comments': []
            }
        }
        assert_not_equal(metadata, self.draft.registration_metadata)
        payload = {
            'schema_data': metadata,
            'schema_name': 'OSF-Standard Pre-Data Collection Registration',
            'schema_version': 1
        }
        url = self.node.api_url_for('update_draft_registration', draft_id=self.draft._id)
        res = self.app.put_json(url, payload, auth=self.user.auth)
        assert_equal(res.status_code, http.OK)
        open_ended_schema = MetaSchema.find_one(
            Q('name', 'eq', 'OSF-Standard Pre-Data Collection Registration') &
            Q('schema_version', 'eq', 1)
        )
        self.draft.reload()
        # Updating may also switch the draft onto the named schema.
        assert_equal(open_ended_schema, self.draft.registration_schema)
        assert_equal(metadata, self.draft.registration_metadata)

    def test_update_draft_registration_non_admin(self):
        metadata = {
            'summary': {
                'value': 'updated',
                'comments': []
            }
        }
        assert_not_equal(metadata, self.draft.registration_metadata)
        payload = {
            'schema_data': metadata,
            'schema_name': 'OSF-Standard Pre-Data Collection Registration',
            'schema_version': 1
        }
        url = self.node.api_url_for('update_draft_registration', draft_id=self.draft._id)
        res = self.app.put_json(url, payload, auth=self.non_admin.auth, expect_errors=True)
        assert_equal(res.status_code, http.FORBIDDEN)

    def test_delete_draft_registration(self):
        assert_equal(1, DraftRegistration.find().count())
        url = self.node.api_url_for('delete_draft_registration', draft_id=self.draft._id)
        res = self.app.delete(url, auth=self.user.auth)
        assert_equal(res.status_code, http.NO_CONTENT)
        assert_equal(0, DraftRegistration.find().count())

    def test_delete_draft_registration_non_admin(self):
        assert_equal(1, DraftRegistration.find().count())
        url = self.node.api_url_for('delete_draft_registration', draft_id=self.draft._id)
        res = self.app.delete(url, auth=self.non_admin.auth, expect_errors=True)
        assert_equal(res.status_code, http.FORBIDDEN)
        assert_equal(1, DraftRegistration.find().count())

    @mock.patch('website.archiver.tasks.archive')
    def test_delete_draft_registration_registered(self, mock_register_draft):
        # A draft that has produced a registration cannot be deleted.
        self.draft.register(auth=self.auth, save=True)
        url = self.node.api_url_for('delete_draft_registration', draft_id=self.draft._id)
        res = self.app.delete(url, auth=self.user.auth, expect_errors=True)
        assert_equal(res.status_code, http.FORBIDDEN)

    @mock.patch('website.archiver.tasks.archive')
    def test_delete_draft_registration_approved_and_registration_deleted(self, mock_register_draft):
        # ...unless the resulting registration has itself been deleted.
        self.draft.register(auth=self.auth, save=True)
        self.draft.registered_node.is_deleted = True
        self.draft.registered_node.save()
        assert_equal(1, DraftRegistration.find().count())
        url = self.node.api_url_for('delete_draft_registration', draft_id=self.draft._id)
        res = self.app.delete(url, auth=self.user.auth)
        assert_equal(res.status_code, http.NO_CONTENT)
        assert_equal(0, DraftRegistration.find().count())

    def test_only_admin_can_delete_registration(self):
        non_admin = AuthUserFactory()
        assert_equal(1, DraftRegistration.find().count())
        url = self.node.api_url_for('delete_draft_registration', draft_id=self.draft._id)
        res = self.app.delete(url, auth=non_admin.auth, expect_errors=True)
        assert_equal(res.status_code, http.FORBIDDEN)
        assert_equal(1, DraftRegistration.find().count())

    def test_get_metaschemas(self):
        url = api_url_for('get_metaschemas')
        res = self.app.get(url).json
        assert_equal(len(res['meta_schemas']), len(ACTIVE_META_SCHEMAS))

    def test_get_metaschemas_all(self):
        url = api_url_for('get_metaschemas', include='all')
        res = self.app.get(url)
        assert_equal(res.status_code, http.OK)
        assert_equal(len(res.json['meta_schemas']), len(
            [
                schema for schema in MetaSchema.find()
                if schema.name in ACTIVE_META_SCHEMAS
            ]
        ))

    def test_validate_embargo_end_date_too_soon(self):
        today = dt.datetime.today()
        too_soon = today + dt.timedelta(days=5)
        try:
            draft_views.validate_embargo_end_date(too_soon.isoformat(), self.node)
        except HTTPError as e:
            assert_equal(e.code, http.BAD_REQUEST)
        else:
            self.fail()

    def test_validate_embargo_end_date_too_late(self):
        today = dt.datetime.today()
        too_late = today + dt.timedelta(days=(4 * 365) + 1)
        try:
            draft_views.validate_embargo_end_date(too_late.isoformat(), self.node)
        except HTTPError as e:
            assert_equal(e.code, http.BAD_REQUEST)
        else:
            self.fail()

    def test_validate_embargo_end_date_ok(self):
        today = dt.datetime.today()
        too_late = today + dt.timedelta(days=12)
        try:
            draft_views.validate_embargo_end_date(too_late.isoformat(), self.node)
        except Exception:
            self.fail()

    def test_check_draft_state_registered(self):
        reg = RegistrationFactory()
        self.draft.registered_node = reg
        self.draft.save()
        try:
            draft_views.check_draft_state(self.draft)
        except HTTPError as e:
            assert_equal(e.code, http.FORBIDDEN)
        else:
            self.fail()

    def test_check_draft_state_registered_but_deleted(self):
        reg = RegistrationFactory()
        self.draft.registered_node = reg
        reg.is_deleted = True
        self.draft.save()
        try:
            draft_views.check_draft_state(self.draft)
        except Exception:
            self.fail()

    def test_check_draft_state_pending_review(self):
        self.draft.submit_for_review(self.user, self.immediate_payload, save=True)
        try:
            with mock.patch.object(DraftRegistration, 'requires_approval', mock.PropertyMock(return_value=True)):
                draft_views.check_draft_state(self.draft)
        except HTTPError as e:
            assert_equal(e.code, http.FORBIDDEN)
        else:
            self.fail()

    def test_check_draft_state_approved(self):
        try:
            with mock.patch.object(DraftRegistration, 'requires_approval', mock.PropertyMock(return_value=True)), mock.patch.object(DraftRegistration, 'is_approved', mock.PropertyMock(return_value=True)):
                draft_views.check_draft_state(self.draft)
        except HTTPError as e:
            assert_equal(e.code, http.FORBIDDEN)
        else:
            self.fail()

    def test_check_draft_state_ok(self):
        try:
            draft_views.check_draft_state(self.draft)
        except Exception:
            self.fail()

    def test_check_draft_state_registered_and_deleted_and_approved(self):
        reg = RegistrationFactory()
        self.draft.registered_node = reg
        self.draft.save()
        reg.is_deleted = True
        reg.save()
        with mock.patch('website.project.model.DraftRegistration.is_approved', mock.PropertyMock(return_value=True)):
            try:
                draft_views.check_draft_state(self.draft)
            except HTTPError:
                self.fail()
|
unknown
|
codeparrot/codeparrot-clean
| ||
#!/usr/bin/env python
# This is free and unencumbered software released into the public domain.
import erlang, os, struct
def send(term, stream):
    """Serialize *term* and write it to *stream*, length-prefixed.

    Wire format: a 4-byte big-endian length followed by the external
    term format payload.
    """
    blob = erlang.term_to_binary(term)
    stream.write(struct.pack('!I', len(blob)))
    stream.write(blob)
    stream.flush()
def recv(stream):
    """Read one length-prefixed Erlang term from an input stream.

    Returns the decoded term, or None when the stream reaches EOF
    (including EOF in the middle of a message).

    Bug fix: the original used a single stream.read(n), but reads from a
    pipe may legally return fewer than n bytes; a short read was then
    silently treated as EOF, corrupting the message framing. We now loop
    until exactly the requested number of bytes is available.
    """
    header = _read_exact(stream, 4)
    if header is None:
        return None  # EOF
    (length,) = struct.unpack('!I', header)
    payload = _read_exact(stream, length)
    if payload is None:
        return None
    return erlang.binary_to_term(payload)


def _read_exact(stream, count):
    """Read exactly *count* bytes from *stream*; None if EOF comes first."""
    chunks = []
    remaining = count
    while remaining:
        chunk = stream.read(remaining)
        if not chunk:
            return None
        chunks.append(chunk)
        remaining -= len(chunk)
    return b''.join(chunks)
def recv_loop(stream):
    """Generate successive Erlang terms from *stream* until EOF.

    Stops when recv() returns a falsy value (None at EOF).
    """
    term = recv(stream)
    while term:
        yield term
        term = recv(stream)
if __name__ == '__main__':
    # File descriptors 3 (read) and 4 (write) are the pipes opened by the
    # Erlang port owner. Renamed from `input`/`output` to avoid shadowing
    # the Python builtins.
    reader, writer = os.fdopen(3, 'rb'), os.fdopen(4, 'wb')
    for message in recv_loop(reader):
        send(message, writer)  # echo the message back
|
unknown
|
codeparrot/codeparrot-clean
| ||
from calyptos.plugins.validator.validatorplugin import ValidatorPlugin
class Storage(ValidatorPlugin):
    """Validate per-cluster block-storage backend system properties."""

    def validate(self):
        """Check that every required system property exists for each
        cluster's configured storage backend.

        Fixes over the original:
        * `except AssertionError, e` (Python-2-only syntax) -> `as e`.
        * `self.systemproperties` was only assigned when the environment
          contained 'system-properties'; any cluster with a storage
          backend then raised AttributeError. It now defaults to {} so
          missing properties are reported as validation failures instead.
        """
        eucalyptus = self.environment['default_attributes']['eucalyptus']
        self.topology = eucalyptus['topology']
        # Default to an empty mapping so membership tests below are safe.
        self.systemproperties = eucalyptus.get('system-properties', {})
        for name in self.topology['clusters'].keys():
            cluster = self.topology['clusters'][name]
            if 'storage-backend' not in cluster:
                continue
            # Required properties per supported backend, checked in the
            # original order: netapp, ceph-rbd, threepar.
            required = [
                ('netapp', [name + '.storage.chapuser', name + '.storage.ncpaths',
                            name + '.storage.scpaths', name + '.storage.sanhost',
                            name + '.storage.sanpassword', name + '.storage.sanuser',
                            name + '.storage.vservername']),
                ('ceph-rbd', [name + '.storage.cephconfigfile', name + '.storage.cephkeyringfile',
                              name + '.storage.cephsnapshotpools', name + '.storage.cephuser',
                              name + '.storage.cephvolumepools']),
                ('threepar', [name + '.storage.chapuser', name + '.storage.ncpaths',
                              name + '.storage.sanhost', name + '.storage.sanuser',
                              name + '.storage.sanpassword', name + '.storage.scpaths',
                              name + '.storage.threeparwsport', name + '.storage.usercpg',
                              name + '.storage.copycpg']),
            ]
            for backend, storage_properties in required:
                if backend in cluster['storage-backend']:
                    for prop in storage_properties:
                        try:
                            assert prop in self.systemproperties
                            self.success(backend + ' system property ' + prop + ' is present.')
                        except AssertionError as e:
                            self.failure(backend + ' system property ' + prop + ' is missing or invalid! ' + str(e))
|
unknown
|
codeparrot/codeparrot-clean
| ||
# Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
# Copyright (c) 2009 The Hewlett-Packard Development Company
# Copyright (c) 2010 Advanced Micro Devices, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from slicc.ast.StatementAST import StatementAST
class StallAndWaitStatementAST(StatementAST):
    """AST node for SLICC's stall_and_wait(in_port, address) statement."""

    def __init__(self, slicc, in_port, address):
        # Bug fix: super() must be given *this* class, not its parent.
        # super(StatementAST, self) starts the MRO lookup *after*
        # StatementAST, silently skipping StatementAST.__init__.
        super(StallAndWaitStatementAST, self).__init__(slicc)
        self.in_port = in_port
        self.address = address

    def __repr__(self):
        return "[StallAndWaitStatementAst: %r]" % self.in_port

    def generate(self, code, return_type):
        """Emit C++ that stalls the message at *address* on *in_port*."""
        self.in_port.assertType("InPort")
        self.address.assertType("Addr")
        in_port_code = self.in_port.var.code
        address_code = self.address.var.code
        code('''
stallBuffer(&($in_port_code), $address_code);
$in_port_code.stallMessage($address_code, clockEdge());
''')
|
unknown
|
codeparrot/codeparrot-clean
| ||
(function(QUnit) {
  // Snapshot the real Backbone / browser-history APIs so tests can stub
  // them freely and testDone can restore the originals afterwards.
  var sync = Backbone.sync;
  var ajax = Backbone.ajax;
  var emulateHTTP = Backbone.emulateHTTP;
  var emulateJSON = Backbone.emulateJSON;
  var history = window.history;
  var pushState = history.pushState;
  var replaceState = history.replaceState;

  // Fail any test that leaks new global variables.
  QUnit.config.noglobals = true;

  QUnit.testStart(function() {
    var env = QUnit.config.current.testEnvironment;
    // We never want to actually call these during tests.
    history.pushState = history.replaceState = function() {};
    // Capture ajax settings for comparison.
    Backbone.ajax = function(settings) {
      env.ajaxSettings = settings;
    };
    // Capture the arguments to Backbone.sync for comparison.
    Backbone.sync = function(method, model, options) {
      env.syncArgs = {
        method: method,
        model: model,
        options: options
      };
      // Still delegate to the real sync so behavior is exercised.
      sync.apply(this, arguments);
    };
  });

  // Undo every stub installed in testStart.
  QUnit.testDone(function() {
    Backbone.sync = sync;
    Backbone.ajax = ajax;
    Backbone.emulateHTTP = emulateHTTP;
    Backbone.emulateJSON = emulateJSON;
    history.pushState = pushState;
    history.replaceState = replaceState;
  });
})(QUnit);
|
javascript
|
github
|
https://github.com/lodash/lodash
|
vendor/backbone/test/setup/environment.js
|
# Base classes for ASN.1 types
import sys
from pyasn1.type import constraint, tagmap
from pyasn1 import error
# Root marker class shared by every ASN.1 type/value object.
class Asn1Item: pass
class Asn1ItemBase(Asn1Item):
    """Base for all ASN.1 types: carries a tag set and subtype constraints."""
    # Set of tags for this ASN.1 type
    tagSet = ()
    # A list of constraint.Constraint instances for checking values
    subtypeSpec = constraint.ConstraintsIntersection()
    # Used for ambiguous ASN.1 types identification
    typeId = None

    def __init__(self, tagSet=None, subtypeSpec=None):
        # Instance-level tag set / constraints fall back to the
        # class-level defaults when not given.
        if tagSet is None:
            self._tagSet = self.tagSet
        else:
            self._tagSet = tagSet
        if subtypeSpec is None:
            self._subtypeSpec = self.subtypeSpec
        else:
            self._subtypeSpec = subtypeSpec

    def _verifySubtypeSpec(self, value, idx=None):
        # Re-raise constraint violations with the concrete type name appended.
        try:
            self._subtypeSpec(value, idx)
        except error.PyAsn1Error:
            c, i, t = sys.exc_info()
            raise c('%s at %s' % (i, self.__class__.__name__))

    def getSubtypeSpec(self): return self._subtypeSpec
    def getTagSet(self): return self._tagSet
    def getEffectiveTagSet(self): return self._tagSet  # used by untagged types
    def getTagMap(self): return tagmap.TagMap({self._tagSet: self})

    def isSameTypeWith(self, other):
        # Identical object, or matching tag set AND constraints.
        return self is other or \
               self._tagSet == other.getTagSet() and \
               self._subtypeSpec == other.getSubtypeSpec()

    def isSuperTypeOf(self, other):
        """Returns true if argument is a ASN1 subtype of ourselves"""
        return self._tagSet.isSuperTagSetOf(other.getTagSet()) and \
               self._subtypeSpec.isSuperTypeOf(other.getSubtypeSpec())
# Sentinel standing in for "no value assigned": any attribute or item
# access raises PyAsn1Error instead of silently succeeding.
class __NoValue:
    def __getattr__(self, attr):
        raise error.PyAsn1Error('No value for %s()' % attr)
    def __getitem__(self, i):
        raise error.PyAsn1Error('No value')
# Module-level singleton; compared with `is` throughout this module.
noValue = __NoValue()
# Base class for "simple" ASN.1 objects. These are immutable.
class AbstractSimpleAsn1Item(Asn1ItemBase):
    # Value used when the constructor receives no (usable) value.
    defaultValue = noValue

    def __init__(self, value=None, tagSet=None, subtypeSpec=None):
        Asn1ItemBase.__init__(self, tagSet, subtypeSpec)
        if value is None or value is noValue:
            value = self.defaultValue
        if value is None or value is noValue:
            # Still no usable value: store the sentinel for both the
            # value and its "hash".
            self.__hashedValue = value = noValue
        else:
            # Normalize, constraint-check, and cache the hash up front
            # (instances are immutable, so the hash never changes).
            value = self.prettyIn(value)
            self._verifySubtypeSpec(value)
            self.__hashedValue = hash(value)
        self._value = value
        self._len = None

    def __repr__(self):
        if self._value is noValue:
            return self.__class__.__name__ + '()'
        else:
            return self.__class__.__name__ + '(%s)' % (self.prettyOut(self._value),)

    def __str__(self): return str(self._value)

    def __eq__(self, other):
        # Identity short-circuit, else compare by underlying value.
        return self is other and True or self._value == other
    def __ne__(self, other): return self._value != other
    def __lt__(self, other): return self._value < other
    def __le__(self, other): return self._value <= other
    def __gt__(self, other): return self._value > other
    def __ge__(self, other): return self._value >= other

    # Truthiness follows the underlying value (py2 vs py3 spelling).
    if sys.version_info[0] <= 2:
        def __nonzero__(self): return bool(self._value)
    else:
        def __bool__(self): return bool(self._value)

    def __hash__(self): return self.__hashedValue

    def clone(self, value=None, tagSet=None, subtypeSpec=None):
        # Return self unchanged when nothing differs; otherwise build a
        # copy with the given overrides.
        if value is None and tagSet is None and subtypeSpec is None:
            return self
        if value is None:
            value = self._value
        if tagSet is None:
            tagSet = self._tagSet
        if subtypeSpec is None:
            subtypeSpec = self._subtypeSpec
        return self.__class__(value, tagSet, subtypeSpec)

    def subtype(self, value=None, implicitTag=None, explicitTag=None,
                subtypeSpec=None):
        # Like clone(), but tags are *added* (implicitly or explicitly)
        # and constraints are intersected rather than replaced.
        if value is None:
            value = self._value
        if implicitTag is not None:
            tagSet = self._tagSet.tagImplicitly(implicitTag)
        elif explicitTag is not None:
            tagSet = self._tagSet.tagExplicitly(explicitTag)
        else:
            tagSet = self._tagSet
        if subtypeSpec is None:
            subtypeSpec = self._subtypeSpec
        else:
            subtypeSpec = subtypeSpec + self._subtypeSpec
        return self.__class__(value, tagSet, subtypeSpec)

    # Hooks for subclasses: convert to/from the internal representation.
    def prettyIn(self, value): return value
    def prettyOut(self, value): return str(value)

    def prettyPrint(self, scope=0): return self.prettyOut(self._value)
    # XXX Compatibility stub
    def prettyPrinter(self, scope=0): return self.prettyPrint(scope)
#
# Constructed types:
# * There are five of them: Sequence, SequenceOf/SetOf, Set and Choice
# * ASN1 types and values are represented by Python class instances
# * Value initialization is made for defaulted components only
# * Primary method of component addressing is by-position. Data model for base
# type is Python sequence. Additional type-specific addressing methods
# may be implemented for particular types.
# * SequenceOf and SetOf types do not implement any additional methods
# * Sequence, Set and Choice types also implement by-identifier addressing
# * Sequence, Set and Choice types also implement by-asn1-type (tag) addressing
# * Sequence and Set types may include optional and defaulted
# components
# * Constructed types hold a reference to component types used for value
# verification and ordering.
# * Component type is a scalar type for SequenceOf/SetOf types and a list
# of types for Sequence/Set/Choice.
#
class AbstractConstructedAsn1Item(Asn1ItemBase):
    """Base for constructed ASN.1 types (Sequence/Set/Choice and the
    *Of variants); holds component values addressed by position."""
    # Component type spec: a scalar type for SequenceOf/SetOf, a list of
    # types for Sequence/Set/Choice.
    componentType = None
    # Constraints on the number of components.
    sizeSpec = constraint.ConstraintsIntersection()

    def __init__(self, componentType=None, tagSet=None,
                 subtypeSpec=None, sizeSpec=None):
        Asn1ItemBase.__init__(self, tagSet, subtypeSpec)
        if componentType is None:
            self._componentType = self.componentType
        else:
            self._componentType = componentType
        if sizeSpec is None:
            self._sizeSpec = self.sizeSpec
        else:
            self._sizeSpec = sizeSpec
        self._componentValues = []
        self._componentValuesSet = 0

    def __repr__(self):
        # Replayable representation: constructor call followed by the
        # setComponentByPosition() calls that rebuild the value.
        r = self.__class__.__name__ + '()'
        for idx in range(len(self._componentValues)):
            if self._componentValues[idx] is None:
                continue
            r = r + '.setComponentByPosition(%s, %r)' % (
                idx, self._componentValues[idx]
                )
        return r

    def __eq__(self, other):
        return self is other and True or self._componentValues == other
    def __ne__(self, other): return self._componentValues != other
    def __lt__(self, other): return self._componentValues < other
    def __le__(self, other): return self._componentValues <= other
    def __gt__(self, other): return self._componentValues > other
    def __ge__(self, other): return self._componentValues >= other

    # Truthiness follows the component list (py2 vs py3 spelling).
    if sys.version_info[0] <= 2:
        def __nonzero__(self): return bool(self._componentValues)
    else:
        def __bool__(self): return bool(self._componentValues)

    def getComponentTagMap(self):
        raise error.PyAsn1Error('Method not implemented')

    # Hook for subclasses to deep-copy component values into a clone.
    def _cloneComponentValues(self, myClone, cloneValueFlag): pass

    def clone(self, tagSet=None, subtypeSpec=None, sizeSpec=None,
              cloneValueFlag=None):
        # Copy with optional overrides; component values are copied only
        # when cloneValueFlag is truthy.
        if tagSet is None:
            tagSet = self._tagSet
        if subtypeSpec is None:
            subtypeSpec = self._subtypeSpec
        if sizeSpec is None:
            sizeSpec = self._sizeSpec
        r = self.__class__(self._componentType, tagSet, subtypeSpec, sizeSpec)
        if cloneValueFlag:
            self._cloneComponentValues(r, cloneValueFlag)
        return r

    def subtype(self, implicitTag=None, explicitTag=None, subtypeSpec=None,
                sizeSpec=None, cloneValueFlag=None):
        # Like clone(), but tags are added and constraints intersected.
        if implicitTag is not None:
            tagSet = self._tagSet.tagImplicitly(implicitTag)
        elif explicitTag is not None:
            tagSet = self._tagSet.tagExplicitly(explicitTag)
        else:
            tagSet = self._tagSet
        if subtypeSpec is None:
            subtypeSpec = self._subtypeSpec
        else:
            subtypeSpec = subtypeSpec + self._subtypeSpec
        if sizeSpec is None:
            sizeSpec = self._sizeSpec
        else:
            sizeSpec = sizeSpec + self._sizeSpec
        r = self.__class__(self._componentType, tagSet, subtypeSpec, sizeSpec)
        if cloneValueFlag:
            self._cloneComponentValues(r, cloneValueFlag)
        return r

    def _verifyComponent(self, idx, value): pass

    def verifySizeSpec(self): self._sizeSpec(self)

    def getComponentByPosition(self, idx):
        raise error.PyAsn1Error('Method not implemented')
    def setComponentByPosition(self, idx, value, verifyConstraints=True):
        raise error.PyAsn1Error('Method not implemented')

    def getComponentType(self): return self._componentType

    # Sequence protocol delegates to the by-position accessors.
    def __getitem__(self, idx): return self.getComponentByPosition(idx)
    def __setitem__(self, idx, value): self.setComponentByPosition(idx, value)
    def __len__(self): return len(self._componentValues)

    def clear(self):
        # Drop all components and reset the set-components counter.
        self._componentValues = []
        self._componentValuesSet = 0

    def setDefaultComponents(self): pass
|
unknown
|
codeparrot/codeparrot-clean
| ||
# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
try:
import json
except ImportError:
import simplejson as json
import boto
from boto.connection import AWSQueryConnection
from boto.regioninfo import RegionInfo
from boto.exception import JSONResponseError
from boto.sqs import exceptions
class SQSConnection(AWSQueryConnection):
    """
    Welcome to the Amazon Simple Queue Service API Reference . This
    section describes who should read this guide, how the guide is
    organized, and other resources related to the Amazon Simple Queue
    Service (Amazon SQS).
    Amazon SQS offers reliable and scalable hosted queues for storing
    messages as they travel between computers. By using Amazon SQS,
    you can move data between distributed components of your
    applications that perform different tasks without losing messages
    or requiring each component to be always available.
    Helpful Links:
    + `Current WSDL (2012-11-05)`_
    + `Making API Requests`_
    + `Amazon SQS product page`_
    + `Regions and Endpoints`_
    We also provide SDKs that enable you to access Amazon SQS from
    your preferred programming language. The SDKs contain
    functionality that automatically takes care of tasks such as:
    + Cryptographically signing your service requests
    + Retrying requests
    + Handling error responses
    For a list of available SDKs, go to `Tools for Amazon Web
    Services`_.
    """
    # Query API version this client targets.
    APIVersion = "2012-11-05"
    # Region and endpoint used when the caller does not supply a region.
    DefaultRegionName = "us-east-1"
    DefaultRegionEndpoint = "sqs.us-east-1.amazonaws.com"
    # Exception raised for faults whose code is not listed in _faults below.
    ResponseError = JSONResponseError
    # Maps service fault codes (the JSON 'Error'/'Code' field) to exception
    # classes; consulted by _make_request on non-200 responses.
    _faults = {
        "QueueDoesNotExist": exceptions.QueueDoesNotExist,
        "BatchEntryIdsNotDistinct": exceptions.BatchEntryIdsNotDistinct,
        "EmptyBatchRequest": exceptions.EmptyBatchRequest,
        "OverLimit": exceptions.OverLimit,
        "QueueNameExists": exceptions.QueueNameExists,
        "InvalidMessageContents": exceptions.InvalidMessageContents,
        "TooManyEntriesInBatchRequest": exceptions.TooManyEntriesInBatchRequest,
        "QueueDeletedRecently": exceptions.QueueDeletedRecently,
        "InvalidBatchEntryId": exceptions.InvalidBatchEntryId,
        "BatchRequestTooLong": exceptions.BatchRequestTooLong,
        "InvalidIdFormat": exceptions.InvalidIdFormat,
        "ReceiptHandleIsInvalid": exceptions.ReceiptHandleIsInvalid,
        "InvalidAttributeName": exceptions.InvalidAttributeName,
        "MessageNotInflight": exceptions.MessageNotInflight,
    }
def __init__(self, **kwargs):
region = kwargs.pop('region', None)
if not region:
region = RegionInfo(self, self.DefaultRegionName,
self.DefaultRegionEndpoint)
if 'host' not in kwargs:
kwargs['host'] = region.endpoint
super(SQSConnection, self).__init__(**kwargs)
self.region = region
    def _required_auth_capability(self):
        # SQS requests must be signed with AWS Signature Version 4.
        return ['hmac-v4']
def add_permission(self, queue_url, label, aws_account_ids, actions):
"""
Adds a permission to a queue for a specific `principal`_. This
allows for sharing access to the queue.
When you create a queue, you have full control access rights
for the queue. Only you (as owner of the queue) can grant or
deny permissions to the queue. For more information about
these permissions, see `Shared Queues`_ in the Amazon SQS
Developer Guide .
`AddPermission` writes an Amazon SQS-generated policy. If you
want to write your own policy, use SetQueueAttributes to
upload your policy. For more information about writing your
own policy, see `Using The Access Policy Language`_ in the
Amazon SQS Developer Guide .
Some API actions take lists of parameters. These lists are
specified using the `param.n` notation. Values of `n` are
integers starting from 1. For example, a parameter list with
two elements looks like this:
`&Attribute.1=this`
`&Attribute.2=that`
:type queue_url: string
:param queue_url: The URL of the Amazon SQS queue to take action on.
:type label: string
:param label: The unique identification of the permission you're
setting (e.g., `AliceSendMessage`). Constraints: Maximum 80
characters; alphanumeric characters, hyphens (-), and underscores
(_) are allowed.
:type aws_account_ids: list
:param aws_account_ids: The AWS account number of the `principal`_ who
will be given permission. The principal must have an AWS account,
but does not need to be signed up for Amazon SQS. For information
about locating the AWS account identification, see `Your AWS
Identifiers`_ in the Amazon SQS Developer Guide .
:type actions: list
:param actions: The action the client wants to allow for the specified
principal. The following are valid values: `* | SendMessage |
ReceiveMessage | DeleteMessage | ChangeMessageVisibility |
GetQueueAttributes | GetQueueUrl`. For more information about these
actions, see `Understanding Permissions`_ in the Amazon SQS
Developer Guide .
Specifying `SendMessage`, `DeleteMessage`, or `ChangeMessageVisibility`
for the `ActionName.n` also grants permissions for the
corresponding batch versions of those actions: `SendMessageBatch`,
`DeleteMessageBatch`, and `ChangeMessageVisibilityBatch`.
"""
params = {'QueueUrl': queue_url, 'Label': label, }
self.build_list_params(params,
aws_account_ids,
'AWSAccountIds.member')
self.build_list_params(params,
actions,
'Actions.member')
return self._make_request(
action='AddPermission',
verb='POST',
path='/', params=params)
def change_message_visibility(self, queue_url, receipt_handle,
visibility_timeout):
"""
Changes the visibility timeout of a specified message in a
queue to a new value. The maximum allowed timeout value you
can set the value to is 12 hours. This means you can't extend
the timeout of a message in an existing queue to more than a
total visibility timeout of 12 hours. (For more information
visibility timeout, see `Visibility Timeout`_ in the Amazon
SQS Developer Guide .)
For example, let's say you have a message and its default
message visibility timeout is 30 minutes. You could call
`ChangeMessageVisiblity` with a value of two hours and the
effective timeout would be two hours and 30 minutes. When that
time comes near you could again extend the time out by calling
ChangeMessageVisiblity, but this time the maximum allowed
timeout would be 9 hours and 30 minutes.
If you attempt to set the `VisibilityTimeout` to an amount
more than the maximum time left, Amazon SQS returns an error.
It will not automatically recalculate and increase the timeout
to the maximum time remaining. Unlike with a queue, when you
change the visibility timeout for a specific message, that
timeout value is applied immediately but is not saved in
memory for that message. If you don't delete a message after
it is received, the visibility timeout for the message the
next time it is received reverts to the original timeout
value, not the value you set with the
`ChangeMessageVisibility` action.
:type queue_url: string
:param queue_url: The URL of the Amazon SQS queue to take action on.
:type receipt_handle: string
:param receipt_handle: The receipt handle associated with the message
whose visibility timeout should be changed. This parameter is
returned by the ReceiveMessage action.
:type visibility_timeout: integer
:param visibility_timeout: The new value (in seconds - from 0 to 43200
- maximum 12 hours) for the message's visibility timeout.
"""
params = {
'QueueUrl': queue_url,
'ReceiptHandle': receipt_handle,
'VisibilityTimeout': visibility_timeout,
}
return self._make_request(
action='ChangeMessageVisibility',
verb='POST',
path='/', params=params)
def change_message_visibility_batch(self, queue_url, entries):
"""
Changes the visibility timeout of multiple messages. This is a
batch version of ChangeMessageVisibility. The result of the
action on each message is reported individually in the
response. You can send up to 10 ChangeMessageVisibility
requests with each `ChangeMessageVisibilityBatch` action.
Because the batch request can result in a combination of
successful and unsuccessful actions, you should check for
batch errors even when the call returns an HTTP status code of
200. Some API actions take lists of parameters. These lists
are specified using the `param.n` notation. Values of `n` are
integers starting from 1. For example, a parameter list with
two elements looks like this:
`&Attribute.1=this`
`&Attribute.2=that`
:type queue_url: string
:param queue_url: The URL of the Amazon SQS queue to take action on.
:type entries: list
:param entries: A list of receipt handles of the messages for which the
visibility timeout must be changed.
"""
params = {'QueueUrl': queue_url, }
self.build_complex_list_params(
params, entries,
'Entries.member',
('Id', 'ReceiptHandle', 'VisibilityTimeout'))
return self._make_request(
action='ChangeMessageVisibilityBatch',
verb='POST',
path='/', params=params)
def create_queue(self, queue_name, attributes=None):
"""
Creates a new queue, or returns the URL of an existing one.
When you request `CreateQueue`, you provide a name for the
queue. To successfully create a new queue, you must provide a
name that is unique within the scope of your own queues.
If you delete a queue, you must wait at least 60 seconds
before creating a queue with the same name.
You may pass one or more attributes in the request. If you do
not provide a value for any attribute, the queue will have the
default value for that attribute. Permitted attributes are the
same that can be set using SetQueueAttributes.
Use GetQueueUrl to get a queue's URL. GetQueueUrl requires
only the `QueueName` parameter.
If you provide the name of an existing queue, along with the
exact names and values of all the queue's attributes,
`CreateQueue` returns the queue URL for the existing queue. If
the queue name, attribute names, or attribute values do not
match an existing queue, `CreateQueue` returns an error.
Some API actions take lists of parameters. These lists are
specified using the `param.n` notation. Values of `n` are
integers starting from 1. For example, a parameter list with
two elements looks like this:
`&Attribute.1=this`
`&Attribute.2=that`
:type queue_name: string
:param queue_name: The name for the queue to be created.
:type attributes: map
:param attributes: A map of attributes with their corresponding values.
The following lists the names, descriptions, and values of the special
request parameters the `CreateQueue` action uses:
+ `DelaySeconds` - The time in seconds that the delivery of all
messages in the queue will be delayed. An integer from 0 to 900 (15
minutes). The default for this attribute is 0 (zero).
+ `MaximumMessageSize` - The limit of how many bytes a message can
contain before Amazon SQS rejects it. An integer from 1024 bytes (1
KiB) up to 262144 bytes (256 KiB). The default for this attribute
is 262144 (256 KiB).
+ `MessageRetentionPeriod` - The number of seconds Amazon SQS retains a
message. Integer representing seconds, from 60 (1 minute) to
1209600 (14 days). The default for this attribute is 345600 (4
days).
+ `Policy` - The queue's policy. A valid form-url-encoded policy. For
more information about policy structure, see `Basic Policy
Structure`_ in the Amazon SQS Developer Guide . For more
information about form-url-encoding, see `http://www.w3.org/MarkUp
/html-spec/html-spec_8.html#SEC8.2.1`_.
+ `ReceiveMessageWaitTimeSeconds` - The time for which a ReceiveMessage
call will wait for a message to arrive. An integer from 0 to 20
(seconds). The default for this attribute is 0.
+ `VisibilityTimeout` - The visibility timeout for the queue. An
integer from 0 to 43200 (12 hours). The default for this attribute
is 30. For more information about visibility timeout, see
`Visibility Timeout`_ in the Amazon SQS Developer Guide .
"""
params = {'QueueName': queue_name, }
if attributes is not None:
params['Attributes'] = attributes
return self._make_request(
action='CreateQueue',
verb='POST',
path='/', params=params)
def delete_message(self, queue_url, receipt_handle):
"""
Deletes the specified message from the specified queue. You
specify the message by using the message's `receipt handle`
and not the `message ID` you received when you sent the
message. Even if the message is locked by another reader due
to the visibility timeout setting, it is still deleted from
the queue. If you leave a message in the queue for longer than
the queue's configured retention period, Amazon SQS
automatically deletes it.
The receipt handle is associated with a specific instance of
receiving the message. If you receive a message more than
once, the receipt handle you get each time you receive the
message is different. When you request `DeleteMessage`, if you
don't provide the most recently received receipt handle for
the message, the request will still succeed, but the message
might not be deleted.
It is possible you will receive a message even after you have
deleted it. This might happen on rare occasions if one of the
servers storing a copy of the message is unavailable when you
request to delete the message. The copy remains on the server
and might be returned to you again on a subsequent receive
request. You should create your system to be idempotent so
that receiving a particular message more than once is not a
problem.
:type queue_url: string
:param queue_url: The URL of the Amazon SQS queue to take action on.
:type receipt_handle: string
:param receipt_handle: The receipt handle associated with the message
to delete.
"""
params = {
'QueueUrl': queue_url,
'ReceiptHandle': receipt_handle,
}
return self._make_request(
action='DeleteMessage',
verb='POST',
path='/', params=params)
def delete_message_batch(self, queue_url, entries):
"""
Deletes multiple messages. This is a batch version of
DeleteMessage. The result of the delete action on each message
is reported individually in the response.
Because the batch request can result in a combination of
successful and unsuccessful actions, you should check for
batch errors even when the call returns an HTTP status code of
200.
Some API actions take lists of parameters. These lists are
specified using the `param.n` notation. Values of `n` are
integers starting from 1. For example, a parameter list with
two elements looks like this:
`&Attribute.1=this`
`&Attribute.2=that`
:type queue_url: string
:param queue_url: The URL of the Amazon SQS queue to take action on.
:type entries: list
:param entries: A list of receipt handles for the messages to be
deleted.
"""
params = {'QueueUrl': queue_url, }
self.build_complex_list_params(
params, entries,
'Entries.member',
('Id', 'ReceiptHandle'))
return self._make_request(
action='DeleteMessageBatch',
verb='POST',
path='/', params=params)
def delete_queue(self, queue_url):
"""
Deletes the queue specified by the **queue URL**, regardless
of whether the queue is empty. If the specified queue does not
exist, Amazon SQS returns a successful response.
Use `DeleteQueue` with care; once you delete your queue, any
messages in the queue are no longer available.
When you delete a queue, the deletion process takes up to 60
seconds. Requests you send involving that queue during the 60
seconds might succeed. For example, a SendMessage request
might succeed, but after the 60 seconds, the queue and that
message you sent no longer exist. Also, when you delete a
queue, you must wait at least 60 seconds before creating a
queue with the same name.
We reserve the right to delete queues that have had no
activity for more than 30 days. For more information, see `How
Amazon SQS Queues Work`_ in the Amazon SQS Developer Guide .
:type queue_url: string
:param queue_url: The URL of the Amazon SQS queue to take action on.
"""
params = {'QueueUrl': queue_url, }
return self._make_request(
action='DeleteQueue',
verb='POST',
path='/', params=params)
def get_queue_attributes(self, queue_url, attribute_names=None):
"""
Gets attributes for the specified queue. The following
attributes are supported:
+ `All` - returns all values.
+ `ApproximateNumberOfMessages` - returns the approximate
number of visible messages in a queue. For more information,
see `Resources Required to Process Messages`_ in the Amazon
SQS Developer Guide .
+ `ApproximateNumberOfMessagesNotVisible` - returns the
approximate number of messages that are not timed-out and not
deleted. For more information, see `Resources Required to
Process Messages`_ in the Amazon SQS Developer Guide .
+ `VisibilityTimeout` - returns the visibility timeout for the
queue. For more information about visibility timeout, see
`Visibility Timeout`_ in the Amazon SQS Developer Guide .
+ `CreatedTimestamp` - returns the time when the queue was
created (epoch time in seconds).
+ `LastModifiedTimestamp` - returns the time when the queue
was last changed (epoch time in seconds).
+ `Policy` - returns the queue's policy.
+ `MaximumMessageSize` - returns the limit of how many bytes a
message can contain before Amazon SQS rejects it.
+ `MessageRetentionPeriod` - returns the number of seconds
Amazon SQS retains a message.
+ `QueueArn` - returns the queue's Amazon resource name (ARN).
+ `ApproximateNumberOfMessagesDelayed` - returns the
approximate number of messages that are pending to be added to
the queue.
+ `DelaySeconds` - returns the default delay on the queue in
seconds.
+ `ReceiveMessageWaitTimeSeconds` - returns the time for which
a ReceiveMessage call will wait for a message to arrive.
+ `RedrivePolicy` - returns the parameters for dead letter
queue functionality of the source queue. For more information
about RedrivePolicy and dead letter queues, see `Using Amazon
SQS Dead Letter Queues`_ in the Amazon SQS Developer Guide .
Going forward, new attributes might be added. If you are
writing code that calls this action, we recommend that you
structure your code so that it can handle new attributes
gracefully. Some API actions take lists of parameters. These
lists are specified using the `param.n` notation. Values of
`n` are integers starting from 1. For example, a parameter
list with two elements looks like this:
`&Attribute.1=this`
`&Attribute.2=that`
:type queue_url: string
:param queue_url: The URL of the Amazon SQS queue to take action on.
:type attribute_names: list
:param attribute_names: A list of attributes to retrieve information
for.
"""
params = {'QueueUrl': queue_url, }
if attribute_names is not None:
self.build_list_params(params,
attribute_names,
'AttributeNames.member')
return self._make_request(
action='GetQueueAttributes',
verb='POST',
path='/', params=params)
def get_queue_url(self, queue_name, queue_owner_aws_account_id=None):
"""
Returns the URL of an existing queue. This action provides a
simple way to retrieve the URL of an Amazon SQS queue.
To access a queue that belongs to another AWS account, use the
`QueueOwnerAWSAccountId` parameter to specify the account ID
of the queue's owner. The queue's owner must grant you
permission to access the queue. For more information about
shared queue access, see AddPermission or go to `Shared
Queues`_ in the Amazon SQS Developer Guide .
:type queue_name: string
:param queue_name: The name of the queue whose URL must be fetched.
Maximum 80 characters; alphanumeric characters, hyphens (-), and
underscores (_) are allowed.
:type queue_owner_aws_account_id: string
:param queue_owner_aws_account_id: The AWS account ID of the account
that created the queue.
"""
params = {'QueueName': queue_name, }
if queue_owner_aws_account_id is not None:
params['QueueOwnerAWSAccountId'] = queue_owner_aws_account_id
return self._make_request(
action='GetQueueUrl',
verb='POST',
path='/', params=params)
def list_dead_letter_source_queues(self, queue_url):
"""
Returns a list of your queues that have the RedrivePolicy
queue attribute configured with a dead letter queue.
:type queue_url: string
:param queue_url: The queue URL of a dead letter queue.
"""
params = {'QueueUrl': queue_url, }
return self._make_request(
action='ListDeadLetterSourceQueues',
verb='POST',
path='/', params=params)
def list_queues(self, queue_name_prefix=None):
"""
Returns a list of your queues. The maximum number of queues
that can be returned is 1000. If you specify a value for the
optional `QueueNamePrefix` parameter, only queues with a name
beginning with the specified value are returned.
:type queue_name_prefix: string
:param queue_name_prefix: A string to use for filtering the list
results. Only those queues whose name begins with the specified
string are returned.
"""
params = {}
if queue_name_prefix is not None:
params['QueueNamePrefix'] = queue_name_prefix
return self._make_request(
action='ListQueues',
verb='POST',
path='/', params=params)
def receive_message(self, queue_url, attribute_names=None,
max_number_of_messages=None, visibility_timeout=None,
wait_time_seconds=None):
"""
Retrieves one or more messages from the specified queue. Long
poll support is enabled by using the `WaitTimeSeconds`
parameter. For more information, see `Amazon SQS Long Poll`_
in the Amazon SQS Developer Guide .
Short poll is the default behavior where a weighted random set
of machines is sampled on a `ReceiveMessage` call. This means
only the messages on the sampled machines are returned. If the
number of messages in the queue is small (less than 1000), it
is likely you will get fewer messages than you requested per
`ReceiveMessage` call. If the number of messages in the queue
is extremely small, you might not receive any messages in a
particular `ReceiveMessage` response; in which case you should
repeat the request.
For each message returned, the response includes the
following:
+ Message body
+ MD5 digest of the message body. For information about MD5,
go to `http://www.faqs.org/rfcs/rfc1321.html`_.
+ Message ID you received when you sent the message to the
queue.
+ Receipt handle.
The receipt handle is the identifier you must provide when
deleting the message. For more information, see `Queue and
Message Identifiers`_ in the Amazon SQS Developer Guide .
You can provide the `VisibilityTimeout` parameter in your
request, which will be applied to the messages that Amazon SQS
returns in the response. If you do not include the parameter,
the overall visibility timeout for the queue is used for the
returned messages. For more information, see `Visibility
Timeout`_ in the Amazon SQS Developer Guide .
Going forward, new attributes might be added. If you are
writing code that calls this action, we recommend that you
structure your code so that it can handle new attributes
gracefully.
:type queue_url: string
:param queue_url: The URL of the Amazon SQS queue to take action on.
:type attribute_names: list
:param attribute_names:
A list of attributes that need to be returned along with each message.
The following lists the names and descriptions of the attributes that
can be returned:
+ `All` - returns all values.
+ `ApproximateFirstReceiveTimestamp` - returns the time when the
message was first received (epoch time in milliseconds).
+ `ApproximateReceiveCount` - returns the number of times a message has
been received but not deleted.
+ `SenderId` - returns the AWS account number (or the IP address, if
anonymous access is allowed) of the sender.
+ `SentTimestamp` - returns the time when the message was sent (epoch
time in milliseconds).
:type max_number_of_messages: integer
:param max_number_of_messages: The maximum number of messages to
return. Amazon SQS never returns more messages than this value but
may return fewer.
All of the messages are not necessarily returned.
:type visibility_timeout: integer
:param visibility_timeout: The duration (in seconds) that the received
messages are hidden from subsequent retrieve requests after being
retrieved by a `ReceiveMessage` request.
:type wait_time_seconds: integer
:param wait_time_seconds: The duration (in seconds) for which the call
will wait for a message to arrive in the queue before returning. If
a message is available, the call will return sooner than
WaitTimeSeconds.
"""
params = {'QueueUrl': queue_url, }
if attribute_names is not None:
self.build_list_params(params,
attribute_names,
'AttributeNames.member')
if max_number_of_messages is not None:
params['MaxNumberOfMessages'] = max_number_of_messages
if visibility_timeout is not None:
params['VisibilityTimeout'] = visibility_timeout
if wait_time_seconds is not None:
params['WaitTimeSeconds'] = wait_time_seconds
return self._make_request(
action='ReceiveMessage',
verb='POST',
path='/', params=params)
def remove_permission(self, queue_url, label):
"""
Revokes any permissions in the queue policy that matches the
specified `Label` parameter. Only the owner of the queue can
remove permissions.
:type queue_url: string
:param queue_url: The URL of the Amazon SQS queue to take action on.
:type label: string
:param label: The identification of the permission to remove. This is
the label added with the AddPermission action.
"""
params = {'QueueUrl': queue_url, 'Label': label, }
return self._make_request(
action='RemovePermission',
verb='POST',
path='/', params=params)
def send_message(self, queue_url, message_body, delay_seconds=None):
"""
Delivers a message to the specified queue. With Amazon SQS,
you now have the ability to send large payload messages that
are up to 256KB (262,144 bytes) in size. To send large
payloads, you must use an AWS SDK that supports SigV4 signing.
To verify whether SigV4 is supported for an AWS SDK, check the
SDK release notes.
The following list shows the characters (in Unicode) allowed
in your message, according to the W3C XML specification. For
more information, go to `http://www.w3.org/TR/REC-
xml/#charsets`_ If you send any characters not included in the
list, your request will be rejected.
#x9 | #xA | #xD | [#x20 to #xD7FF] | [#xE000 to #xFFFD] |
[#x10000 to #x10FFFF]
:type queue_url: string
:param queue_url: The URL of the Amazon SQS queue to take action on.
:type message_body: string
:param message_body: The message to send. String maximum 256 KB in
size. For a list of allowed characters, see the preceding important
note.
:type delay_seconds: integer
:param delay_seconds: The number of seconds (0 to 900 - 15 minutes) to
delay a specific message. Messages with a positive `DelaySeconds`
value become available for processing after the delay time is
finished. If you don't specify a value, the default value for the
queue applies.
"""
params = {
'QueueUrl': queue_url,
'MessageBody': message_body,
}
if delay_seconds is not None:
params['DelaySeconds'] = delay_seconds
return self._make_request(
action='SendMessage',
verb='POST',
path='/', params=params)
def send_message_batch(self, queue_url, entries):
"""
Delivers up to ten messages to the specified queue. This is a
batch version of SendMessage. The result of the send action on
each message is reported individually in the response. The
maximum allowed individual message size is 256 KB (262,144
bytes).
The maximum total payload size (i.e., the sum of all a batch's
individual message lengths) is also 256 KB (262,144 bytes).
If the `DelaySeconds` parameter is not specified for an entry,
the default for the queue is used.
The following list shows the characters (in Unicode) that are
allowed in your message, according to the W3C XML
specification. For more information, go to
`http://www.faqs.org/rfcs/rfc1321.html`_. If you send any
characters that are not included in the list, your request
will be rejected.
#x9 | #xA | #xD | [#x20 to #xD7FF] | [#xE000 to #xFFFD] |
[#x10000 to #x10FFFF]
Because the batch request can result in a combination of
successful and unsuccessful actions, you should check for
batch errors even when the call returns an HTTP status code of
200. Some API actions take lists of parameters. These lists
are specified using the `param.n` notation. Values of `n` are
integers starting from 1. For example, a parameter list with
two elements looks like this:
`&Attribute.1=this`
`&Attribute.2=that`
:type queue_url: string
:param queue_url: The URL of the Amazon SQS queue to take action on.
:type entries: list
:param entries: A list of SendMessageBatchRequestEntry items.
"""
params = {'QueueUrl': queue_url, }
self.build_complex_list_params(
params, entries,
'Entries.member',
('Id', 'MessageBody', 'DelaySeconds'))
return self._make_request(
action='SendMessageBatch',
verb='POST',
path='/', params=params)
def set_queue_attributes(self, queue_url, attributes):
"""
Sets the value of one or more queue attributes.
Going forward, new attributes might be added. If you are
writing code that calls this action, we recommend that you
structure your code so that it can handle new attributes
gracefully.
:type queue_url: string
:param queue_url: The URL of the Amazon SQS queue to take action on.
:type attributes: map
:param attributes: A map of attributes to set.
The following lists the names, descriptions, and values of the special
request parameters the `SetQueueAttributes` action uses:
+ `DelaySeconds` - The time in seconds that the delivery of all
messages in the queue will be delayed. An integer from 0 to 900 (15
minutes). The default for this attribute is 0 (zero).
+ `MaximumMessageSize` - The limit of how many bytes a message can
contain before Amazon SQS rejects it. An integer from 1024 bytes (1
KiB) up to 262144 bytes (256 KiB). The default for this attribute
is 262144 (256 KiB).
+ `MessageRetentionPeriod` - The number of seconds Amazon SQS retains a
message. Integer representing seconds, from 60 (1 minute) to
1209600 (14 days). The default for this attribute is 345600 (4
days).
+ `Policy` - The queue's policy. A valid form-url-encoded policy. For
more information about policy structure, see `Basic Policy
Structure`_ in the Amazon SQS Developer Guide . For more
information about form-url-encoding, see `http://www.w3.org/MarkUp
/html-spec/html-spec_8.html#SEC8.2.1`_.
+ `ReceiveMessageWaitTimeSeconds` - The time for which a ReceiveMessage
call will wait for a message to arrive. An integer from 0 to 20
(seconds). The default for this attribute is 0.
+ `VisibilityTimeout` - The visibility timeout for the queue. An
integer from 0 to 43200 (12 hours). The default for this attribute
is 30. For more information about visibility timeout, see
Visibility Timeout in the Amazon SQS Developer Guide .
+ `RedrivePolicy` - The parameters for dead letter queue functionality
of the source queue. For more information about RedrivePolicy and
dead letter queues, see Using Amazon SQS Dead Letter Queues in the
Amazon SQS Developer Guide .
"""
params = {'QueueUrl': queue_url, }
# TODO: NEED TO PROCESS COMPLEX ARG attributes of type map.
return self._make_request(
action='SetQueueAttributes',
verb='POST',
path='/', params=params)
def _make_request(self, action, verb, path, params):
    """Send a request to the service and decode the JSON response.

    :param action: Name of the API action to invoke.
    :param verb: HTTP verb to use for the request.
    :param path: URL path to send the request to.
    :param params: Dict of request parameters; mutated here to force a
        JSON-encoded response body.
    :raises: A fault-specific exception class looked up in ``self._faults``
        by the error code, or ``self.ResponseError`` when unknown.
    """
    params['ContentType'] = 'JSON'
    # Bug fix: honor the caller-supplied verb/path instead of always
    # hard-coding POST to '/'. All callers in this file already pass
    # verb='POST' and path='/', so their behavior is unchanged.
    response = self.make_request(action=action, verb=verb,
                                 path=path, params=params)
    body = response.read()
    boto.log.debug(body)
    if response.status == 200:
        return json.loads(body)
    else:
        json_body = json.loads(body)
        fault_name = json_body.get('Error', {}).get('Code', None)
        exception_class = self._faults.get(fault_name, self.ResponseError)
        raise exception_class(response.status, response.reason,
                              body=json_body)
|
unknown
|
codeparrot/codeparrot-clean
| ||
# Copyright (c) 2014, HashFast Technologies LLC
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of HashFast Technologies LLC nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL HASHFAST TECHNOLOGIES LLC BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
unknown
|
codeparrot/codeparrot-clean
| ||
<!--Copyright 2020 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# Utilities for Tokenizers
This page lists all the utility functions used by the tokenizers, mainly the class
[`~tokenization_utils_base.PreTrainedTokenizerBase`] that implements the common methods between
[`PreTrainedTokenizer`] and [`PreTrainedTokenizerFast`].
Most of those are only useful if you are studying the code of the tokenizers in the library.
## PreTrainedTokenizerBase
[[autodoc]] tokenization_utils_base.PreTrainedTokenizerBase
- __call__
- all
## Enums and namedtuples
[[autodoc]] tokenization_utils_base.TruncationStrategy
[[autodoc]] tokenization_utils_base.CharSpan
[[autodoc]] tokenization_utils_base.TokenSpan
|
unknown
|
github
|
https://github.com/huggingface/transformers
|
docs/source/en/internal/tokenization_utils.md
|
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Core helpers for loading model checkpoints."""
from __future__ import annotations
import math
import os
import re
import traceback
from abc import abstractmethod
from collections import defaultdict
from collections.abc import Callable
from concurrent.futures import Future, ThreadPoolExecutor
from contextlib import contextmanager
from copy import deepcopy
from dataclasses import dataclass, field
from itertools import chain
from typing import TYPE_CHECKING, Any
import torch
from .integrations.accelerate import get_device, offload_weight
from .integrations.tensor_parallel import ALL_PARALLEL_STYLES
from .utils import is_env_variable_true, logging
from .utils.loading_report import LoadStateDictInfo
_torch_distributed_available = torch.distributed.is_available()
if TYPE_CHECKING:
from .integrations.tensor_parallel import TensorParallelLayer
from .modeling_utils import LoadStateDictConfig, PreTrainedModel
from .quantizers import HfQuantizer
logger = logging.get_logger(__name__)
def build_glob_alternation(
    globs: list[WeightRenaming | WeightConverter | str],
) -> tuple[re.Pattern, dict[str, str], dict[str, str]]:
    """
    Compile all globs into one alternation regex, one named group per glob.

    Returns the compiled pattern plus two lookup tables mapping each group
    name back to its source glob and to its (first) target glob.
    """
    source_by_group: dict[str, str] = {}
    target_by_group: dict[str, str] = {}
    pieces: list[str] = []
    counter = 0
    for entry in globs:
        if isinstance(entry, (WeightRenaming, WeightConverter)):
            for src_glob in entry.source_patterns:
                name = f"g{counter}"
                counter += 1
                source_by_group[name] = src_glob
                body = src_glob.replace("*", r".*")
                pieces.append(f"(?P<{name}>{body})")
                # Index with the first target pattern only
                target_by_group[name] = entry.target_patterns[0]
        else:
            name = f"g{counter}"
            counter += 1
            source_by_group[name] = entry
            body = entry.replace("*", r".*")
            pieces.append(f"(?P<{name}>{body})")
            target_by_group[name] = entry
    return re.compile("|".join(pieces)), source_by_group, target_by_group
class ConversionOps:
    """Base class for weight conversion operations."""

    def __repr__(self):
        # Axis-based operations advertise their dimension in their repr.
        name = type(self).__name__
        return f"{name}(dim={self.dim})" if hasattr(self, "dim") else f"{name}"

    @abstractmethod
    def convert(
        self, input_dict: dict[str, Any], source_patterns: list[str], target_patterns: list[str], **kwargs
    ) -> dict[str, list[torch.Tensor]]:
        raise NotImplementedError

    @property
    def reverse_op(self) -> ConversionOps:
        """The operation undoing this one; reversible subclasses override it."""
        raise NotImplementedError
class Chunk(ConversionOps):
    """Split a tensor along ``dim`` into equally sized chunks."""

    def __init__(self, dim: int = 0):
        self.dim = dim

    @torch.no_grad
    def convert(
        self, input_dict: dict[str, torch.Tensor], source_patterns: list[str], target_patterns: list[str], **kwargs
    ) -> dict[str, torch.Tensor]:
        value = next(iter(input_dict.values()))
        if isinstance(value, list):
            value = value[0]
        names = self.get_target_patterns(input_dict, target_patterns)
        pieces = torch.chunk(value, len(names), dim=self.dim)
        return {name: piece for name, piece in zip(names, pieces)}

    def get_target_patterns(self, input_dict: dict, target_patterns: list[str]) -> list[str]:
        # A chunk is only well-defined for one source and several targets.
        if len(input_dict) > 1 or len(target_patterns) == 1:
            raise ValueError("Undefined Operation encountered!")
        return target_patterns

    @property
    def reverse_op(self) -> ConversionOps:
        return Concatenate(self.dim)
class Concatenate(ConversionOps):
    """Concatenate tensors along `dim`."""

    def __init__(self, dim: int = 0):
        self.dim = dim

    @torch.no_grad
    def convert(
        self,
        input_dict: dict[str, list[torch.Tensor]],
        source_patterns: list[str],
        target_patterns: list[str],
        **kwargs,
    ) -> dict[str, torch.Tensor]:
        name = self.get_target_pattern(target_patterns)
        # Iterate over the source patterns (not the dict) to keep their
        # relative order — the input mapping itself is unordered.
        ordered: list[torch.Tensor] = []
        for pattern in source_patterns:
            entry = input_dict[pattern]
            ordered.extend(entry if isinstance(entry, list) else [entry])
        return {name: torch.cat(ordered, dim=self.dim)}

    def get_target_pattern(self, target_patterns: list[str]) -> str:
        # A concatenation can only produce a single target.
        if len(target_patterns) > 1:
            raise ValueError("Undefined Operation encountered!")
        return target_patterns[0]

    @property
    def reverse_op(self) -> ConversionOps:
        return Chunk(self.dim)
class MergeModulelist(ConversionOps):
    """
    Merge a list of tensors into a single tensor along the first dimension.
    We explicitly define this because for EP or TP you want to make sure you know what you are doing!
    """

    def __init__(self, dim: int = 0):
        self.dim = dim

    @torch.no_grad
    def convert(
        self,
        input_dict: dict[str, list[torch.Tensor]],
        source_patterns: list[str],
        target_patterns: list[str],
        **kwargs,
    ) -> dict[str, torch.Tensor]:
        stacked: dict[str, torch.Tensor] = {}
        for pattern, tensor_list in input_dict.items():
            name = self.get_target_pattern(input_dict, pattern, target_patterns)
            stacked[name] = torch.stack(tensor_list, dim=self.dim)
        return stacked

    def get_target_pattern(self, input_dict: dict, source_pattern: str, target_patterns: list[str]) -> str:
        if len(input_dict) != 1:
            # First operation in a chain: keep the source name, it was
            # already replaced earlier in the chain.
            return source_pattern
        # Single operation: use the (unique) target.
        if len(target_patterns) != 1:
            raise ValueError("Undefined Operation encountered!")
        return target_patterns[0]

    @property
    def reverse_op(self) -> ConversionOps:
        return SplitModulelist(self.dim)
class SplitModulelist(ConversionOps):
    """Inverse of :class:`MergeModulelist` using explicit split sizes per group."""

    def __init__(self, dim: int = 0):
        self.dim = dim

    @torch.no_grad
    def convert(
        self, input_dict: dict[str, torch.Tensor], source_patterns: list[str], target_patterns: list[str], **kwargs
    ) -> dict[str, torch.Tensor]:
        result: dict[str, torch.Tensor] = {}
        for pattern, value in input_dict.items():
            tensor = value[0] if isinstance(value, list) else value
            # Produce one chunk per entry along the split dimension.
            n_chunks = tensor.size(self.dim)
            names = self.get_target_patterns(input_dict, pattern, target_patterns, n_chunks)
            for name, piece in zip(names, torch.chunk(tensor, n_chunks, dim=self.dim)):
                # Squeeze to restore each entry's original (un-stacked) shape.
                result[name] = piece.squeeze()
        return result

    def get_target_patterns(
        self, input_dict: dict, source_pattern: str, target_patterns: list[str], sizes: int
    ) -> list[str]:
        if len(input_dict) != 1:
            # Last operation of a chain: expand the source names, they were
            # already replaced earlier in the chain.
            return [source_pattern.replace("*", f"{i}") for i in range(sizes)]
        # Single operation: expand the (unique) target.
        if len(target_patterns) != 1:
            raise ValueError("Undefined Operation encountered!")
        return [target_patterns[0].replace("*", f"{i}") for i in range(sizes)]

    @property
    def reverse_op(self) -> ConversionOps:
        return MergeModulelist(self.dim)
class Transpose(ConversionOps):
    """
    Transposes the given tensor along dim0 and dim1.
    """

    def __init__(self, dim0: int = 0, dim1: int = 1):
        self.dim0 = dim0
        self.dim1 = dim1

    @torch.no_grad
    def convert(
        self, input_dict: dict[str, torch.Tensor], source_patterns: list[str], target_patterns: list[str], **kwargs
    ) -> dict[str, torch.Tensor]:
        name = self.get_target_pattern(input_dict, source_patterns, target_patterns)
        value = next(iter(input_dict.values()))
        if isinstance(value, list):
            value = value[0]
        return {name: torch.transpose(value, dim0=self.dim0, dim1=self.dim1).contiguous()}

    def get_target_pattern(
        self, input_dict: dict[str, torch.Tensor], source_patterns: list[str], target_patterns: list[str]
    ) -> str:
        if len(input_dict) != 1:
            raise ValueError("Undefined Operation encountered!")
        if len(target_patterns) == 1:
            # Only / last operation in a chain: use the target.
            return target_patterns[0]
        if len(source_patterns) == 1:
            # First operation of a chain: use the source.
            return source_patterns[0]
        raise ValueError("Undefined Operation encountered!")

    @property
    def reverse_op(self) -> ConversionOps:
        return Transpose(dim0=self.dim1, dim1=self.dim0)
class PermuteForRope(ConversionOps):
    """
    Applies the permutation required to convert complex RoPE weights to the split sin/cos format.
    """

    def __init__(self):
        pass

    def _apply(self, tensor: torch.Tensor) -> torch.Tensor:
        # Reorder per-head rotary pairs: view as
        # (heads, dim1/heads/2, 2, dim2), swap the pair axis with the
        # half-dim axis, then flatten back to the original (dim1, dim2).
        dim1, dim2 = tensor.shape
        # NOTE(review): `self.config.getattr(...)` assumes the config object
        # exposes a `getattr` method taking a default — confirm against the
        # config class actually passed by callers.
        n_heads = self.config.getattr("num_attention_heads", 1)
        tensor = tensor.view(n_heads, dim1 // n_heads // 2, 2, dim2)
        tensor = tensor.transpose(1, 2).reshape(dim1, dim2)
        return tensor

    @torch.no_grad
    def convert(
        self,
        input_dict: dict[str, list[torch.Tensor]],
        source_patterns: list[str],
        target_patterns: list[str],
        config,
        **kwargs,
    ) -> dict[str, list[torch.Tensor]]:
        """Apply the RoPE permutation to every collected tensor, key by key."""
        # Stash the config so `_apply` can read the head count.
        self.config = config
        output: dict[str, list[torch.Tensor]] = {}
        for key, tensors in input_dict.items():
            if len(tensors) != 1:
                raise ValueError("PermuteForRope expects a single tensor per key.")
            output[key] = [self._apply(tensors[0])]
        return output
class ErnieFuseAndSplitTextVisionExperts(ConversionOps):
    r"""
    Special operation that splits a module list over all keys and fuses over the number of original modules.
    Example with 2 original modules "Gate" and "Up" with 2 target keys "Text" and "Vision":
            ModuleList 1                ModuleList 2
             [ Gate ]                     [ Up ]
            |        |                  |        |
      [Gate_Text] [Gate_Vision]   [Up_Text] [Up_Vision]
            \          \             /         /
             \          \           /         /
              \          /   \    /          /
               \        /     \  /          /
            [GateUp_Text]   [GateUp_Vision]
    The splits are equal and are defined by the amount of target keys.
    The final fusions are defined by the amount of original module lists.
    """

    def __init__(self, stack_dim: int = 0, concat_dim: int = 1):
        self.stack_dim = stack_dim
        self.concat_dim = concat_dim

    def split_list_into_chunks(self, tensor_list: list[torch.Tensor], chunks: int = 2):
        # Best-effort equal split: the last chunk may be shorter.
        step = math.ceil(len(tensor_list) / chunks)
        return [tensor_list[i * step : (i + 1) * step] for i in range(chunks)]

    @torch.no_grad()
    def convert(
        self,
        input_dict: dict[str, list[torch.Tensor]],
        source_patterns: list[str],
        target_patterns: list[str],
        config,
        **kwargs,
    ) -> dict[str, list[torch.Tensor]]:
        available = input_dict.keys()
        fused = defaultdict(list)
        n_targets = len(target_patterns)
        for pattern in source_patterns:
            if pattern not in available:
                raise ValueError(
                    f"Expected pattern {pattern} in collected tensors but only found tensors for: {available}"
                )
            # Split this module list across the target keys, stacking each part.
            groups = self.split_list_into_chunks(input_dict.get(pattern, []), chunks=n_targets)
            for target, group in zip(target_patterns, groups):
                fused[target].append(torch.stack(group, dim=self.stack_dim))
        # Fuse the per-module stacks for each target key.
        for name in list(fused):
            fused[name] = torch.cat(fused[name], dim=self.concat_dim)
        return fused

    @property
    def reverse_op(self) -> ConversionOps:
        return ErnieSplitAndDecoupleTextVisionExperts(stack_dim=self.stack_dim, concat_dim=self.concat_dim)
class ErnieSplitAndDecoupleTextVisionExperts(ConversionOps):
    r"""
    Special operation that splits a fused module list over all original modules and
    then decouples them into a mixed module list each over all keys.
    Example with 2 original modules "Gate" and "Up" with 2 target keys "Text" and "Vision":
            [GateUp_Text]   [GateUp_Vision]
               /        \     /  \          \
              /          \   /    \          \
             /          /         \           \
            /          /           \           \
      [Gate_Text] [Gate_Vision]   [Up_Text] [Up_Vision]
            |        |                  |        |
             [ Gate ]                     [ Up ]
            ModuleList 1                ModuleList 2
    The splits are equal and are defined by the amount of original module lists.
    The final decoupled module lists are defined by the amount of keys.
    """

    def __init__(self, stack_dim: int = 0, concat_dim: int = 1):
        self.stack_dim = stack_dim
        self.concat_dim = concat_dim

    @torch.no_grad()
    def convert(
        self,
        input_dict: dict[str, list[torch.Tensor]],
        source_patterns: list[str],
        target_patterns: list[str],
        config,
        **kwargs,
    ) -> dict[str, list[torch.Tensor]]:
        n_splits = len(target_patterns)
        available = input_dict.keys()
        per_source_chunks = []
        for pattern in source_patterns:
            if pattern not in available:
                raise ValueError(
                    f"Expected pattern {pattern} in collected tensors but only found tensors for: {available}"
                )
            # Single-element lists are expected here, hence the [0] indexing.
            fused = input_dict[pattern][0]
            per_source_chunks.append(fused.chunk(n_splits, dim=self.concat_dim))
        decoupled = {}
        for idx, target in enumerate(target_patterns):
            # Unbind this key's chunk of every source, preserving source order.
            pieces: list[torch.Tensor] = []
            for chunks in per_source_chunks:
                pieces.extend(torch.unbind(chunks[idx], dim=self.stack_dim))
            decoupled.update({target.replace("*", f"{i}"): t for i, t in enumerate(pieces)})
        return decoupled

    @property
    def reverse_op(self) -> ConversionOps:
        return ErnieFuseAndSplitTextVisionExperts(stack_dim=self.stack_dim, concat_dim=self.concat_dim)
class Force16BytesAlignment(ConversionOps):
    """
    Ensures that the given tensor is 16-bytes aligned in memory and clones it if not.
    This guarantees 16-bytes alignment for kernels / implementations that use TMA or SIMD instructions like torch._grouped_mm.
    """

    @torch.no_grad()
    def convert(
        self, input_dict: dict[str, torch.Tensor], source_patterns: list[str], target_patterns: list[str], **kwargs
    ) -> dict[str, torch.Tensor]:
        name = self.get_target_pattern(input_dict, source_patterns, target_patterns)
        value = next(iter(input_dict.values()))
        if isinstance(value, list):
            value = value[0]
        # Clone only when misaligned: a clone re-allocates through PyTorch's
        # default allocator, which yields an aligned storage.
        if value.data_ptr() % 16 != 0:
            value = value.clone()
        return {name: value}

    def get_target_pattern(
        self, input_dict: dict[str, torch.Tensor], source_patterns: list[str], target_patterns: list[str]
    ) -> str:
        if len(input_dict) != 1:
            raise ValueError("Undefined Operation encountered!")
        if len(target_patterns) == 1:
            # Only / last operation in a chain: use the target.
            return target_patterns[0]
        if len(source_patterns) == 1:
            # First operation of a chain: use the source.
            return source_patterns[0]
        raise ValueError("Undefined Operation encountered!")

    @property
    def reverse_op(self) -> ConversionOps:
        return Force16BytesAlignment()
def process_target_pattern(pattern: str) -> tuple[str, str | None]:
    """
    Process a target pattern for reverse mapping (when targets become sources).

    Handles several edge cases of our checkpoint conversion mappings:
    - strips `^` / `$` string anchors,
    - drops negative lookahead/lookbehind assertions,
    - turns a capturing group into a `\\1` backreference.

    Args:
        pattern: The target pattern to process for reverse mapping.

    Returns:
        A tuple `(processed_pattern, captured_group)` where `captured_group`
        is the original capturing group (e.g. "(encoder|decoder)") or None.
    """
    # Anchors only make sense in the forward direction — drop them.
    pattern = pattern.removeprefix("^").removesuffix("$")
    # Remove lookahead/behind assertions. Ugly, but required for the reverse
    # mapping of Qwen2.5, Sam3 and Ernie4.5 VL MoE!
    pattern = re.sub(r"\(\?.+\)", "", pattern)
    # A capturing group (used to add/remove prefixes, e.g. timm_wrapper, sam3)
    # becomes a backreference in the reversed pattern.
    group_match = re.search(r"\(.+?\)", pattern)
    if group_match is None:
        return pattern, None
    group = group_match.group(0)
    return pattern.replace(group, r"\1", 1), group
@dataclass(slots=True)
class WeightTransform:
    """Maps checkpoint keys (source patterns) to model keys (target patterns),
    collecting the tensors to be renamed/converted along the way."""

    # Glob-style pattern(s) matching the checkpoint keys to read.
    source_patterns: str | list[str] = field(init=True)
    # Glob-style pattern(s) the matched keys are renamed to.
    target_patterns: str | list[str] = field(init=True)
    # Alternation regex built from `source_patterns` (one named group each).
    compiled_sources: re.Pattern = field(init=False)
    distributed_operation: TensorParallelLayer | None = None
    quantization_operation: ConversionOps | None = None
    # Tensors (futures/callables/tensors) gathered per source pattern.
    collected_tensors: dict[str, list[Future]] = field(default_factory=lambda: defaultdict(list), init=False)
    # For each target key, the set of source keys that contributed to it.
    layer_targets: dict[str, set[str]] = field(default_factory=lambda: defaultdict(set), init=False)

    def __setattr__(self, name, value):
        """Normalize the pattern fields to lists and make them write-once."""
        if name in ("source_patterns", "target_patterns"):
            # We do not allow to re-set the patterns, as they are linked between each other and changing one
            # without the other can mess-up with the capturing groups/compiled sources
            if hasattr(self, name):
                raise ValueError(f"Cannot assign to field {name}, you should create a new instance")
            # Switch str to list
            elif isinstance(value, str):
                value = [value]
        object.__setattr__(self, name, value)

    def __post_init__(self):
        # Due to how our `_checkpoint_conversion_mapping` mappings are written, we need a few exceptions here
        # when instantiating the reverse mapping (i.e. the targets become sources, and sources become targets)
        # The issues lie in the sources usually, so here we need to check the targets for the reversed mapping
        # Process target_patterns: detect capturing groups and replace with \1
        # Store the original capturing group patterns for reverse mapping
        target_capturing_groups: list[str] = []
        for i, pattern in enumerate(self.target_patterns):
            self.target_patterns[i], captured_group = process_target_pattern(pattern)
            if captured_group is not None:
                target_capturing_groups.append(captured_group)
        # Validate that we only have one unique capturing group pattern across all targets
        # This ensures deterministic reverse mapping when sources have \1 backreferences
        unique_capturing_groups = set(target_capturing_groups)
        if len(unique_capturing_groups) > 1:
            raise ValueError(
                f"Multiple different capturing groups found in target_patterns: {unique_capturing_groups}. "
                f"All target patterns must use the same capturing group pattern."
            )
        unique_capturing_group = unique_capturing_groups.pop() if unique_capturing_groups else None
        # We also need to check capturing groups in the sources during reverse mapping (e.g. timm_wrapper, sam3)
        for i, pattern in enumerate(self.source_patterns):
            if r"\1" in pattern:
                if unique_capturing_group is None:
                    raise ValueError(
                        f"Source pattern '{pattern}' contains \\1 backreference, but no capturing groups "
                        f"found in target_patterns."
                    )
                # Use the unique capturing group from target_patterns for all sources
                pattern = pattern.replace(r"\1", unique_capturing_group, 1)
                self.source_patterns[i] = pattern
        # Construct the regex we will use to rename keys from the sources to the targets
        branches = []
        for i, source_pattern in enumerate(self.source_patterns):
            group_name = f"g{i}"
            # ".*." (a whole dotted segment) must be escaped as a literal-dot
            # wildcard, not left as a bare regex.
            pattern = source_pattern.replace(".*.", r"\..*\.")
            branches.append(f"(?P<{group_name}>{pattern})")
        self.compiled_sources = re.compile("|".join(branches))

    def add_tensor(self, target_key: str, source_key: str, source_pattern: str, future: Future):
        """Register a pending tensor for `source_pattern` and record the key mapping."""
        self.collected_tensors[source_pattern].append(future)
        self.layer_targets[target_key].add(source_key)

    def rename_source_key(self, source_key: str) -> tuple[str, str | None]:
        """
        Return a tuple (renamed_key, source_pattern_producing_the_match).
        Try renaming `source_key` according to the source and target patterns of the current WeightTransform.
        In case of a one-to-many transform, i.e. we have several target patterns, the matching source pattern
        will be replaced by the first of all the target patterns (they are then correctly expanded in the Operations).
        """
        # Try matching one of the alternation branches
        match_object = self.compiled_sources.search(source_key)
        if match_object is None:
            return source_key, None
        # Find the source that produced the match (it's the first group that matched, as the search stops after first branch match)
        matching_group_name = next(name for name, val in match_object.groupdict().items() if val is not None)
        # Group names are "g<i>", so the suffix indexes into `source_patterns`.
        source_pattern_that_matched = self.source_patterns[int(matching_group_name[1:])]
        # If we matched, we always replace with the first target pattern, in case we have several (one to many transform)
        replacement = self.target_patterns[0]
        # Allow capturing groups in patterns, i.e. to add a prefix to all keys (e.g. timm_wrapper, sam3)
        if r"\1" in replacement:
            # The index of the internal group we need to replace is the index of the matched named group as it comes
            # inside that matched named group
            replaced_group_idx = self.compiled_sources.groupindex[matching_group_name] + 1
            replacement = replacement.replace(r"\1", match_object.group(replaced_group_idx))
        renamed_key = source_key.replace(match_object.group(0), replacement)
        return renamed_key, source_pattern_that_matched

    def reverse_transform(self) -> WeightTransform:
        """Reverse the current `WeightTransform` instance, to be able to save with the opposite weight transformations."""
        # TODO: check this and relax when quantizer have `reverse_op`
        if self.quantization_operation is not None:
            raise ValueError("Cannot reverse the transform with TP or quantization")
        kwargs = {}
        # Add the reverse ops if applicable (it needs to be provided at __init__)
        if hasattr(self, "operations"):
            # All reverse ops, in reverse order
            kwargs["operations"] = [op.reverse_op for op in self.operations[::-1]]
        reverse_transform = self.__class__(
            source_patterns=self.target_patterns, target_patterns=self.source_patterns, **kwargs
        )
        return reverse_transform

    def materialize_tensors(self) -> dict[str, list[torch.Tensor]]:
        """
        Materialize all the tensors that were saved in `self.collected_tensors`. This function removes them from the
        internal attribute to avoid keeping them in memory during the different `self.convert` operations, and return
        a new dictionary (otherwise we use more memory than needed during loading).
        We basically have 3 cases here:
        - async loading (default): the tensors are Future instances that we need to wait for
        - sync loading: the tensors are Callable, we need to call the Callable to actually load them from disk
        - saving: the tensors are already torch.Tensor instances (the existing model weights)
        """
        collected_tensors = {}
        for key in set(self.collected_tensors.keys()):
            # Remove from internal attribute
            tensors = self.collected_tensors.pop(key)
            # Async loading
            if isinstance(tensors[0], Future):
                tensors = [future.result() for future in tensors if future.result() is not None]
            # Sync loading
            elif callable(tensors[0]):
                tensors = [func() for func in tensors]
            # Add them to the new dictionary
            collected_tensors[key] = tensors
        return collected_tensors
@dataclass(slots=True)
class WeightRenaming(WeightTransform):
    # Special case of WeightTransform: a pure key rename, no tensor conversion.

    def convert(
        self,
        layer_name: str,
        model=None,
        config=None,
        hf_quantizer=None,
        loading_info: LoadStateDictInfo | None = None,
    ):
        """Materialize the single collected tensor and rename it to the target key."""
        # Materialize into a fresh dict so tensors don't linger on the
        # internal attribute while we work on them.
        tensors = self.materialize_tensors()
        # A pure renaming has exactly one source and one target pattern, both
        # of which are full key names carrying a single tensor.
        new_key = self.target_patterns[0]
        tensors = {new_key: tensors[self.source_patterns[0]]}
        if hf_quantizer is None or self.quantization_operation is None:
            return tensors
        with log_conversion_errors(
            layer_name, loading_info, (len(tensors), layer_name), self.quantization_operation
        ):
            tensors = self.quantization_operation.convert(
                tensors,
                source_patterns=self.source_patterns,
                target_patterns=self.target_patterns,
                full_layer_name=new_key,
                model=model,
                config=config,
                missing_keys=loading_info.missing_keys if loading_info else None,
            )
        return tensors
# List of classes that are known to be able to use m:n (many-to-many)
# source/target patterns; all other converters must be 1:n, 1:1 or n:1
# (enforced in WeightConverter.__post_init__).
_INTERNAL_MANY_TO_MANY_CONVERSIONS = (
    ErnieFuseAndSplitTextVisionExperts,
    ErnieSplitAndDecoupleTextVisionExperts,
)
@dataclass(slots=True)
class WeightConverter(WeightTransform):
    """WeightTransform that additionally runs a chain of `ConversionOps`
    (chunk, concatenate, transpose, ...) on the collected tensors."""

    # Conversion operations, applied in order during `convert`.
    operations: list[ConversionOps] = field(default_factory=list, repr=False)

    def __post_init__(self):
        WeightTransform.__post_init__(self)
        # `bool(len(x) - 1)` is truthy when there is more than one pattern:
        # many-to-many mappings are rejected unless a dedicated op handles them.
        if bool(len(self.source_patterns) - 1) + bool(len(self.target_patterns) - 1) >= 2:
            # We allow many-to-many only if we use an internal operation that can handle it
            if not any(isinstance(op, _INTERNAL_MANY_TO_MANY_CONVERSIONS) for op in self.operations):
                raise ValueError(
                    f"source keys={self.source_patterns}, target_patterns={self.target_patterns} but you can only have one to many, one to one or many to one."
                )
        if not self.operations:
            raise ValueError("WeightConverter requires at least one operation.")

    def convert(
        self,
        layer_name: str,
        model=None,
        config=None,
        hf_quantizer=None,
        loading_info: LoadStateDictInfo | None = None,
    ):
        """Run every conversion op (and optional quantization op) for `layer_name`,
        returning a dict of full target key names to converted tensors."""
        # Collect the tensors here - we use a new dictionary to avoid keeping them in memory in the internal
        # attribute during the whole process
        collected_tensors = self.materialize_tensors()
        for op in self.operations:
            with log_conversion_errors(layer_name, loading_info, (len(collected_tensors), layer_name), op):
                collected_tensors = op.convert(
                    collected_tensors,
                    source_patterns=self.source_patterns,
                    target_patterns=self.target_patterns,
                    # Additional kwargs, usually not used
                    full_layer_name=layer_name,
                    model=model,
                    config=config,
                    missing_keys=loading_info.missing_keys if loading_info else None,
                )
        # Tensors are returned from ops with the target patterns, we need to expand them to full name.
        # This means we need to grab the prefix and suffix to add to every target key
        full_name = layer_name
        if ".*." in layer_name:
            full_name = layer_name.replace(".*.", ".0.")
        try:
            # Find the first target pattern contained in the full name and
            # split the name around it to recover the prefix/suffix.
            prefix, _, suffix = next(full_name.partition(k) for k in collected_tensors.keys() if k in full_name)
            # Rename the tensors
            collected_tensors = {prefix + k + suffix: v for k, v in collected_tensors.items()}
        # some quantizers need to already rename in `convert` as they cannot only rely on prefix and suffix
        except StopIteration:
            pass
        if hf_quantizer is not None and self.quantization_operation is not None:
            with log_conversion_errors(
                layer_name, loading_info, (len(collected_tensors), layer_name), self.quantization_operation
            ):
                collected_tensors = self.quantization_operation.convert(
                    collected_tensors,
                    source_patterns=self.source_patterns,
                    target_patterns=self.target_patterns,
                    full_layer_name=layer_name,
                    config=config,
                    model=model,
                    missing_keys=loading_info.missing_keys if loading_info else None,
                )
        return collected_tensors
# For I/O bound operations (i.e. here reading files), it is better to have fewer threads, e.g. 4 is a good default.
# Having too many is actually harming performances quite a lot, i.e. using 16 can sometimes lead to taking TWICE
# as much time to load the same model.
# (`os.cpu_count()` can return None, hence the `or 4` fallback.)
GLOBAL_WORKERS = min(4, os.cpu_count() or 4)
def _materialize_copy(tensor: torch.Tensor, device=None, dtype=None) -> torch.Tensor:
# This slicing is what actually loads the tensor from the safetensors slice object
tensor = tensor[...]
if dtype is not None or device is not None:
tensor = tensor.to(device=device, dtype=dtype)
return tensor
def spawn_materialize(
    thread_pool: ThreadPoolExecutor | None, tensor: torch.Tensor, device=None, dtype=None
) -> Future | Callable:
    """Schedule (or defer) the materialization of `tensor`.

    With a thread pool, returns a Future resolving to the loaded tensor;
    without one, returns a zero-argument callable loading it lazily.
    """

    def _load():
        return _materialize_copy(tensor, device, dtype)

    if thread_pool is None:
        # Return the Callable here, not the Tensor itself, so we actually delay loading to avoid saturating cpu
        # memory during Conversion
        return _load
    return thread_pool.submit(_load)
def spawn_tp_materialize(
    thread_pool: ThreadPoolExecutor | None, tensor: torch.Tensor, sharding_method, tensor_idx, device=None, dtype=None
) -> Future | Callable:
    """Schedule (or defer) loading + TP-sharding of `tensor` per the TP-plan.

    With a thread pool, returns a Future resolving to the sharded tensor;
    without one, returns a zero-argument callable sharding it lazily.
    """

    def _load():
        return sharding_method.shard_tensor(tensor, tensor_idx=tensor_idx, device=device, dtype=dtype)

    if thread_pool is None:
        # Return the Callable here, not the Tensor itself, so we actually delay loading to avoid saturating cpu
        # memory during Conversion
        return _load
    return thread_pool.submit(_load)
def dot_natural_key(s: str):
    """Sort key for state-dict names: split on ``"."`` and sort digits numerically
    and strings alphabetically. We emit a tuple at each point to sort ints
    first and strings second to avoid int-string comparison failures.
    """
    return [(0, int(part)) if part.isdigit() else (1, part) for part in s.split(".")]
@contextmanager
def log_conversion_errors(
    first_target_key: str,
    loading_info: LoadStateDictInfo | None,
    extras: Any = None,
    op: list[ConversionOps] | ConversionOps | None = None,
):
    """Catch all exceptions during `convert` calls, and log the errors for later. Re-raise a `SkipParameters` exception
    that will be caught later to skip the parameters that raised the original Exception.

    Args:
        first_target_key: key under which the error is recorded in `loading_info.conversion_errors`.
        loading_info: error accumulator; when `None` (reverse mapping), exceptions propagate unchanged.
        extras: message context — either a `(length, target_keys)` tuple, a parameter-name string, or `None`.
        op: the conversion op (or collection of ops) being applied, used to name the failure.
    """
    try:
        yield
    except Exception as e:
        # During reverse mapping, we do not log and skip errors
        if loading_info is None:
            raise e
        # Build a readable name ("OpA, OpB") from a single op or a collection of ops.
        def _format_op_name(curr_op: list[ConversionOps] | ConversionOps | None) -> str | None:
            if curr_op is None:
                return None
            if isinstance(curr_op, (list, tuple, set)):
                names = [o.__class__.__name__ for o in curr_op if o is not None]
                if not names:
                    return None
                return ", ".join(names)
            return curr_op.__class__.__name__
        op_name = _format_op_name(op)
        # Full traceback string so the user can locate the original failure site later.
        tb_str = "".join(traceback.format_exception(type(e), e, e.__traceback__))
        if isinstance(extras, tuple) and len(extras) == 2:
            length, target_keys = extras
            descriptor = f"{op_name} " if op_name else ""
            loading_info.conversion_errors[first_target_key] = (
                f"{tb_str}{e}\nError: {descriptor}on tensors destined for {target_keys}. Ckpt contains: {length}"
            )
        elif isinstance(extras, str):
            suffix = f" via {op_name}" if op_name else ""
            loading_info.conversion_errors[first_target_key] = (
                f"{tb_str}{e}\nError{suffix} when processing parameter {extras}"
            )
        elif extras is None and op_name:
            loading_info.conversion_errors[first_target_key] = f"{op_name}: {e}"
        else:
            loading_info.conversion_errors[first_target_key] = f"{extras} |Error: {e}"
        # Raise a specific Exception that we can catch easily
        raise SkipParameters()
def set_param_for_module(
    model: PreTrainedModel,
    target_name: str,
    param_value: torch.Tensor,
    loading_info: LoadStateDictInfo,
    distributed_operation: TensorParallelLayer | None,
    hf_quantizer: HfQuantizer,
):
    """Attach `param_value` to the module owning `target_name` inside `model`, updating
    `loading_info` (missing/unexpected/mismatched keys) along the way."""
    module_path, _, param_name = target_name.rpartition(".")
    module_obj = model.get_submodule(module_path) if module_path else model
    # Current placeholder tensor living at the target location, if any.
    ref = getattr(module_obj, param_name)
    if ref is None:
        loading_info.unexpected_keys.add(target_name)
    else:
        if not isinstance(param_value, torch.nn.Parameter):
            # Buffers stay plain tensors; anything else is wrapped as a Parameter
            # (gradients only make sense for floating-point dtypes).
            if param_name not in module_obj._buffers:
                param_value = torch.nn.Parameter(param_value, requires_grad=param_value.is_floating_point())
        # Remove from missing keys (it's either mismatched, or all good)
        loading_info.missing_keys.discard(target_name)
        # Determine expected shape: for TP, use sharded shape; otherwise, use full shape
        if distributed_operation is not None:
            expected_shape = torch.Size(distributed_operation.get_expected_sharded_shape(ref.shape))
        else:
            expected_shape = ref.shape
        # Quantizers may legitimately change shapes, so the check is skipped when one is active.
        if ref is not None and param_value.shape != expected_shape and hf_quantizer is None:
            loading_info.mismatched_keys.add((target_name, param_value.shape, expected_shape))
        else:
            # super important otherwise _init_weight will re-init the param
            param_value._is_hf_initialized = True
            setattr(module_obj, param_name, param_value)
def offload_and_maybe_resave_param(
    target_name: str,
    param: torch.Tensor,
    loading_info: LoadStateDictInfo,
    disk_offload_folder: str,
    disk_offload_index: dict,
    applied_ops: WeightConverter | WeightRenaming,
) -> dict:
    """Offload `param` to disk. The tensor is (re-)saved when it is absent from `disk_offload_index`,
    or when a genuine conversion (anything beyond a pure renaming) produced it; otherwise the existing
    on-disk entry for this param is reused as-is."""
    # The param is accounted for, so it can no longer be reported as missing.
    loading_info.missing_keys.discard(target_name)
    already_offloaded = target_name in disk_offload_index
    was_converted = isinstance(applied_ops, WeightConverter)
    if was_converted or not already_offloaded:
        disk_offload_index = offload_weight(param, target_name, disk_offload_folder, disk_offload_index)
    return disk_offload_index
class SkipParameters(Exception):
    """Control-flow sentinel raised to abort processing of just the current group of
    parameters (the ones a WeightConverter was about to create); loading then moves
    on to the next group."""

    pass
def rename_source_key(
    source_key: str,
    weight_renamings: list[WeightRenaming],
    weight_converters: list[WeightConverter],
    prefix: str | None = None,
    meta_state_dict: dict | None = None,
) -> tuple[str, str | None]:
    """
    Rename a source key given all the renaming and weight conversion patterns we have. Also takes care of adding/removing
    the base model prefix during loading if necessary.

    Returns:
        `(renamed_key, source_pattern)` where `source_pattern` is the converter pattern that matched,
        or `None` when no WeightConverter applies to this key.
    """
    renamed_key = source_key
    # 1. apply all renamings in turns (if multiple match, it's the responsibility of the mappings to make sure they
    # are coherent)
    for renaming in weight_renamings:
        renamed_key, _ = renaming.rename_source_key(renamed_key)
    # 2. apply renaming through weight conversions on the key if we have any WeightConverter (here we stop after
    # the first match, as we assume only 1 converter can match any source key)
    source_pattern = None
    for converter in weight_converters:
        renamed_key, source_pattern = converter.rename_source_key(renamed_key)
        if source_pattern is not None:
            break
    # 3. check if we need to add or remove prefix if necessary (only during loading, not saving)
    # NOTE(review): the pattern f"^{prefix}." interpolates `prefix` unescaped and uses a bare `.`
    # (any character) — fine for typical prefixes like "model", but worth confirming for prefixes
    # containing regex metacharacters.
    if prefix is not None and meta_state_dict is not None:
        if (
            renamed_key.startswith(prefix)
            and meta_state_dict.get(re.sub(f"^{prefix}.", "", renamed_key, count=1)) is not None
        ):
            renamed_key = re.sub(f"^{prefix}.", "", renamed_key, count=1)
        elif meta_state_dict.get(f"{prefix}.{renamed_key}") is not None:
            renamed_key = f"{prefix}.{renamed_key}"
    return renamed_key, source_pattern
def convert_and_load_state_dict_in_model(
    model: PreTrainedModel,
    state_dict: dict[str, Any],
    load_config: LoadStateDictConfig,
    tp_plan: dict[str, str] | None,
    disk_offload_index: dict | None = None,
):
    r"""
    We build a mapping from the keys obtained by renaming each of the checkpoint keys according to the weight_mapping rules.
    Then we load the tensors into the model, applying any conversion operations as needed.
    The `param_name_to_load` will look like this:
    {
        "model.layers.0.attention.q.weight":  # Notice here there is only the first key of the target keys
            WeightConverter(
                source_patterns=["qkv"],
                target_patterns=["q", "k","v"],
                operations=[Chunk(dim=0, chunks=3)]),
                collected_tensors={
                    "qkv": [Future]},
                layer_targets={
                    "model.layers.0.attention.q.weight": {"model.layers.0.attention.qkv.weight"},
                    "model.layers.0.attention.k.weight": {"model.layers.0.attention.qkv.weight"},
                    "model.layers.0.attention.v.weight": {"model.layers.0.attention.qkv.weight"},
                }
            ),
        ...
    }
    We make sure that the keys are the full keys. The only "nit" here is that 1 key can map to multiple target keys (e.g. qkv -> q, k, v).
    In that case the weight converter will take care of doing the appropriate renaming.
    For example for:
    ```python
    WeightConverter(
        source_patterns=["mlp.experts.*.gate_proj.weight","mlp.experts.*.up_proj.weight"],
        target_patterns="mlp.experts.gate_up_proj",
        operations=[MergeModulelist(dim=0), Concatenate(dim=1)],
    )
    ```
    we would have the following collected tensors:
    ```python
    collected_tensors = {
        "mlp.experts.*.gate_proj.weight": [Future, Future, Future, Future, Future, Future, Future, Future],
        "mlp.experts.*.up_proj.weight": [Future, Future, Future, Future, Future, Future, Future, Future],
    }
    ```
    The first op, `MergeModulelist`, would stack the 8 tensors of each source but will not "rename" them into the fused target name.
    The second op, `Concatenate`, would then rename the fused tensor into the final target name.
    If we want to split `qkv` we would have:
    ```python
    collected_tensors = {
        "attention.qkv.weight": [Future],  # here its the full SOURCE keys.
    }
    ```
    The `Chunk` operation would then split the single tensor into 3 and rename them accordingly and update the collected tensors to:
    ```python
    realized_values = {
        "attention.q.weight": [Tensor],
        "attention.k.weight": [Tensor],
        "attention.v.weight": [Tensor],
    }
    ```
    Now that this is done, we can quantize / dequantize accordingly the collected_tensors.
    For some quantization methods, we need to gather different tensors:
    ```python
    # for "medmekk/llama-3.2-1b-float8-torchao"
    WeightConverter(
        source_patterns=[":qdata", ":scale"],
        target_patterns="",
        operations=[TorchaoDeserialize()],
    )
    ```
    This will collect all tensors that have the same prefix, but end with `:qdata` or `:scale`. This will give us:
    ```python
    all_weight_mapping = {
        "model.layers.13.self_attn.o_proj.weight": WeightConverter(
            source_patterns=[":qdata", ":scale"],
            target_patterns="",
            operations=[TorchaoDeserialize()],
            collected_tensors={
                ":qdata": [Future],
                ":scale": [Future],
            },
        ...
    }
    ```
    """
    prefix = model.base_model_prefix
    tp_plan = tp_plan or {}
    device_map = load_config.device_map or {"": "cpu"}
    hf_quantizer = load_config.hf_quantizer
    dtype = load_config.dtype
    device_mesh = load_config.device_mesh
    disk_offload_folder = load_config.disk_offload_folder
    offload_buffers = load_config.offload_buffers
    dtype_plan = load_config.dtype_plan or {}
    weight_mapping = load_config.weight_mapping or []
    meta_model_state_dict = model.state_dict()
    model_buffers = {k for k, _ in model.named_buffers()}
    # We start from all missing keys, and we will remove/add them from the proper containers as loading advances
    loading_info = LoadStateDictInfo(
        missing_keys=set(meta_model_state_dict.keys()),
        unexpected_keys=set(),
        mismatched_keys=set(),
        conversion_errors={},
        error_msgs=[],
    )
    # We use threading by default, if not explicitly deactivated via env variable. If we have to offload,
    # we cannot use it either to control the memory as we are under memory constraints, so we need to be sequential
    if is_env_variable_true("HF_DEACTIVATE_ASYNC_LOAD") or "disk" in device_map.values():
        thread_pool = None
    else:
        thread_pool = ThreadPoolExecutor(max_workers=GLOBAL_WORKERS)
    renamings = [entry for entry in weight_mapping if isinstance(entry, WeightRenaming)]
    converters = [entry for entry in weight_mapping if isinstance(entry, WeightConverter)]
    param_name_to_load: dict[str, WeightRenaming | WeightConverter] = {}
    # build '(?P<g0>.*.*\\.block_sparse_moe\\..*)' and group to source {'g0': '*.block_sparse_moe.'}
    # and target to source {'g0': '*.mlp.'}. This allows us to quickly find which pattern matched.
    if tp_plan != {}:
        tp_plan_alt, tp_plan_by_group_name, _ = build_glob_alternation(list(tp_plan.keys()))
    if dtype_plan != {}:
        dtype_policy_alt, dtype_policy_by_group_name, _ = build_glob_alternation(list(dtype_plan.keys()))
    # Fast lookup: each converter source glob -> its converter (used as a template, deepcopied per target key).
    pattern_to_converter = {k: converter for converter in converters for k in converter.source_patterns}
    # Natural ordering keeps list-indexed checkpoint keys (e.g. experts.0, experts.1, ...) collected in order.
    state_dict = sorted(state_dict.items(), key=lambda kv: dot_natural_key(kv[0]))
    for original_key, tensor in state_dict:
        # 1. Rename the key according to all renaming pattern and optional weight converter patterns
        renamed_key, source_pattern = rename_source_key(
            original_key, renamings, converters, prefix, meta_model_state_dict
        )
        # 2. finally, collect the tensor into the proper converter
        if renamed_key in meta_model_state_dict:
            empty_param = meta_model_state_dict.get(renamed_key)
            # If we enter here, we have a WeightConverter operation to perform
            if source_pattern is not None:
                new_converter = deepcopy(pattern_to_converter[source_pattern])
                # each target key gets its own converter instance
                mapping = param_name_to_load.setdefault(renamed_key, new_converter)
            # Otherwise, only potential renaming
            else:
                mapping = param_name_to_load.setdefault(renamed_key, WeightRenaming(original_key, renamed_key))
                source_pattern = original_key
            # 3. Handle dtype casting
            if (
                hf_quantizer
                and not hf_quantizer.pre_quantized
                and hf_quantizer.param_needs_quantization(model, renamed_key)
            ):
                mapping.quantization_operation = hf_quantizer.get_quantize_ops()
            _dtype = dtype
            if hf_quantizer and hf_quantizer.pre_quantized and original_key != renamed_key:
                # if the key was renamed as it is not available in the state dict otherwise, it means that we are deserializing it,
                # so we need to make sure to load the tensor with the same dtype from the checkpoint
                # TODO: make the condition more srict for native fp8 model such as qwen2moe fp8
                _dtype = None
            elif dtype_plan != {} and dtype_policy_alt.search(renamed_key):
                matched_dtype_pattern = dtype_policy_alt.search(renamed_key)
                if matched_dtype_pattern is not None:
                    _dtype = dtype_plan[dtype_policy_by_group_name[matched_dtype_pattern.lastgroup]]
            elif empty_param is not None and empty_param.dtype != _dtype:
                _dtype = empty_param.dtype # usually correct when initializing
            # 4. Handle TP sharding or device_map placement
            future_or_tensor = None
            if device_mesh:
                if matched_tp_pattern := tp_plan_alt.search(renamed_key):
                    matched_tp_pattern = tp_plan_by_group_name[matched_tp_pattern.lastgroup]
                    if getattr(mapping, "distributed_operation", None) is None:
                        tp_layer = ALL_PARALLEL_STYLES[model.tp_plan[matched_tp_pattern]].__class__
                        mapping.distributed_operation = tp_layer(
                            device_mesh=device_mesh, rank=device_mesh.get_local_rank(), empty_param=empty_param.clone()
                        )
                    # MergeModulelist stacks several source tensors: each needs its own shard index.
                    shard_index = (
                        len(mapping.collected_tensors.get(source_pattern, []))
                        if isinstance(mapping, WeightConverter) and isinstance(mapping.operations[0], MergeModulelist)
                        else None
                    )
                    future_or_tensor = spawn_tp_materialize(
                        thread_pool,
                        tensor,
                        mapping.distributed_operation,
                        shard_index,
                        device_map[""],
                        _dtype,
                    )
            if future_or_tensor is None:
                param_device = get_device(device_map, renamed_key, valid_torch_device=True)
                future_or_tensor = spawn_materialize(thread_pool, tensor, param_device, _dtype)
            mapping.add_tensor(renamed_key, original_key, source_pattern, future_or_tensor)
        elif source_pattern is not None: # add all target keys as unexpected
            mapping = pattern_to_converter[source_pattern]
            for k in mapping.target_patterns:
                loading_info.unexpected_keys.add(renamed_key.replace(mapping.target_patterns[0], k))
        else:
            loading_info.unexpected_keys.add(renamed_key)
    try:
        total_entries = len(param_name_to_load)
        with logging.tqdm(total=total_entries, desc="Loading weights") as pbar:
            for first_param_name, mapping in param_name_to_load.items():
                pbar.update(1)
                pbar.set_postfix({"Materializing param": first_param_name})
                pbar.refresh()
                try:
                    realized_value = mapping.convert(
                        first_param_name,
                        model=model,
                        config=model.config,
                        hf_quantizer=hf_quantizer,
                        loading_info=loading_info,
                    )
                    for target_name, param in realized_value.items():
                        param = param[0] if isinstance(param, list) else param
                        param_device = get_device(device_map, target_name)
                        # Offloading support
                        if param_device == "disk" and (target_name not in model_buffers or offload_buffers):
                            disk_offload_index = offload_and_maybe_resave_param(
                                target_name, param, loading_info, disk_offload_folder, disk_offload_index, mapping
                            )
                        else:
                            set_param_for_module(
                                model,
                                target_name,
                                param,
                                loading_info,
                                mapping.distributed_operation,
                                hf_quantizer,
                            )
                    # Cleanup all the tensors that were gathered before next iteration
                    del realized_value
                except SkipParameters:
                    continue
    # Close the pool, independently of whether the code was interrupted or finished successfully
    finally:
        if thread_pool is not None:
            # `cancel_futures=True` in case the program was interrupted, to avoid wasting time on exit
            thread_pool.shutdown(wait=False, cancel_futures=True)
    # Keep the current weight conversion mapping for later saving (in case it was coming directly from the user)
    model._weight_conversions = weight_mapping
    return loading_info, disk_offload_index
def revert_weight_conversion(model: PreTrainedModel, state_dict: dict[str, torch.Tensor]):
    """
    Revert the conversion mapping that was used to load the model with `from_pretrained`, or the default one
    if the model was created in another way and is part of the default mappings.

    Returns the state dict in checkpoint (on-disk) format, ready for saving.
    """
    weight_conversions = getattr(model, "_weight_conversions", None)
    # In this case, the model was not created with `from_pretrained` -> let's check if it's in the hardcoded
    # mappings, and recreate the mapping from there if it is
    if weight_conversions is None:
        from .conversion_mapping import get_model_conversion_mapping
        # Do not resave with the legacy renaming, if present
        weight_conversions = get_model_conversion_mapping(model, add_legacy=False)
        weight_conversions = weight_conversions if len(weight_conversions) > 0 else None
    # We did not find any operations to perform -> quick escape
    if weight_conversions is None:
        return state_dict
    # Reverse all Transform to correctly match keys
    reverse_weight_conversion = [conversion.reverse_transform() for conversion in weight_conversions]
    # If we are still here, we need to create the (reverse) conversion mapping from scratch
    renamings = [entry for entry in reverse_weight_conversion if isinstance(entry, WeightRenaming)]
    converters = [entry for entry in reverse_weight_conversion if isinstance(entry, WeightConverter)]
    pattern_to_converter = {k: converter for converter in converters for k in converter.source_patterns}
    conversion_mapping = {}
    # Natural ordering keeps list-indexed params grouped deterministically.
    state_dict = sorted(state_dict.items(), key=lambda kv: dot_natural_key(kv[0]))
    for original_key, tensor in state_dict:
        # Rename the key according to all renaming pattern and optional weight converter patterns
        renamed_key, source_pattern = rename_source_key(original_key, renamings, converters)
        if source_pattern is not None:
            new_converter = deepcopy(pattern_to_converter[source_pattern])
            # each target key gets its own converter instance
            mapping = conversion_mapping.setdefault(renamed_key, new_converter)
        else:
            mapping = conversion_mapping.setdefault(renamed_key, WeightRenaming(original_key, renamed_key))
            source_pattern = original_key
        mapping.add_tensor(renamed_key, original_key, source_pattern, tensor)
    new_state_dict = {}
    for first_param_name, reversed_converter in conversion_mapping.items():
        # Apply the reverse converter
        realized_value = reversed_converter.convert(first_param_name, model=model, config=model.config)
        for target_name, param in realized_value.items():
            param = param[0] if isinstance(param, list) else param
            new_state_dict[target_name] = param
    return new_state_dict
|
python
|
github
|
https://github.com/huggingface/transformers
|
src/transformers/core_model_loading.py
|
#!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = '''
---
module: nxos_install_os
extends_documentation_fragment: nxos
short_description: Set boot options like boot, kickstart image and issu.
description:
- Install an operating system by setting the boot options like boot
image and kickstart image and optionally select to install using
ISSU (In Server Software Upgrade).
notes:
- Tested against the following platforms and images
- N9k 7.0(3)I4(6), 7.0(3)I5(3), 7.0(3)I6(1), 7.0(3)I7(1), 7.0(3)F2(2), 7.0(3)F3(2)
- N3k 6.0(2)A8(6), 6.0(2)A8(8), 7.0(3)I6(1), 7.0(3)I7(1)
- N7k 7.3(0)D1(1), 8.0(1), 8.1(1), 8.2(1)
- This module requires both the ANSIBLE_PERSISTENT_CONNECT_TIMEOUT and
ANSIBLE_PERSISTENT_COMMAND_TIMEOUT timers to be set to 600 seconds or higher.
The module will exit if the timers are not set properly.
- Do not include full file paths, just the name of the file(s) stored on
the top level flash directory.
- This module attempts to install the software immediately,
which may trigger a reboot.
- In check mode, the module will indicate if an upgrade is needed and
whether or not the upgrade is disruptive or non-disruptive(ISSU).
author:
- Jason Edelman (@jedelman8)
- Gabriele Gerbibo (@GGabriele)
version_added: 2.2
options:
system_image_file:
description:
- Name of the system (or combined) image file on flash.
required: true
kickstart_image_file:
description:
- Name of the kickstart image file on flash.
(Not required on all Nexus platforms)
issu:
version_added: "2.5"
description:
- Upgrade using In Service Software Upgrade (ISSU).
(Supported on N5k, N7k, N9k platforms)
- Selecting 'required' or 'yes' means that upgrades will only
proceed if the switch is capable of ISSU.
- Selecting 'desired' means that upgrades will use ISSU if possible
but will fall back to disruptive upgrade if needed.
- Selecting 'no' means do not use ISSU. Forced disruptive.
choices: ['required','desired', 'yes', 'no']
default: 'no'
'''
EXAMPLES = '''
- name: Install OS on N9k
check_mode: no
nxos_install_os:
system_image_file: nxos.7.0.3.I6.1.bin
issu: desired
- name: Wait for device to come back up with new image
wait_for:
port: 22
state: started
timeout: 500
delay: 60
host: "{{ inventory_hostname }}"
- name: Check installed OS for newly installed version
nxos_command:
commands: ['show version | json']
provider: "{{ connection }}"
register: output
- assert:
that:
- output['stdout'][0]['kickstart_ver_str'] == '7.0(3)I6(1)'
'''
RETURN = '''
install_state:
description: Boot and install information.
returned: always
type: dict
sample: {
"install_state": [
"Compatibility check is done:",
"Module bootable Impact Install-type Reason",
"------ -------- -------------- ------------ ------",
" 1 yes non-disruptive reset ",
"Images will be upgraded according to following table:",
"Module Image Running-Version(pri:alt) New-Version Upg-Required",
"------ ---------- ---------------------------------------- -------------------- ------------",
" 1 nxos 7.0(3)I6(1) 7.0(3)I7(1) yes",
" 1 bios v4.4.0(07/12/2017) v4.4.0(07/12/2017) no"
],
}
'''
import re
from time import sleep
from ansible.module_utils.network.nxos.nxos import load_config, run_commands
from ansible.module_utils.network.nxos.nxos import nxos_argument_spec, check_args
from ansible.module_utils.basic import AnsibleModule
# Output options are 'text' or 'json'
def execute_show_command(module, command, output='text'):
    """Run a single show command on the device and return the command output.

    `output` selects the response format requested from the device.
    """
    command_spec = {'command': command, 'output': output}
    return run_commands(module, [command_spec])
def get_platform(module):
    """Determine the Nexus platform family (e.g. 'N9K') from 'show inventory'.

    Returns 'unknown' when the product id matches no known family.
    """
    data = execute_show_command(module, 'show inventory', 'json')
    pid = data[0]['TABLE_inv']['ROW_inv'][0]['productid']
    # Scan known families in the same order the original if/elif chain used.
    # (The previous implementation bound the result to a local named `type`,
    # shadowing the builtin.)
    for platform in ('N3K', 'N5K', 'N6K', 'N7K', 'N9K'):
        if re.search(platform, pid):
            return platform
    return 'unknown'
def parse_show_install(data):
    """Helper method to parse the output of the 'show install all impact' or
        'install all' commands.
        Sample Output:
        Installer will perform impact only check. Please wait.
        Verifying image bootflash:/nxos.7.0.3.F2.2.bin for boot variable "nxos".
        [####################] 100% -- SUCCESS
        Verifying image type.
        [####################] 100% -- SUCCESS
        Preparing "bios" version info using image bootflash:/nxos.7.0.3.F2.2.bin.
        [####################] 100% -- SUCCESS
        Preparing "nxos" version info using image bootflash:/nxos.7.0.3.F2.2.bin.
        [####################] 100% -- SUCCESS
        Performing module support checks.
        [####################] 100% -- SUCCESS
        Notifying services about system upgrade.
        [####################] 100% -- SUCCESS
        Compatibility check is done:
        Module  bootable          Impact  Install-type  Reason
        ------  --------  --------------  ------------  ------
             8       yes      disruptive         reset  Incompatible image for ISSU
            21       yes      disruptive         reset  Incompatible image for ISSU
        Images will be upgraded according to following table:
        Module       Image                  Running-Version(pri:alt)           New-Version  Upg-Required
        ------  ----------  ----------------------------------------  --------------------  ------------
             8       lcn9k                               7.0(3)F3(2)           7.0(3)F2(2)           yes
             8        bios                                    v01.17                v01.17            no
            21       lcn9k                               7.0(3)F3(2)           7.0(3)F2(2)           yes
            21        bios                                    v01.70                v01.70            no

    Returns a dict with the raw text plus boolean flags (error, disruptive,
    upgrade_needed, server_error, ...) and per-module entries keyed 'm<N>' /
    'm<N>_<image>'.
    """
    if len(data) > 0:
        data = massage_install_data(data)
    ud = {'raw': data}
    ud['processed'] = []
    ud['disruptive'] = False
    ud['upgrade_needed'] = False
    ud['error'] = False
    ud['invalid_command'] = False
    ud['install_in_progress'] = False
    ud['server_error'] = False
    ud['upgrade_succeeded'] = False
    ud['use_impact_data'] = False
    # Check for server errors
    if isinstance(data, int):
        if data == -1:
            ud['server_error'] = True
        elif data >= 500:
            ud['server_error'] = True
        elif data == -32603:
            ud['server_error'] = True
        return ud
    else:
        ud['list_data'] = data.split('\n')
    for x in ud['list_data']:
        # Check for errors and exit if found.
        if re.search(r'Pre-upgrade check failed', x):
            ud['error'] = True
            break
        # BUGFIX: character class was '[I|i]', which also matched a literal '|'.
        if re.search(r'[Ii]nvalid command', x):
            ud['invalid_command'] = True
            ud['error'] = True
            break
        if re.search(r'No install all data found', x):
            ud['error'] = True
            break
        # Check for potentially transient conditions
        if re.search(r'Another install procedure may\s*be in progress', x):
            ud['install_in_progress'] = True
            break
        if re.search(r'Backend processing error', x):
            ud['server_error'] = True
            break
        if re.search(r'timed out', x):
            ud['server_error'] = True
            break
        if re.search(r'^(-1|5\d\d)$', x):
            ud['server_error'] = True
            break
        # Check for messages indicating a successful upgrade.
        if re.search(r'Finishing the upgrade', x):
            ud['upgrade_succeeded'] = True
            break
        if re.search(r'Install has been successful', x):
            ud['upgrade_succeeded'] = True
            break
        if re.search(r'Switching over onto standby', x):
            ud['upgrade_succeeded'] = True
            break
        # We get these messages when the upgrade is non-disruptive and
        # we lose connection with the switchover but far enough along that
        # we can be confident the upgrade succeeded.
        if re.search(r'timeout trying to send command: install', x):
            ud['upgrade_succeeded'] = True
            ud['use_impact_data'] = True
            break
        # BUGFIX: character class was '[C|c]', which also matched a literal '|'.
        if re.search(r'[Cc]onnection failure: timed out', x):
            ud['upgrade_succeeded'] = True
            ud['use_impact_data'] = True
            break
        # Begin normal parsing.
        if re.search(r'----|Module|Images will|Compatibility', x):
            ud['processed'].append(x)
            continue
        # Check to see if upgrade will be disruptive or non-disruptive and
        # build dictionary of individual modules and their status.
        # Sample Line:
        #
        # Module  bootable      Impact  Install-type  Reason
        # ------  --------  ----------  ------------  ------
        #      8       yes  disruptive         reset  Incompatible image
        rd = r'(\d+)\s+(\S+)\s+(disruptive|non-disruptive)\s+(\S+)'
        mo = re.search(rd, x)
        if mo:
            ud['processed'].append(x)
            key = 'm%s' % mo.group(1)
            field = 'disruptive'
            if mo.group(3) == 'non-disruptive':
                ud[key] = {field: False}
            else:
                ud[field] = True
                ud[key] = {field: True}
            field = 'bootable'
            if mo.group(2) == 'yes':
                ud[key].update({field: True})
            else:
                ud[key].update({field: False})
            continue
        # Check to see if switch needs an upgrade and build a dictionary
        # of individual modules and their individual upgrade status.
        # Sample Line:
        #
        # Module  Image  Running-Version(pri:alt)    New-Version   Upg-Required
        # ------  -----  ----------------------------------------  ------------
        #      8  lcn9k  7.0(3)F3(2)                 7.0(3)F2(2)   yes
        mo = re.search(r'(\d+)\s+(\S+)\s+(\S+)\s+(\S+)\s+(yes|no)', x)
        if mo:
            ud['processed'].append(x)
            key = 'm%s_%s' % (mo.group(1), mo.group(2))
            field = 'upgrade_needed'
            if mo.group(5) == 'yes':
                ud[field] = True
                ud[key] = {field: True}
            else:
                ud[key] = {field: False}
            continue
    return ud
def massage_install_data(data):
    """Normalize 'install all' command output to a single result payload.

    Transport cli returns a one-item list; transport nxapi returns a two-item
    list whose second element holds the data we want (possibly a dict
    describing a cli or backend error).
    """
    default_error_msg = 'No install all data found'
    if len(data) == 1:
        result_data = data[0]
    elif len(data) == 2:
        result_data = data[1]
    else:
        result_data = default_error_msg
    # nxapi may wrap errors in a dict; unwrap the relevant message.
    if len(data) == 2 and isinstance(data[1], dict):
        payload = data[1]
        if 'clierror' in payload:
            result_data = payload['clierror']
        elif payload.get('code') == '500':
            # We encountered a backend processing error for nxapi
            result_data = payload['msg']
        else:
            result_data = default_error_msg
    return result_data
def build_install_cmd_set(issu, image, kick, type, force=True):
    """Assemble the NX-OS command list for an upgrade or impact check.

    Platform behavior differs between image layouts:
    combined images take the 'non-disruptive' keyword for ISSU (omitted for
    disruptive installs), while separate kickstart + system images use the
    hidden 'force' keyword for disruptive installs (omitted for ISSU; not
    supported on all platforms).
    """
    issu_wanted = bool(re.search(r'required|desired|yes', issu))
    if kick is None:
        issu_cmd = 'non-disruptive' if issu_wanted else ''
    else:
        issu_cmd = '' if issu_wanted else ('force' if force else '')
    if type == 'impact':
        rootcmd = 'show install all impact'
        # The force option is not available for the impact command.
        if kick:
            issu_cmd = ''
    else:
        rootcmd = 'install all'
    if kick is None:
        install_cmd = '%s nxos %s %s' % (rootcmd, image, issu_cmd)
    else:
        install_cmd = '%s %s system %s kickstart %s' % (rootcmd, issu_cmd, image, kick)
    return ['terminal dont-ask', install_cmd]
def parse_show_version(data):
    """Extract the OS version string from 'show version' text output.

    Sets 'error' when no kickstart/system/NXOS version line is found.
    """
    lines = data[0].split('\n')
    version_data = {'raw': lines, 'version': '', 'error': False}
    pattern = re.compile(r'(kickstart|system|NXOS):\s+version\s+(\S+)')
    for line in lines:
        match = pattern.search(line)
        if match:
            # Last matching line wins, mirroring the scan order.
            version_data['version'] = match.group(2)
    version_data['error'] = version_data['version'] == ''
    return version_data
def check_mode_legacy(module, issu, image, kick=None):
    """Some platforms/images/transports don't support the 'install all impact'
    command so we need to use a different method: compare the running version
    against the version embedded in the target image file(s)."""
    current = execute_show_command(module, 'show version', 'json')[0]
    # Call parse_show_install on an empty string to create the default upgrade
    # data structure dictionary
    data = parse_show_install('')
    upgrade_msg = 'No upgrade required'
    # Process System Image
    data['error'] = False
    tsver = 'show version image bootflash:%s' % image
    data['upgrade_cmd'] = [tsver]
    target_image = parse_show_version(execute_show_command(module, tsver))
    if target_image['error']:
        data['error'] = True
        data['raw'] = target_image['raw']
    # NOTE(review): the system image is compared against 'kickstart_ver_str' —
    # presumably correct for combined-image platforms; confirm on device output.
    if current['kickstart_ver_str'] != target_image['version'] and not data['error']:
        data['upgrade_needed'] = True
        data['disruptive'] = True
        upgrade_msg = 'Switch upgraded: system: %s' % tsver
    # Process Kickstart Image
    if kick is not None and not data['error']:
        tkver = 'show version image bootflash:%s' % kick
        # BUGFIX: previously appended tsver (the system image command) a second
        # time; record the kickstart image command instead.
        data['upgrade_cmd'].append(tkver)
        target_kick = parse_show_version(execute_show_command(module, tkver))
        if target_kick['error']:
            data['error'] = True
            data['raw'] = target_kick['raw']
        if current['kickstart_ver_str'] != target_kick['version'] and not data['error']:
            data['upgrade_needed'] = True
            data['disruptive'] = True
            upgrade_msg = upgrade_msg + ' kickstart: %s' % tkver
    data['list_data'] = data['raw']
    data['processed'] = upgrade_msg
    return data
def check_mode_nextgen(module, issu, image, kick=None):
    """Use the 'install all impact' command for check_mode"""
    opts = {'ignore_timeout': True}
    commands = build_install_cmd_set(issu, image, kick, 'impact')
    data = parse_show_install(load_config(module, commands, True, opts))
    # When 'desired' ISSU fails the impact check, retry the check as a
    # plain disruptive upgrade instead.
    if data['error'] and issu == 'desired':
        commands = build_install_cmd_set('no', image, kick, 'impact')
        # The system may be busy from the previous call to check_mode so loop
        # until it's done.
        data = check_install_in_progress(module, commands, opts)
    if data['server_error']:
        data['error'] = True
    data['upgrade_cmd'] = commands
    return data
def check_install_in_progress(module, commands, opts):
    """Run *commands*, polling up to 20 times (1s apart) while a previous
    'install all' is still in progress, and return the parsed result."""
    attempts_left = 20
    while attempts_left:
        data = parse_show_install(load_config(module, commands, True, opts))
        if not data['install_in_progress']:
            break
        sleep(1)
        attempts_left -= 1
    return data
def check_mode(module, issu, image, kick=None):
    """Check switch upgrade impact using 'show install all impact' command"""
    data = check_mode_nextgen(module, issu, image, kick)
    # Fall back to the legacy 'show version image' method when the impact
    # command either hit an unrecoverable server error or was rejected
    # outright (devices running a separate kickstart and system image
    # reject the impact command). The second trigger is re-checked on the
    # (possibly replaced) data, mirroring the original sequential checks.
    for fallback_trigger in ('server_error', 'invalid_command'):
        if data[fallback_trigger]:
            data = check_mode_legacy(module, issu, image, kick)
    return data
def do_install_all(module, issu, image, kick=None):
    """Perform the switch upgrade using the 'install all' command.

    Runs the check-mode impact analysis first.  In Ansible check mode only
    the impact data is returned.  Otherwise the 'install all' command set
    is issued (retried without the 'force' keyword on platforms that
    reject it).  If the device drops the session mid-upgrade (server
    error), the impact data is substituted for the unavailable install
    output.
    """
    impact_data = check_mode(module, issu, image, kick)
    if module.check_mode:
        # Check mode set in the playbook so just return the impact data.
        msg = '*** SWITCH WAS NOT UPGRADED: IMPACT DATA ONLY ***'
        impact_data['processed'].append(msg)
        return impact_data
    if impact_data['error']:
        # Check mode discovered an error so return with this info.
        return impact_data
    elif not impact_data['upgrade_needed']:
        # The switch is already upgraded. Nothing more to do.
        return impact_data
    else:
        # If we get here, check_mode returned no errors and the switch
        # needs to be upgraded.
        if impact_data['disruptive']:
            # Check mode indicated that ISSU is not possible so issue the
            # upgrade command without the non-disruptive flag unless the
            # playbook specified issu: yes/required.
            if issu == 'yes':
                msg = 'ISSU/ISSD requested but impact data indicates ISSU/ISSD is not possible'
                module.fail_json(msg=msg, raw_data=impact_data['list_data'])
            else:
                issu = 'no'
        commands = build_install_cmd_set(issu, image, kick, 'install')
        opts = {'ignore_timeout': True}
        # The system may be busy from the call to check_mode so loop until
        # it's done.
        upgrade = check_install_in_progress(module, commands, opts)
        if upgrade['invalid_command'] and 'force' in commands[1]:
            # Not all platforms support the 'force' keyword. Check for this
            # condition and re-try without the 'force' keyword if needed.
            commands = build_install_cmd_set(issu, image, kick, 'install', False)
            upgrade = check_install_in_progress(module, commands, opts)
        upgrade['upgrade_cmd'] = commands
        # Special case: If we encounter a server error at this stage
        # it means the command was sent and the upgrade was started but
        # we will need to use the impact data instead of the current install
        # data.
        if upgrade['server_error']:
            upgrade['upgrade_succeeded'] = True
            upgrade['use_impact_data'] = True
        if upgrade['use_impact_data']:
            # Swap in the impact data but preserve the success/failure
            # verdict already recorded for this attempt.
            if upgrade['upgrade_succeeded']:
                upgrade = impact_data
                upgrade['upgrade_succeeded'] = True
            else:
                upgrade = impact_data
                upgrade['upgrade_succeeded'] = False
        if not upgrade['upgrade_succeeded']:
            upgrade['error'] = True
    return upgrade
def main():
    """Ansible entry point: validate parameters and drive the OS upgrade."""
    argument_spec = dict(
        system_image_file=dict(required=True),
        kickstart_image_file=dict(required=False),
        issu=dict(choices=['required', 'desired', 'no', 'yes'], default='no'),
    )
    argument_spec.update(nxos_argument_spec)
    module = AnsibleModule(argument_spec=argument_spec,
                           supports_check_mode=True)
    warnings = list()
    check_args(module, warnings)
    # Get system_image_file(sif), kickstart_image_file(kif) and
    # issu settings from module params.
    sif = module.params['system_image_file']
    kif = module.params['kickstart_image_file']
    issu = module.params['issu']
    # 'required' and 'yes' are treated as synonyms; normalize to 'yes'.
    if re.search(r'(yes|required)', issu):
        issu = 'yes'
    # Treat an absent/placeholder kickstart image as "system image only".
    if kif == 'null' or kif == '':
        kif = None
    install_result = do_install_all(module, issu, sif, kick=kif)
    if install_result['error']:
        cmd = install_result['upgrade_cmd']
        msg = 'Failed to upgrade device using command: %s' % cmd
        module.fail_json(msg=msg, raw_data=install_result['list_data'])
    state = install_result['processed']
    changed = install_result['upgrade_needed']
    module.exit_json(changed=changed, install_state=state, warnings=warnings)
# Standard module entry point when executed as a script.
if __name__ == '__main__':
    main()
|
unknown
|
codeparrot/codeparrot-clean
| ||
#!/usr/bin/env python
#
# Copyright (C) 2015, Cumulus Networks www.cumulusnetworks.com
#
#
DOCUMENTATION = '''
---
module: cl_ports
author: Cumulus Networks
short_description: Configure Cumulus Switch port attributes (ports.conf)
description:
- Set the initial port attribute defined in the Cumulus Linux ports.conf, \
file. This module does not do any error checking at the moment. Be careful to \
not include ports that do not exist on the switch. \
Carefully read the original \
ports.conf file for any exceptions or limitations. \
For more details go the Configure Switch Port Attribute Documentation @ \
http://docs.cumulusnetworks.com
options:
speed_10g:
description:
- list of ports to run initial run at 10G
speed_40g:
description:
- list of ports to run initial run at 40G
speed_4_by_10g:
description:
- list of 40G ports that will be unganged to run as 4 10G ports.
speed_40g_div_4:
description:
- list of 10G ports that will be ganged to form a 40G port
'''
EXAMPLES = '''
Example playbook entries using the cl_ports module to \
manage the switch attributes
defined in the ports.conf file on Cumulus Linux
## Unganged port config using simple args
- name: configure ports.conf setup
cl_ports: speed_4_by_10g="swp1, swp32" speed_40g="swp2-31"
notify: restart switchd
## Unganged port configuration on certain ports using complex args
- name: configure ports.conf setup
cl_ports:
speed_4_by_10g: ['swp1-3', 'swp6']
speed_40g: ['swp4-5', 'swp7-32']
notify: restart switchd
'''
PORTS_CONF = '/etc/cumulus/ports.conf'
def hash_existing_ports_conf(module):
    """Load the current ports.conf into ``module.ports_conf_hash``.

    The hash maps int port numbers to their speed strings; lines that do
    not look like ``<number>=<speed>`` are ignored.  Returns False when
    the file does not exist; fails the module when it cannot be read.
    """
    module.ports_conf_hash = {}
    if not os.path.exists(PORTS_CONF):
        return False
    try:
        # Context manager guarantees the handle is closed (the original
        # leaked an open file object).
        with open(PORTS_CONF) as ports_file:
            existing_ports_conf = ports_file.readlines()
    except IOError as error_msg:
        _msg = "Failed to open %s: %s" % (PORTS_CONF, error_msg)
        module.fail_json(msg=_msg)
        return  # for testing only should return on module.fail_json
    for _line in existing_ports_conf:
        _m0 = re.match(r'^(\d+)=(\w+)', _line)
        if _m0:
            _portnum = int(_m0.group(1))
            _speed = _m0.group(2)
            module.ports_conf_hash[_portnum] = _speed
def generate_new_ports_conf_hash(module):
    """Build ``module.new_ports_hash`` from the module parameters.

    Accepts single ports like ``swp1`` or ranges like ``swp2-31`` and maps
    each port number to its ports.conf speed string.
    """
    new_ports_conf_hash = {}
    convert_hash = {
        'speed_40g_div_4': '40G/4',
        'speed_4_by_10g': '4x10G',
        'speed_10g': '10G',
        'speed_40g': '40G'
    }
    for k in module.params.keys():
        port_range = module.params[k]
        port_setting = convert_hash[k]
        if port_range:
            # Drop empty entries that can result from trailing commas.
            port_range = [x for x in port_range if x]
            for port_str in port_range:
                port_range_str = port_str.replace('swp', '').split('-')
                if len(port_range_str) == 1:
                    new_ports_conf_hash[int(port_range_str[0])] = \
                        port_setting
                else:
                    # list() is required here: on Python 3 map() returns a
                    # non-subscriptable iterator, so the original
                    # int_range[0]/int_range[1] raised TypeError.
                    int_range = list(map(int, port_range_str))
                    portnum_range = range(int_range[0], int_range[1] + 1)
                    for i in portnum_range:
                        new_ports_conf_hash[i] = port_setting
    module.new_ports_hash = new_ports_conf_hash
def compare_new_and_old_port_conf_hash(module):
    """Merge the requested settings into the existing port hash.

    Returns True when the merge changed anything, False when the config is
    already up to date.  Fails the module when the requested ports
    introduce port numbers not present in the existing ports.conf.
    """
    ports_conf_hash_copy = module.ports_conf_hash.copy()
    module.ports_conf_hash.update(module.new_ports_hash)
    port_num_length = len(module.ports_conf_hash.keys())
    orig_port_num_length = len(ports_conf_hash_copy.keys())
    if port_num_length != orig_port_num_length:
        # Message typo fixed: "two few" -> "too few".
        module.fail_json(msg="Port numbering is wrong. \
            Too many or too few ports configured")
        return False
    elif ports_conf_hash_copy == module.ports_conf_hash:
        return False
    return True
def make_copy_of_orig_ports_conf(module):
    """Save a one-time backup of ports.conf as ports.conf.orig."""
    backup_path = PORTS_CONF + '.orig'
    # Never overwrite an existing backup: the first copy is the original.
    if os.path.exists(backup_path):
        return
    try:
        shutil.copyfile(PORTS_CONF, backup_path)
    except IOError as error_msg:
        module.fail_json(
            msg="Failed to save the original %s: %s" % (PORTS_CONF, error_msg))
        return  # for testing only
def write_to_ports_conf(module):
    """
    use tempfile to first write out config in temp file
    then write to actual location. may help prevent file
    corruption. Ports.conf is a critical file for Cumulus.
    Don't want to corrupt this file under any circumstance.
    """
    # mode='w' is required on Python 3: NamedTemporaryFile defaults to
    # binary mode, which rejects str writes.
    temp = tempfile.NamedTemporaryFile(mode='w')
    try:
        temp.write('# Managed By Ansible\n')
        for k in sorted(module.ports_conf_hash.keys()):
            port_setting = module.ports_conf_hash[k]
            _str = "%s=%s\n" % (k, port_setting)
            temp.write(_str)
        # Ensure buffered data reaches the file before copying it by name.
        temp.flush()
        temp.seek(0)
        shutil.copyfile(temp.name, PORTS_CONF)
    except IOError as error_msg:
        module.fail_json(
            msg="Failed to write to %s: %s" % (PORTS_CONF, error_msg))
    finally:
        temp.close()
def main():
    """Ansible entry point: reconcile ports.conf with the requested speeds."""
    # At least one of the four speed lists must be supplied; each entry is
    # either a single port ('swp1') or a range ('swp2-31').
    module = AnsibleModule(
        argument_spec=dict(
            speed_40g_div_4=dict(type='list'),
            speed_4_by_10g=dict(type='list'),
            speed_10g=dict(type='list'),
            speed_40g=dict(type='list')
        ),
        required_one_of=[['speed_40g_div_4',
                          'speed_4_by_10g',
                          'speed_10g',
                          'speed_40g']]
    )
    _changed = False
    # Build the current and requested port->speed maps, then rewrite
    # ports.conf only when they differ (after backing up the original).
    hash_existing_ports_conf(module)
    generate_new_ports_conf_hash(module)
    if compare_new_and_old_port_conf_hash(module):
        make_copy_of_orig_ports_conf(module)
        write_to_ports_conf(module)
        _changed = True
        _msg = "/etc/cumulus/ports.conf changed"
    else:
        _msg = 'No change in /etc/ports.conf'
    module.exit_json(changed=_changed, msg=_msg)
# import module snippets
from ansible.module_utils.basic import *
# from ansible.module_utils.urls import *
import os
import tempfile
import shutil
# Standard module entry point when executed as a script.
if __name__ == '__main__':
    main()
|
unknown
|
codeparrot/codeparrot-clean
| ||
// Copyright IBM Corp. 2016, 2025
// SPDX-License-Identifier: BUSL-1.1
package vault
import (
"context"
"crypto/ecdsa"
"crypto/elliptic"
"encoding/json"
"errors"
"fmt"
"time"
"github.com/armon/go-metrics"
"github.com/go-jose/go-jose/v3"
"github.com/go-jose/go-jose/v3/jwt"
"github.com/hashicorp/vault/api"
"github.com/hashicorp/vault/helper/metricsutil"
"github.com/hashicorp/vault/helper/namespace"
"github.com/hashicorp/vault/sdk/helper/certutil"
"github.com/hashicorp/vault/sdk/helper/consts"
"github.com/hashicorp/vault/sdk/helper/jsonutil"
"github.com/hashicorp/vault/sdk/logical"
)
const (
// The location of the key used to generate response-wrapping JWTs
coreWrappingJWTKeyPath = "core/wrapping/jwtkey"
)
// ensureWrappingKey makes sure the key used to sign response-wrapping
// JWTs exists in the barrier, generating and persisting an ECDSA P-521
// key on first use, and caches the private key on the Core.
func (c *Core) ensureWrappingKey(ctx context.Context) error {
	entry, err := c.barrier.Get(ctx, coreWrappingJWTKeyPath)
	if err != nil {
		return err
	}

	var keyParams certutil.ClusterKeyParams

	if entry == nil {
		// No stored key yet: generate one and persist it through the barrier.
		key, err := ecdsa.GenerateKey(elliptic.P521(), c.secureRandomReader)
		if err != nil {
			return fmt.Errorf("failed to generate wrapping key: %w", err)
		}
		keyParams.D = key.D
		keyParams.X = key.X
		keyParams.Y = key.Y
		keyParams.Type = corePrivateKeyTypeP521
		val, err := jsonutil.EncodeJSON(keyParams)
		if err != nil {
			return fmt.Errorf("failed to encode wrapping key: %w", err)
		}
		entry = &logical.StorageEntry{
			Key:   coreWrappingJWTKeyPath,
			Value: val,
		}
		if err = c.barrier.Put(ctx, entry); err != nil {
			return fmt.Errorf("failed to store wrapping key: %w", err)
		}
	}

	// Redundant if we just created it, but in this case serves as a check anyways
	if err = jsonutil.DecodeJSON(entry.Value, &keyParams); err != nil {
		return fmt.Errorf("failed to decode wrapping key parameters: %w", err)
	}

	c.wrappingJWTKey = &ecdsa.PrivateKey{
		PublicKey: ecdsa.PublicKey{
			Curve: elliptic.P521(),
			X:     keyParams.X,
			Y:     keyParams.Y,
		},
		D: keyParams.D,
	}

	c.logger.Info("loaded wrapping token key")

	return nil
}
// wrapInCubbyhole is invoked when a caller asks for response wrapping.
// On success, return (nil, nil) and mutates resp. On failure, returns
// either a response describing the failure or an error.
func (c *Core) wrapInCubbyhole(ctx context.Context, req *logical.Request, resp *logical.Response, auth *logical.Auth) (*logical.Response, error) {
if c.perfStandby {
return forwardWrapRequest(ctx, c, req, resp, auth)
}
// Before wrapping, obey special rules for listing: if no entries are
// found, 404. This prevents unwrapping only to find empty data.
if req.Operation == logical.ListOperation {
if resp == nil || (len(resp.Data) == 0 && len(resp.Warnings) == 0) {
return nil, logical.ErrUnsupportedPath
}
keysRaw, ok := resp.Data["keys"]
if !ok || keysRaw == nil {
if len(resp.Data) > 0 || len(resp.Warnings) > 0 {
// We could be returning extra metadata on a list, or returning
// warnings with no data, so handle these cases
goto DONELISTHANDLING
}
return nil, logical.ErrUnsupportedPath
}
keys, ok := keysRaw.([]string)
if !ok {
return nil, logical.ErrUnsupportedPath
}
if len(keys) == 0 {
return nil, logical.ErrUnsupportedPath
}
}
DONELISTHANDLING:
var err error
sealWrap := resp.WrapInfo.SealWrap
var ns *namespace.Namespace
// If we are creating a JWT wrapping token we always want them to live in
// the root namespace. These are only used for replication and plugin setup.
switch resp.WrapInfo.Format {
case "jwt":
ns = namespace.RootNamespace
ctx = namespace.ContextWithNamespace(ctx, ns)
default:
ns, err = namespace.FromContext(ctx)
if err != nil {
return nil, err
}
}
// If the response is from a snapshot read or list, we need to make sure
// that hte wrapped value is written to real storage, not to the snapshot
// storage
if req.IsSnapshotReadOrList() {
ctx = logical.CreateContextWithSnapshotID(ctx, "")
}
// If we are wrapping, the first part (performed in this functions) happens
// before auditing so that resp.WrapInfo.Token can contain the HMAC'd
// wrapping token ID in the audit logs, so that it can be determined from
// the audit logs whether the token was ever actually used.
creationTime := time.Now()
te := logical.TokenEntry{
Path: req.Path,
Policies: []string{"response-wrapping"},
CreationTime: creationTime.Unix(),
TTL: resp.WrapInfo.TTL,
NumUses: 1,
ExplicitMaxTTL: resp.WrapInfo.TTL,
NamespaceID: ns.ID,
}
if err := c.CreateToken(ctx, &te); err != nil {
c.logger.Error("failed to create wrapping token", "error", err)
return nil, ErrInternalError
}
// Count the successful token creation
ttl_label := metricsutil.TTLBucket(resp.WrapInfo.TTL)
mountPointWithoutNs := ns.TrimmedPath(req.MountPoint)
c.metricSink.IncrCounterWithLabels(
[]string{"token", "creation"},
1,
[]metrics.Label{
metricsutil.NamespaceLabel(ns),
// The type of the secret engine is not all that useful;
// we could use "token" but let's be more descriptive,
// even if it's not a real auth method.
{"auth_method", "response_wrapping"},
{"mount_point", mountPointWithoutNs},
{"creation_ttl", ttl_label},
// *Should* be service, but let's use whatever create() did..
{"token_type", te.Type.String()},
},
)
resp.WrapInfo.Token = te.ExternalID
resp.WrapInfo.Accessor = te.Accessor
resp.WrapInfo.CreationTime = creationTime
// If this is not a rewrap, store the request path as creation_path
if req.Path != "sys/wrapping/rewrap" {
resp.WrapInfo.CreationPath = req.Path
}
if auth != nil && auth.EntityID != "" {
resp.WrapInfo.WrappedEntityID = auth.EntityID
}
// This will only be non-nil if this response contains a token, so in that
// case put the accessor in the wrap info.
if resp.Auth != nil {
resp.WrapInfo.WrappedAccessor = resp.Auth.Accessor
}
// Store the accessor of the approle secret in WrappedAccessor
if secretIdAccessor, ok := resp.Data["secret_id_accessor"]; ok && resp.Auth == nil && req.MountType == "approle" {
resp.WrapInfo.WrappedAccessor = secretIdAccessor.(string)
}
switch resp.WrapInfo.Format {
case "jwt":
// Create the JWT
claims := jwt.Claims{
// Map the JWT ID to the token ID for ease of use
ID: te.ID,
// Set the issue time to the creation time
IssuedAt: jwt.NewNumericDate(creationTime),
// Set the expiration to the TTL
Expiry: jwt.NewNumericDate(creationTime.Add(resp.WrapInfo.TTL)),
// Set a reasonable not-before time; since unwrapping happens on this
// node we shouldn't have to worry much about drift
NotBefore: jwt.NewNumericDate(time.Now().Add(-5 * time.Second)),
}
type privateClaims struct {
Accessor string `json:"accessor"`
Type string `json:"type"`
Addr string `json:"addr"`
}
priClaims := &privateClaims{
Type: "wrapping",
Addr: c.redirectAddr,
}
if resp.Auth != nil {
priClaims.Accessor = resp.Auth.Accessor
}
sig, err := jose.NewSigner(
jose.SigningKey{Algorithm: jose.SignatureAlgorithm(api.CubbyHoleJWTSignatureAlgorithm), Key: c.wrappingJWTKey},
(&jose.SignerOptions{}).WithType("JWT"))
if err != nil {
c.tokenStore.revokeOrphan(ctx, te.ID)
c.logger.Error("failed to create JWT builder", "error", err)
return nil, ErrInternalError
}
ser, err := jwt.Signed(sig).Claims(claims).Claims(priClaims).CompactSerialize()
if err != nil {
c.tokenStore.revokeOrphan(ctx, te.ID)
c.logger.Error("failed to serialize JWT", "error", err)
return nil, ErrInternalError
}
resp.WrapInfo.Token = ser
if c.redirectAddr == "" {
resp.AddWarning("No redirect address set in Vault so none could be encoded in the token. You may need to supply Vault's API address when unwrapping the token.")
}
}
cubbyReq := &logical.Request{
Operation: logical.CreateOperation,
Path: "cubbyhole/response",
ClientToken: te.ID,
}
if sealWrap {
cubbyReq.WrapInfo = &logical.RequestWrapInfo{
SealWrap: true,
}
}
cubbyReq.SetTokenEntry(&te)
// During a rewrap, store the original response, don't wrap it again.
if req.Path == "sys/wrapping/rewrap" {
cubbyReq.Data = map[string]interface{}{
"response": resp.Data["response"],
}
} else {
httpResponse := logical.LogicalResponseToHTTPResponse(resp)
// Add the unique identifier of the original request to the response
httpResponse.RequestID = req.ID
// Because of the way that JSON encodes (likely just in Go) we actually get
// mixed-up values for ints if we simply put this object in the response
// and encode the whole thing; so instead we marshal it first, then store
// the string response. This actually ends up making it easier on the
// client side, too, as it becomes a straight read-string-pass-to-unmarshal
// operation.
marshaledResponse, err := json.Marshal(httpResponse)
if err != nil {
c.tokenStore.revokeOrphan(ctx, te.ID)
c.logger.Error("failed to marshal wrapped response", "error", err)
return nil, ErrInternalError
}
cubbyReq.Data = map[string]interface{}{
"response": string(marshaledResponse),
}
}
cubbyResp, err := c.router.Route(ctx, cubbyReq)
if err != nil {
// Revoke since it's not yet being tracked for expiration
c.tokenStore.revokeOrphan(ctx, te.ID)
c.logger.Error("failed to store wrapped response information", "error", err)
return nil, ErrInternalError
}
if cubbyResp != nil && cubbyResp.IsError() {
c.tokenStore.revokeOrphan(ctx, te.ID)
c.logger.Error("failed to store wrapped response information", "error", cubbyResp.Data["error"])
return cubbyResp, nil
}
// Store info for lookup
cubbyReq.WrapInfo = nil
cubbyReq.Path = "cubbyhole/wrapinfo"
cubbyReq.Data = map[string]interface{}{
"creation_ttl": resp.WrapInfo.TTL,
"creation_time": creationTime,
}
// Store creation_path if not a rewrap
if req.Path != "sys/wrapping/rewrap" {
cubbyReq.Data["creation_path"] = req.Path
} else {
cubbyReq.Data["creation_path"] = resp.WrapInfo.CreationPath
}
cubbyResp, err = c.router.Route(ctx, cubbyReq)
if err != nil {
// Revoke since it's not yet being tracked for expiration
c.tokenStore.revokeOrphan(ctx, te.ID)
c.logger.Error("failed to store wrapping information", "error", err)
return nil, ErrInternalError
}
if cubbyResp != nil && cubbyResp.IsError() {
c.tokenStore.revokeOrphan(ctx, te.ID)
c.logger.Error("failed to store wrapping information", "error", cubbyResp.Data["error"])
return cubbyResp, nil
}
wAuth := &logical.Auth{
ClientToken: te.ID,
Policies: []string{"response-wrapping"},
LeaseOptions: logical.LeaseOptions{
TTL: te.TTL,
Renewable: false,
},
}
// Register the wrapped token with the expiration manager. We skip the role
// lookup here as we are not logging in, and only logins apply to role based quotas.
if err := c.expiration.RegisterAuth(ctx, &te, wAuth, ""); err != nil {
// Revoke since it's not yet being tracked for expiration
c.tokenStore.revokeOrphan(ctx, te.ID)
c.logger.Error("failed to register cubbyhole wrapping token lease", "request_path", req.Path, "error", err)
return nil, ErrInternalError
}
return nil, nil
}
// validateWrappingToken checks whether a token is a wrapping token. The passed
// in logical request will be updated if the wrapping token was provided within
// a JWT token.
func (c *Core) validateWrappingToken(ctx context.Context, req *logical.Request) (valid bool, err error) {
if req == nil {
return false, fmt.Errorf("invalid request")
}
if c.Sealed() {
return false, consts.ErrSealed
}
if c.standby && !c.perfStandby {
return false, consts.ErrStandby
}
defer func() {
// Perform audit logging before returning if there's an issue with checking
// the wrapping token
if err != nil || !valid {
// We log the Auth object like so here since the wrapping token can
// come from the header, which gets set as the ClientToken
auth := &logical.Auth{
ClientToken: req.ClientToken,
Accessor: req.ClientTokenAccessor,
}
logInput := &logical.LogInput{
Auth: auth,
Request: req,
}
if err != nil {
logInput.OuterErr = errors.New("error validating wrapping token")
}
if !valid {
logInput.OuterErr = consts.ErrInvalidWrappingToken
}
if err := c.auditBroker.LogRequest(ctx, logInput); err != nil {
c.logger.Error("failed to audit request", "path", req.Path, "error", err)
}
}
}()
var token string
var thirdParty bool
// Check if the wrapping token is coming from the request body, and if not
// assume that req.ClientToken is the wrapping token
if req.Data != nil && req.Data["token"] != nil {
thirdParty = true
if tokenStr, ok := req.Data["token"].(string); !ok {
return false, fmt.Errorf("could not decode token in request body")
} else if tokenStr == "" {
return false, fmt.Errorf("empty token in request body")
} else {
token = tokenStr
}
} else {
token = req.ClientToken
}
// Check for it being a JWT. If it is, and it is valid, we extract the
// internal client token from it and use that during lookup. The second
// check is a quick check to verify that we don't consider a namespaced
// token to be a JWT -- namespaced tokens have two dots too, but Vault
// token types (for now at least) begin with a letter representing a type
// and then a dot.
if IsJWT(token) {
// Implement the jose library way
parsedJWT, err := jwt.ParseSigned(token)
if err != nil {
return false, fmt.Errorf("wrapping token could not be parsed: %w", err)
}
var claims jwt.Claims
allClaims := make(map[string]interface{})
if err = parsedJWT.Claims(&c.wrappingJWTKey.PublicKey, &claims, &allClaims); err != nil {
return false, fmt.Errorf("wrapping token signature could not be validated: %w", err)
}
typeClaimRaw, ok := allClaims["type"]
if !ok {
return false, errors.New("could not validate type claim")
}
typeClaim, ok := typeClaimRaw.(string)
if !ok {
return false, errors.New("could not parse type claim")
}
if typeClaim != "wrapping" {
return false, errors.New("unexpected type claim")
}
if !thirdParty {
req.ClientToken = claims.ID
} else {
req.Data["token"] = claims.ID
}
token = claims.ID
}
if token == "" {
return false, fmt.Errorf("token is empty")
}
te, err := c.tokenStore.Lookup(ctx, token)
if err != nil {
return false, err
}
if te == nil {
return false, nil
}
if !IsWrappingToken(te) {
return false, nil
}
if !thirdParty {
req.ClientTokenAccessor = te.Accessor
req.ClientTokenRemainingUses = te.NumUses
req.SetTokenEntry(te)
}
return true, nil
}
// IsWrappingToken reports whether the token entry represents a
// response-wrapping (or control-group) token: exactly one policy, and
// that policy is one of the two wrapping policies.
func IsWrappingToken(te *logical.TokenEntry) bool {
	if len(te.Policies) != 1 {
		return false
	}
	policy := te.Policies[0]
	return policy == responseWrappingPolicyName || policy == controlGroupPolicyName
}
|
go
|
github
|
https://github.com/hashicorp/vault
|
vault/wrapping.go
|
'''
This module defines :class:`Block`, the main container gathering all the data,
whether discrete or continuous, for a given recording session.
:class:`Block` derives from :class:`Container`, the base class
used by all :module:`neo.core` classes, from :module:`neo.core.container`.
'''
from datetime import datetime
from neo.core.container import Container, unique_objs
class Block(Container):
    '''
    Main container gathering all the data, whether discrete or continuous,
    for a given recording session.

    A block is not necessarily temporally homogeneous, in contrast to
    :class:`Segment`.

    *Usage*::

        >>> from neo.core import Block, Segment, Group, AnalogSignal
        >>> from quantities import nA, kHz
        >>> import numpy as np
        >>>
        >>> # create a Block with 3 Segment and 2 Group objects
        >>> blk = Block()
        >>> for ind in range(3):
        ...     seg = Segment(name='segment %d' % ind, index=ind)
        ...     blk.segments.append(seg)
        ...
        >>> for ind in range(2):
        ...     group = Group(name='Array probe %d' % ind)
        ...     blk.groups.append(group)
        ...
        >>> # Populate the Block with AnalogSignal objects
        ... for seg in blk.segments:
        ...     for group in blk.groups:
        ...         a = AnalogSignal(np.random.randn(10000, 64)*nA,
        ...                          sampling_rate=10*kHz)
        ...         group.analogsignals.append(a)
        ...         seg.analogsignals.append(a)

    *Required attributes/properties*:
        None

    *Recommended attributes/properties*:
        :name: (str) A label for the dataset.
        :description: (str) Text description.
        :file_origin: (str) Filesystem path or URL of the original data file.
        :file_datetime: (datetime) The creation date and time of the original
            data file.
        :rec_datetime: (datetime) The date and time of the original recording.

    *Properties available on this object*:
        :list_units: (deprecated) descends through hierarchy and returns a list
            of :class:`Unit` objects existing in the block. This shortcut
            exists because a common analysis case is analyzing all neurons
            that you recorded in a session.

    Note: Any other additional arguments are assumed to be user-specific
    metadata and stored in :attr:`annotations`.

    *Container of*:
        :class:`Segment`
        :class:`Group`
        :class:`ChannelIndex` (deprecated)
    '''

    # Class-level metadata used by the Container machinery.
    _container_child_objects = ('Segment', 'ChannelIndex', 'Group')
    _child_properties = ('Unit',)
    _recommended_attrs = ((('file_datetime', datetime),
                           ('rec_datetime', datetime),
                           ('index', int)) +
                          Container._recommended_attrs)
    _repr_pretty_attrs_keys_ = (Container._repr_pretty_attrs_keys_ +
                                ('file_origin', 'file_datetime',
                                 'rec_datetime', 'index'))
    _repr_pretty_containers = ('segments',)

    def __init__(self, name=None, description=None, file_origin=None,
                 file_datetime=None, rec_datetime=None, index=None,
                 **annotations):
        '''
        Initialize a new :class:`Block` instance.
        '''
        super().__init__(name=name, description=description,
                         file_origin=file_origin, **annotations)
        self.file_datetime = file_datetime
        self.rec_datetime = rec_datetime
        self.index = index
        self.regionsofinterest = []  # temporary workaround.
        # the goal is to store all sub-classes of RegionOfInterest in a single list
        # but this will need substantial changes to container handling

    @property
    def data_children_recur(self):
        '''
        All data child objects stored in the current object,
        obtained recursively.
        '''
        # subclassing this to remove duplicate objects such as SpikeTrain
        # objects in both Segment and Unit
        # Only Block can have duplicate items right now, so implement
        # this here for performance reasons.
        return tuple(unique_objs(super().data_children_recur))

    def list_children_by_class(self, cls):
        '''
        List all children of a particular class recursively.

        You can either provide a class object, a class name,
        or the name of the container storing the class.
        '''
        # subclassing this to remove duplicate objects such as SpikeTrain
        # objects in both Segment and Unit
        # Only Block can have duplicate items right now, so implement
        # this here for performance reasons.
        return unique_objs(super().list_children_by_class(cls))

    @property
    def list_units(self):
        '''
        Return a list of all :class:`Unit` objects in the :class:`Block`.
        '''
        return self.list_children_by_class('unit')
|
unknown
|
codeparrot/codeparrot-clean
| ||
""" CS4HS Website Generator
AUTHOR: Jack Morgan
REQUIRES: Python >= 3.4.1
"""
CURRENT_DIRECTORY = '.'
OUTPUT_DIRECTORY = './output/'
TEXT_FOLDER = './text/'
FOLDERS_TO_COPY = ['css', 'files', 'img', 'js']
"""Check and install dependencies"""
import pip
# NOTE(review): pip.main() was removed from pip's public API in pip >= 10,
# and installing packages at import time is a side effect — consider moving
# these to a requirements.txt. Behavior intentionally left unchanged here.
# Update pip if needed and install dependencies
pip.main(['install', 'pip>=7.0.3'])
pip.main(['install', 'jinja2>=2.7.3'])
import os
import os.path
import shutil
import argparse
from jinja2 import Environment, FileSystemLoader
class WebsiteGenerator:
    """Renders the CS4HS website pages with Jinja2 templates."""

    def __init__(self):
        # Templates are resolved from the working directory first, then
        # from the shared templates/ folder.
        search_paths = [CURRENT_DIRECTORY, 'templates/']
        self.env = Environment(loader=FileSystemLoader(search_paths))

    def render_html(self, template):
        """Load *template* and return its rendered HTML string."""
        return self.env.get_template(template).render()
def write_html(html, file):
    """Write rendered *html* to OUTPUT_DIRECTORY/*file* with 0644 permissions.

    Write failures are reported but do not abort the build.
    """
    file_name = os.path.join(OUTPUT_DIRECTORY, file)
    try:
        with open(file_name, 'w', encoding='utf8') as output_file:
            output_file.write(html)
        print('Created {}'.format(file))
        os.chmod(file_name, 0o644)
    except OSError:
        # Narrowed from a bare 'except:', which also swallowed
        # KeyboardInterrupt/SystemExit and hid programming errors.
        print("Cannot write {0}".format(file))
def copy_files():
    """Copy every static asset folder into the output directory."""
    for folder in FOLDERS_TO_COPY:
        source = os.path.join(CURRENT_DIRECTORY, folder)
        destination = os.path.join(OUTPUT_DIRECTORY, folder)
        # copytree refuses to overwrite, so clear any stale copy first.
        if os.path.exists(destination):
            shutil.rmtree(destination)
        shutil.copytree(source, destination)
        os.chmod(destination, 0o2775)
        apply_file_permissions_to_folder(destination)
        print("Copied {} folder".format(folder))
def apply_file_permissions_to_folder(folder_name):
    """Recursively chmod directories to 2775 and files to 644."""
    for root, subfolders, filenames in os.walk(folder_name):
        for name in subfolders:
            os.chmod(os.path.join(root, name), 0o2775)
        for name in filenames:
            os.chmod(os.path.join(root, name), 0o644)
def command_line_args():
    """Parse and return the generator's command-line arguments.

    Supports a single flag, --pre-conference/-p, which restricts the
    build to the pre-conference index page.
    """
    parser = argparse.ArgumentParser(description='CS4HS Generator Argument')
    parser.add_argument(
        '--pre-conference', '-p',
        action='store_true',
        dest='pre_conference',
        help='Creates only index page for pre-conference')
    return parser.parse_args()
def main():
    """Create template engine and process all HTML files
    in the top directory"""
    cmd_args = command_line_args()
    website_generator = WebsiteGenerator()
    if cmd_args.pre_conference:
        # Pre-conference mode builds only the placeholder page.
        files = ['pre-index.html']
    else:
        files = os.listdir(TEXT_FOLDER)
        files.remove('pre-index.html')
    # Render all HTML files in top directory
    for file in files:
        if file.endswith('.html'):
            file_path = os.path.join(TEXT_FOLDER, file)
            html = website_generator.render_html(file_path)
            if cmd_args.pre_conference:
                # The pre-conference page is published as the site index.
                write_html(html, 'index.html')
            else:
                write_html(html, file)
    copy_files()
# Script entry point.
if __name__ == "__main__":
    main()
|
unknown
|
codeparrot/codeparrot-clean
| ||
"""
Iterators providing indices for different kinds of iteration over
datasets.
Presets:
- sequential: iterates through fixed slices of the dataset in sequence
- shuffled_sequential: iterates through a shuffled version of the dataset
in sequence
- random_slice: on each call to next, returns a slice of the dataset,
chosen uniformly at random over contiguous slices.
Samples with replacement, but still reports that
container is empty after num_examples / batch_size calls
- random_uniform: on each call to next, returns a random subset of the
dataset. Samples with replacement, but still reports that
container is empty after num_examples / batch_size calls
"""
from __future__ import division
import warnings
import numpy as np
from theano.compat import six
from pylearn2.space import CompositeSpace
from pylearn2.utils import safe_izip, wraps
from pylearn2.utils.data_specs import is_flat_specs
from pylearn2.utils.exc import reraise_as
from pylearn2.utils.rng import make_np_rng
# Make sure that the docstring uses restructured text list format.
# If you change the module-level docstring, please re-run
# pylearn2/doc/scripts/docgen.py and make sure sphinx doesn't issue any
# warnings for this file.
# This particular docstring was being frequently broken prior to the
# addition of this test.
# TODO: have nosetests run docgen.py in warning=error mode, remove
# tests for specific conditions
assert """Presets:
- sequential: iterates through fixed slices of the dataset in sequence
- s""" in __doc__
class SubsetIterator(object):
    """
    An iterator that returns slices or lists of indices into a dataset
    of a given fixed size.
    Parameters
    ----------
    dataset_size : int
        The number of examples, total, in the dataset.
    batch_size : int, optional
        The (typical/maximum) number of examples per batch. Less
        may be returned in the very last batch if batch size
        does not evenly divide `dataset_size`.
    num_batches : int, optional
        The number of batches to return. Needn't be specified
        if `batch_size` is specified. If both `batch_size` and
        `num_batches` are specified then it must be true that
        `batch_size * num_batches <= dataset_size`.
    rng : `np.random.RandomState` or seed, optional
        A `np.random.RandomState` object or the seed to be
        used to create one. A deterministic default seed is
        used otherwise.
    """
    # This breaks the doc generation, so until we figure out why, not in the
    # docstring.
    #
    # Attributes
    # ----------
    # batch_size : int
    # num_batches : int
    # num_examples : int
    # uneven : bool
    # fancy : bool
    #     `True` if this iterator produces lists of indices,
    #     `False` if it produces slices.
    # stochastic : bool
    #     `True` if this iterator makes use of the random number
    #     generator, and will therefore produce different sequences
    #     depending on the RNG state. `False` otherwise.
    def __init__(self, dataset_size, batch_size=None,
                 num_batches=None, rng=None):
        # Abstract base class: subclasses must implement construction.
        raise NotImplementedError()
    def next(self):
        """
        Retrieves description of the next batch of examples.
        Returns
        -------
        next_batch : `slice` or list of int
            An object describing the indices in the dataset of
            a batch of data. Either a `slice` object or a list
            of integers specifying individual indices of
            examples.
        Raises
        ------
        StopIteration
            When there are no more batches to return.
        """
        raise NotImplementedError()
    def __next__(self):
        # Python 3 iterator protocol, delegating to the Python 2-style
        # `next` method.
        # BUGFIX: the original called self.next() without returning its
        # result, so every Python 3 iteration step yielded None instead
        # of the batch description.
        return self.next()
    def __iter__(self):
        return self
    # Does this return subsets that need fancy indexing? (i.e. lists
    # of indices)
    fancy = False
    # Does this class make use of random number generators?
    stochastic = False
    # Does it ensure that every batch has the same size?
    uniform_batch_size = False
    @property
    def batch_size(self):
        """
        The (maximum) number of examples in each batch.
        Returns
        -------
        batch_size : int
            The (maximum) number of examples in each batch. This is
            either as specified via the constructor, or inferred from
            the dataset size and the number of batches requested.
        """
        return self._batch_size
    @property
    def num_batches(self):
        """
        The total number of batches that the iterator will ever return.
        Returns
        -------
        num_batches : int
            The total number of batches the iterator will ever return.
            This is either as specified via the constructor, or
            inferred from the dataset size and the batch size.
        """
        return self._num_batches
    @property
    def num_examples(self):
        """
        The total number of examples over which the iterator operates.
        Returns
        -------
        num_examples : int
            The total number of examples over which the iterator operates.
            May be less than the dataset size.
        """
        return self.batch_size * self.num_batches
    @property
    def uneven(self):
        """
        Whether every batch will be the same size.
        Returns
        -------
        uneven : bool
            `True` if returned batches may be of differing sizes,
            `False` otherwise.
        """
        raise NotImplementedError()
class ForcedEvenIterator(SubsetIterator):
    """
    A class which wraps other iterators to ensure equal batch size.
    This class needs to be completed using type() metaclass, see
    Examples section to see how to use it.
    Parameters
    ----------
    dataset_size : int
        Total number of examples in the dataset
    batch_size : int or None
        The size of the batches.
        If set to None and num_batches is defined, batch_size will be
        calculated based on dataset_size.
    num_batches : int or None
        The number of batches in the dataset.
        If set to None and batch_size is defined, num_batches will be
        calculated based on dataset_size.
    *args : Variable length argument list for _base_iterator_cls
    **kwargs : Arbitrary keyword arguments for _base_iterator_cls
    Notes
    -----
    This class can not be initialized because it needs to be completed
    using type() metaclass. See Examples section for more details.
    Batches of size unequal to batch_size will be discarded. Those
    examples will never be visited.
    Examples
    --------
    >>> dct = ForcedEvenIterator.__dict__.copy()
    >>> dct["_base_iterator_cls"] = SequentialSubsetIterator
    >>> dct["fancy"] = SequentialSubsetIterator.fancy
    >>> dct["stochastic"] = SequentialSubsetIterator.stochastic
    >>>
    >>> NewForcedEvenClass = type("ForcedEvenDummyIterator",
    ...                           ForcedEvenIterator.__bases__, dct)
    >>>
    >>> even_iterator = NewForcedEvenClass(dataset_size=100,
    ...                                    batch_size=30, num_batches=None)
    For a shortcut use function as_even()
    >>> NewForcedEvenClass = as_even(SequentialSubsetIterator)
    >>> even_iterator = NewForcedEvenClass(dataset_size=100,
    ...                                    batch_size=30, num_batches=None)
    """
    def __init__(self, dataset_size, batch_size, num_batches, *args, **kwargs):
        # Refuse to construct unless the class template was completed
        # (via type() or as_even) with concrete values for fancy,
        # stochastic and _base_iterator_cls.
        if self.fancy is None or self.stochastic is None or \
                self._base_iterator_cls is None:
            raise ValueError("You must pre-define fancy, stochastic and "
                             "_base_iterator_cls arguments by creating a new "
                             "class using the metaclass type()."
                             "See function as_even() for an example.")
        # Derive whichever of batch_size/num_batches is missing; both
        # being None is an error, and requesting more equal-size batches
        # than the dataset can supply is rejected.
        if batch_size is None:
            if num_batches is not None:
                batch_size = int(dataset_size / num_batches)
            else:
                raise ValueError("need one of batch_size, num_batches "
                                 "for sequential batch iteration")
        elif batch_size is not None:
            if num_batches is not None:
                max_num_batches = int(dataset_size / batch_size)
                if num_batches > max_num_batches:
                    raise ValueError("dataset of %d examples can only provide "
                                     "%d batches of equal size with batch_size"
                                     " %d, but %d batches were requested" %
                                     (dataset_size, max_num_batches,
                                      batch_size, num_batches))
            else:
                num_batches = int(dataset_size / batch_size)
        # All iteration state lives in the wrapped base iterator; this
        # wrapper only filters its output.
        self._base_iterator = self._base_iterator_cls(dataset_size, batch_size,
                                                      num_batches, *args,
                                                      **kwargs)
    # Does it ensure that every batch has the same size?
    uniform_batch_size = True
    # Does this return subsets that need fancy indexing? (i.e. lists
    # of indices)
    # Needs to be set before initialization. See Examples section in class docs
    fancy = None
    # Does this class make use of random number generators?
    # Needs to be set before initialization. See Examples section in class docs
    stochastic = None
    # base iterator that ForcedEvenIterator class wraps
    # Needs to be set before initialization. See Examples section in class docs
    _base_iterator_cls = None
    @property
    def _dataset_size(self):
        # Delegated to the wrapped iterator.
        return self._base_iterator._dataset_size
    @property
    def _batch_size(self):
        return self._base_iterator._batch_size
    @property
    def _num_batches(self):
        return self._base_iterator._num_batches
    @property
    def num_examples(self):
        """
        Number of examples that will be visited
        by the iterator. (May be lower than dataset_size)
        """
        product = self.batch_size * self.num_batches
        if product > self._dataset_size:
            # The final, truncated batch is discarded by next(), so its
            # examples are never visited.
            return self.batch_size * (self.num_batches - 1)
        else:
            return product
    def next(self):
        """
        Returns next batch of _base_iterator
        Raises
        ------
        StopIteration
            When _base_iterator reaches the end of the dataset
        Notes
        -----
        Uneven batches may be discarded and StopIteration
        will be raised without having iterated through
        every example.
        """
        length = -1
        # check if the batch has wrong length, throw it away
        while length != self.batch_size:
            batch = self._base_iterator.next()
            if isinstance(batch, slice):
                length = batch.stop-batch.start
            else:
                length = len(batch)
        return batch
    def __next__(self):
        return self.next()
def as_even(iterator_cls):
    """
    Returns a class wrapping iterator_cls that forces equal batch size.
    Parameters
    ----------
    iterator_cls : class
        An iterator class that inherits from SubsetIterator
    Returns
    -------
    class
        A new class named ``ForcedEven<iterator_cls name>``, built from
        ForcedEvenIterator's namespace, that wraps `iterator_cls` and
        discards any batch whose size differs from `batch_size`.
    """
    assert issubclass(iterator_cls, SubsetIterator)
    # Complete the ForcedEvenIterator "template" by filling in the three
    # placeholders it requires before construction.
    namespace = dict(ForcedEvenIterator.__dict__)
    namespace.update(
        _base_iterator_cls=iterator_cls,
        fancy=iterator_cls.fancy,
        stochastic=iterator_cls.stochastic,
    )
    return type("ForcedEven%s" % iterator_cls.__name__,
                ForcedEvenIterator.__bases__, namespace)
class SequentialSubsetIterator(SubsetIterator):
    """
    Returns mini-batches proceeding sequentially through the dataset.
    Notes
    -----
    Returns slice objects to represent ranges of indices (`fancy = False`).
    See :py:class:`SubsetIterator` for detailed constructor parameter
    and attribute documentation.
    """
    def __init__(self, dataset_size, batch_size, num_batches, rng=None):
        # Sequential iteration is deterministic, so an RNG is rejected.
        if rng is not None:
            raise ValueError("non-None rng argument not supported for "
                             "sequential batch iteration")
        assert num_batches is None or num_batches >= 0
        self._dataset_size = dataset_size
        # Derive whichever of batch_size/num_batches was omitted; both
        # being None is an error.
        if batch_size is None:
            if num_batches is not None:
                batch_size = int(np.ceil(self._dataset_size / num_batches))
            else:
                raise ValueError("need one of batch_size, num_batches "
                                 "for sequential batch iteration")
        elif batch_size is not None:
            if num_batches is not None:
                max_num_batches = np.ceil(self._dataset_size / batch_size)
                if num_batches > max_num_batches:
                    raise ValueError("dataset of %d examples can only provide "
                                     "%d batches with batch_size %d, but %d "
                                     "batches were requested" %
                                     (self._dataset_size, max_num_batches,
                                      batch_size, num_batches))
            else:
                # NOTE(review): np.ceil yields a float here, so
                # num_batches (and num_examples) may be a float;
                # BatchwiseShuffledSequentialIterator casts to int
                # instead -- confirm whether the asymmetry is intended.
                num_batches = np.ceil(self._dataset_size / batch_size)
        self._batch_size = batch_size
        self._num_batches = num_batches
        self._next_batch_no = 0
        # Index of the first example of the next batch to serve.
        self._idx = 0
        # Number of full batches served so far.
        self._batch = 0
    @wraps(SubsetIterator.next, assigned=(), updated=())
    def next(self):
        if self._batch >= self.num_batches or self._idx >= self._dataset_size:
            raise StopIteration()
        # this fix the problem where dataset_size % batch_size != 0
        elif (self._idx + self._batch_size) > self._dataset_size:
            # Final (possibly shorter) batch: consume the remainder.
            self._last = slice(self._idx, self._dataset_size)
            self._idx = self._dataset_size
            return self._last
        else:
            self._last = slice(self._idx, self._idx + self._batch_size)
            self._idx += self._batch_size
            self._batch += 1
            return self._last
    def __next__(self):
        return self.next()
    fancy = False
    stochastic = False
    uniform_batch_size = False
    @property
    @wraps(SubsetIterator.num_examples, assigned=(), updated=())
    def num_examples(self):
        product = self.batch_size * self.num_batches
        return min(product, self._dataset_size)
    @property
    @wraps(SubsetIterator.uneven, assigned=(), updated=())
    def uneven(self):
        # True when the requested batches over-cover the dataset, i.e.
        # the last batch will be shorter.
        return self.batch_size * self.num_batches > self._dataset_size
class ShuffledSequentialSubsetIterator(SequentialSubsetIterator):
    """
    Randomly shuffles the example indices and then proceeds sequentially
    through the permutation.
    Notes
    -----
    Returns lists of indices (`fancy = True`).
    See :py:class:`SubsetIterator` for detailed constructor parameter
    and attribute documentation.
    """
    stochastic = True
    fancy = True
    uniform_batch_size = False
    def __init__(self, dataset_size, batch_size, num_batches, rng=None):
        # Parent validates/derives batch_size and num_batches; rng is
        # passed as None because the parent rejects a non-None rng.
        super(ShuffledSequentialSubsetIterator, self).__init__(
            dataset_size,
            batch_size,
            num_batches,
            None
        )
        self._rng = make_np_rng(rng, which_method=["random_integers",
                                                   "shuffle"])
        # One fixed random permutation of all example indices; batches
        # are contiguous runs of this permutation.
        self._shuffled = np.arange(self._dataset_size)
        self._rng.shuffle(self._shuffled)
    @wraps(SubsetIterator.next)
    def next(self):
        if self._batch >= self.num_batches or self._idx >= self._dataset_size:
            raise StopIteration()
        # this fix the problem where dataset_size % batch_size != 0
        elif (self._idx + self._batch_size) > self._dataset_size:
            # Final (possibly shorter) batch of shuffled indices.
            rval = self._shuffled[self._idx: self._dataset_size]
            self._idx = self._dataset_size
            return rval
        else:
            rval = self._shuffled[self._idx: self._idx + self._batch_size]
            self._idx += self._batch_size
            self._batch += 1
            return rval
    def __next__(self):
        return self.next()
class RandomUniformSubsetIterator(SubsetIterator):
    """
    Selects minibatches of examples by drawing indices uniformly
    at random, with replacement.
    Notes
    -----
    Returns lists of indices (`fancy = True`).
    See :py:class:`SubsetIterator` for detailed constructor parameter
    and attribute documentation.
    """
    def __init__(self, dataset_size, batch_size, num_batches, rng=None):
        self._rng = make_np_rng(rng, which_method=["random_integers",
                                                   "shuffle"])
        # Sampling with replacement has no natural end point, so both
        # batch_size and num_batches must be given explicitly.
        if batch_size is None:
            raise ValueError("batch_size cannot be None for random uniform "
                             "iteration")
        elif num_batches is None:
            raise ValueError("num_batches cannot be None for random uniform "
                             "iteration")
        self._dataset_size = dataset_size
        self._batch_size = batch_size
        self._num_batches = num_batches
        self._next_batch_no = 0
    @wraps(SubsetIterator.next)
    def next(self):
        if self._next_batch_no >= self._num_batches:
            raise StopIteration()
        else:
            # Draw batch_size indices i.i.d. uniform over
            # [0, dataset_size - 1] (inclusive).
            self._last = self._rng.random_integers(low=0,
                                                   high=self._dataset_size - 1,
                                                   size=(self._batch_size,))
            self._next_batch_no += 1
            return self._last
    def __next__(self):
        return self.next()
    fancy = True
    stochastic = True
    uniform_batch_size = True
class RandomSliceSubsetIterator(RandomUniformSubsetIterator):
    """
    Returns minibatches that are randomly selected contiguous slices in
    index space.
    Notes
    -----
    Returns slice objects to represent ranges of indices (`fancy = False`).
    See :py:class:`SubsetIterator` for detailed constructor parameter
    and attribute documentation.
    """
    def __init__(self, dataset_size, batch_size, num_batches, rng=None):
        if batch_size is None:
            raise ValueError("batch_size cannot be None for random slice "
                             "iteration")
        elif num_batches is None:
            raise ValueError("num_batches cannot be None for random slice "
                             "iteration")
        # Parent stores sizes and sets up the RNG.
        super(RandomSliceSubsetIterator, self).__init__(dataset_size,
                                                        batch_size,
                                                        num_batches, rng)
        # Largest valid slice start so that a full batch still fits.
        self._last_start = self._dataset_size - self._batch_size
        if self._last_start < 0:
            raise ValueError("batch_size > dataset_size not supported for "
                             "random slice iteration")
    @wraps(SubsetIterator.next)
    def next(self):
        if self._next_batch_no >= self._num_batches:
            raise StopIteration()
        else:
            # Uniformly choose a start; the slice always has full length.
            start = self._rng.random_integers(low=0, high=self._last_start)
            self._last = slice(start, start + self._batch_size)
            self._next_batch_no += 1
            return self._last
    def __next__(self):
        return self.next()
    fancy = False
    stochastic = True
    uniform_batch_size = True
class BatchwiseShuffledSequentialIterator(SequentialSubsetIterator):
    """
    Returns minibatches randomly, but sequential inside each minibatch.
    Notes
    -----
    Returns slice objects to represent ranges of indices (`fancy = False`).
    See :py:class:`SubsetIterator` for detailed constructor parameter
    and attribute documentation.
    """
    def __init__(self, dataset_size, batch_size, num_batches=None, rng=None):
        self._rng = make_np_rng(rng, which_method=["random_integers",
                                                   "shuffle"])
        assert num_batches is None or num_batches >= 0
        self._dataset_size = dataset_size
        # Derive whichever of batch_size/num_batches was omitted; both
        # being None is an error.
        if batch_size is None:
            if num_batches is not None:
                batch_size = int(np.ceil(self._dataset_size / num_batches))
            else:
                raise ValueError("need one of batch_size, num_batches "
                                 "for sequential batch iteration")
        elif batch_size is not None:
            if num_batches is not None:
                max_num_batches = np.ceil(self._dataset_size / batch_size)
                if num_batches > max_num_batches:
                    raise ValueError("dataset of %d examples can only provide "
                                     "%d batches with batch_size %d, but %d "
                                     "batches were requested" %
                                     (self._dataset_size, max_num_batches,
                                      batch_size, num_batches))
            else:
                num_batches = np.ceil(self._dataset_size / batch_size)
        self._batch_size = batch_size
        self._num_batches = int(num_batches)
        self._next_batch_no = 0
        self._idx = 0
        # The fixed contiguous batches are visited in a shuffled order.
        self._batch_order = list(range(self._num_batches))
        self._rng.shuffle(self._batch_order)
    @wraps(SubsetIterator.next)
    def next(self):
        if self._next_batch_no >= self._num_batches:
            raise StopIteration()
        else:
            start = self._batch_order[self._next_batch_no] * self._batch_size
            # The batch at the end of index space may be truncated when
            # batch_size does not divide dataset_size evenly.
            if start + self._batch_size > self._dataset_size:
                self._last = slice(start, self._dataset_size)
            else:
                self._last = slice(start, start + self._batch_size)
            self._next_batch_no += 1
            return self._last
    def __next__(self):
        return self.next()
    fancy = False
    stochastic = True
    uniform_batch_size = False
# Registry mapping the iteration-mode strings accepted by
# resolve_iterator_class to their SubsetIterator implementations.  The
# 'even_*' entries wrap the base iterators via as_even() so that every
# returned batch has exactly batch_size examples.
_iteration_schemes = {
    'sequential': SequentialSubsetIterator,
    'shuffled_sequential': ShuffledSequentialSubsetIterator,
    'random_slice': RandomSliceSubsetIterator,
    'random_uniform': RandomUniformSubsetIterator,
    'batchwise_shuffled_sequential': BatchwiseShuffledSequentialIterator,
    'even_sequential': as_even(SequentialSubsetIterator),
    'even_shuffled_sequential': as_even(ShuffledSequentialSubsetIterator),
    'even_batchwise_shuffled_sequential':
    as_even(BatchwiseShuffledSequentialIterator),
}
def has_uniform_batch_size(mode):
    """
    Returns True if the iteration scheme has uniform batch size,
    False if not
    Parameters
    ----------
    mode: string
        A string defining an iteration scheme in _iteration_schemes
    Returns
    -------
    boolean
        True if the iteration scheme has uniform batch size,
        False otherwise
    """
    # Resolve the mode to its iterator class, then consult the class's
    # uniform_batch_size flag.
    iterator_cls = resolve_iterator_class(mode)
    return iterator_cls.uniform_batch_size
def is_stochastic(mode):
    """
    Returns True if the iteration scheme is stochastic, False if not
    Parameters
    ----------
    mode: string
        A string defining an iteration scheme in _iteration_schemes
    Returns
    -------
    boolean
        True if the resolved iterator class reports that it uses a
        random number generator (its `stochastic` attribute), False
        otherwise
    """
    return resolve_iterator_class(mode).stochastic
def resolve_iterator_class(mode):
    """
    Map textual representations of default iteration modes to classes.
    Parameters
    ----------
    mode : str or class object
        If a string, identifier string for the built-in iteration modes.
        See the module documentation of :py:mod:`pylearn2.utils.iteration`
        for a list of available modes. If a class, it is expected to
        be a class that respects the constructor and attribute interface
        defined in :py:class:`SubsetIterator`.
    Returns
    -------
    subset_iter_class : class
        The iterator class registered under `mode`, or `mode` itself
        when it is already a class implementing the
        :py:class:`SubsetIterator` interface.
    """
    # A string that names no registered scheme is an error.
    if isinstance(mode, six.string_types) and mode not in _iteration_schemes:
        raise ValueError("unknown iteration mode string: %s" % mode)
    # Known mode string: look up its class in the registry.
    if mode in _iteration_schemes:
        return _iteration_schemes[mode]
    # Otherwise assume the caller already supplied an iterator class.
    return mode
class FiniteDatasetIterator(object):
    """
    A wrapper around subset iterators that actually retrieves
    data.
    Parameters
    ----------
    dataset : `Dataset` object
        The dataset over which to iterate.
    data_specs : tuple
        A `(space, source)` tuple. See :ref:`data_specs` for a full
        description. Must not contain nested composite spaces.
    subset_iterator : object
        An iterator object that returns slice objects or lists of
        examples, conforming to the interface specified by
        :py:class:`SubsetIterator`.
    return_tuple : bool, optional
        Always return a tuple, even if there is exactly one source
        of data being returned. Defaults to `False`.
    convert : list of callables
        A list of callables, in the same order as the sources
        in `data_specs`, that will be called on the individual
        source batches prior to any further processing.
    Notes
    -----
    See the documentation for :py:class:`SubsetIterator` for
    attribute documentation.
    The dataset should provide a `get` method which accepts a tuple of source
    identifiers and a list or slice of indexes and returns a tuple of batches
    of examples, one for each source. The old interface using `get_data` is
    deprecated and will become unsupported as of July 28, 2015.
    """
    def __init__(self, dataset, subset_iterator, data_specs=None,
                 return_tuple=False, convert=None):
        self._data_specs = data_specs
        self._dataset = dataset
        self._subset_iterator = subset_iterator
        self._return_tuple = return_tuple
        # Keep only the needed sources in self._raw_data.
        # Remember what source they correspond to in self._source
        assert is_flat_specs(data_specs)
        dataset_space, dataset_source = self._dataset.get_data_specs()
        assert is_flat_specs((dataset_space, dataset_source))
        # the dataset's data spec is either a single (space, source) pair,
        # or a pair of (non-nested CompositeSpace, non-nested tuple).
        # We could build a mapping and call flatten(..., return_tuple=True)
        # but simply putting spaces, sources and data in tuples is simpler.
        if not isinstance(dataset_source, (tuple, list)):
            dataset_source = (dataset_source,)
        if not isinstance(dataset_space, CompositeSpace):
            dataset_sub_spaces = (dataset_space,)
        else:
            dataset_sub_spaces = dataset_space.components
        assert len(dataset_source) == len(dataset_sub_spaces)
        # Normalize the requested specs the same way: one space per source.
        space, source = data_specs
        if not isinstance(source, tuple):
            source = (source,)
        if not isinstance(space, CompositeSpace):
            sub_spaces = (space,)
        else:
            sub_spaces = space.components
        assert len(source) == len(sub_spaces)
        # If `dataset` is incompatible with the new interface, fall back to the
        # old interface
        if not hasattr(self._dataset, 'get'):
            warnings.warn("dataset is using the old iterator interface which "
                          "is deprecated and will become officially "
                          "unsupported as of July 28, 2015. The dataset "
                          "should implement a `get` method respecting the new "
                          "interface.")
            # Old interface: fetch everything up front and keep only the
            # requested sources, in request order.
            all_data = self._dataset.get_data()
            if not isinstance(all_data, tuple):
                all_data = (all_data,)
            raw_data = []
            for s in source:
                try:
                    raw_data.append(all_data[dataset_source.index(s)])
                except ValueError as e:
                    msg = str(e) + '\nThe dataset does not provide '\
                        'a source with name: ' + s + '.'
                    reraise_as(ValueError(msg))
            self._raw_data = tuple(raw_data)
        self._source = source
        self._space = sub_spaces
        if convert is None:
            self._convert = [None for s in source]
        else:
            assert len(convert) == len(source)
            self._convert = convert
        # Fill in a default per-source conversion function wherever the
        # caller did not supply one.
        for i, (so, sp) in enumerate(safe_izip(source, sub_spaces)):
            try:
                idx = dataset_source.index(so)
            except ValueError as e:
                msg = str(e) + '\nThe dataset does not provide '\
                    'a source with name: ' + so + '.'
                reraise_as(ValueError(msg))
            dspace = dataset_sub_spaces[idx]
            fn = self._convert[i]
            # If there is a fn, it is supposed to take care of the formatting,
            # and it should be an error if it does not. If there was no fn,
            # then the iterator will try to format using the generic
            # space-formatting functions.
            if fn is None:
                # "dspace" and "sp" have to be passed as parameters
                # to lambda, in order to capture their current value,
                # otherwise they would change in the next iteration
                # of the loop.
                fn = (lambda batch, dspace=dspace, sp=sp:
                      dspace.np_format_as(batch, sp))
                self._convert[i] = fn
    def __iter__(self):
        return self
    @wraps(SubsetIterator.next)
    def next(self):
        """
        Retrieves the next batch of examples.
        Returns
        -------
        next_batch : object
            An object representing a mini-batch of data, conforming
            to the space specified in the `data_specs` constructor
            argument to this iterator. Will be a tuple if more
            than one data source was specified or if the constructor
            parameter `return_tuple` was `True`.
        Raises
        ------
        StopIteration
            When there are no more batches to return.
        """
        next_index = self._subset_iterator.next()
        # If the dataset is incompatible with the new interface, fall back to
        # the old one
        if hasattr(self._dataset, 'get'):
            rval = self._next(next_index)
        else:
            rval = self._fallback_next(next_index)
        # Unwrap a 1-tuple unless the caller asked for tuples always.
        if not self._return_tuple and len(rval) == 1:
            rval, = rval
        return rval
    def _next(self, next_index):
        # New interface: ask the dataset for the indexed batch of each
        # source, then apply the per-source conversion functions.
        return tuple(
            fn(batch) if fn else batch for batch, fn in
            safe_izip(self._dataset.get(self._source, next_index),
                      self._convert)
        )
    def _fallback_next(self, next_index):
        # TODO: handle fancy-index copies by allocating a buffer and
        # using np.take()
        return tuple(
            fn(data[next_index]) if fn else data[next_index]
            for data, fn in safe_izip(self._raw_data, self._convert)
        )
    def __next__(self):
        return self.next()
    @property
    @wraps(SubsetIterator.batch_size, assigned=(), updated=())
    def batch_size(self):
        return self._subset_iterator.batch_size
    @property
    @wraps(SubsetIterator.num_batches, assigned=(), updated=())
    def num_batches(self):
        return self._subset_iterator.num_batches
    @property
    @wraps(SubsetIterator.num_examples, assigned=(), updated=())
    def num_examples(self):
        return self._subset_iterator.num_examples
    @property
    @wraps(SubsetIterator.uneven, assigned=(), updated=())
    def uneven(self):
        return self._subset_iterator.uneven
    @property
    @wraps(SubsetIterator.stochastic, assigned=(), updated=())
    def stochastic(self):
        return self._subset_iterator.stochastic
|
unknown
|
codeparrot/codeparrot-clean
| ||
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_CORE_KERNELS_DATA_MAP_DEFUN_OP_H_
#define TENSORFLOW_CORE_KERNELS_DATA_MAP_DEFUN_OP_H_
#include "tensorflow/core/framework/dataset.h"
namespace tensorflow {
namespace data {
// This op runs a given defun on slices of the input arguments. The function
// given by "f" is assumed to be stateless, and is executed concurrently
// on all the slices; up to batch_size (i.e. the 0th dimension of each argument)
// functions will be scheduled at once.
//
// The "max_intra_op_parallelism" attr, which defaults to 1, can be used to
// limit the intra op parallelism. To limit inter-op parallelism, a user
// can set a private threadpool on the dataset using `tf.data.Options`'s
// `ThreadingOptions`.
//
// Note that this op is not exposed to users directly, but is invoked in
// tf.data rewrites.
class MapDefunOp : public AsyncOpKernel {
 public:
  // Names of the op's inputs and attrs, as used when reading them from
  // the OpKernelConstruction/OpKernelContext.
  static constexpr const char* const kArguments = "arguments";
  static constexpr const char* const kCapturedInputs = "captured_inputs";
  static constexpr const char* const kTarguments = "Targuments";
  static constexpr const char* const kTcaptured = "Tcaptured";
  static constexpr const char* const kOutputTypes = "output_types";
  static constexpr const char* const kOutputShapes = "output_shapes";
  static constexpr const char* const kFunc = "f";
  static constexpr const char* const kMaxIntraOpParallelism =
      "max_intra_op_parallelism";
  explicit MapDefunOp(OpKernelConstruction* ctx);
  ~MapDefunOp() override = default;
  // Schedules one invocation of `f` per slice of the inputs; `done` is
  // invoked once all slices have completed (AsyncOpKernel contract).
  void ComputeAsync(OpKernelContext* ctx, DoneCallback done) override;
 private:
  // Per-invocation state shared by the scheduled function calls
  // (defined in the .cc file).
  struct ComputeOptions;
  class MapFunctionCallFrame;
  void SetRunOptions(OpKernelContext* ctx,
                     FunctionLibraryRuntime::Options* opts,
                     ComputeOptions* compute_opts, bool always_collect_stats);
  // Get inputs to Compute and check that they are valid.
  absl::Status SetupArgs(OpKernelContext* ctx, ComputeOptions** compute_opts);
  absl::Status SetupOutputs(OpKernelContext* ctx, ComputeOptions* opts);
  // Runtime handle for the instantiated function `f` (presumably
  // resolved in the constructor -- confirm in the .cc file).
  FunctionLibraryRuntime::Handle func_handle_;
  // Static shape information from the "output_shapes" attr.
  std::vector<PartialTensorShape> output_shapes_;
  // If this value is positive, limit the max intra op parallelism when the
  // function is run on slices of the input.
  int max_intra_op_parallelism_;
};
} // namespace data
} // namespace tensorflow
#endif // TENSORFLOW_CORE_KERNELS_DATA_MAP_DEFUN_OP_H_
|
c
|
github
|
https://github.com/tensorflow/tensorflow
|
tensorflow/core/kernels/data/map_defun_op.h
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2014 Mate Soos
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; version 2
# of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
from __future__ import with_statement # Required in 2.5
from __future__ import print_function
import subprocess
import os
import stat
import fnmatch
import gzip
import re
import commands
import getopt
import sys
import signal
import resource
import time
import struct
import random
from random import choice
from subprocess import Popen, PIPE, STDOUT
# from optparse import OptionParser
import optparse
import glob
print("our CWD is: %s files here: %s" % (os.getcwd(), glob.glob("*")) )
sys.path.append(os.getcwd())
print("our sys.path is", sys.path)
from xor_to_cnf_class import *
from debuglib import *
class PlainHelpFormatter(optparse.IndentedHelpFormatter):
    """Help formatter that prints the description text verbatim.

    Unlike the base IndentedHelpFormatter, the description is not
    re-wrapped; it is emitted as-is with a trailing newline (or as an
    empty string when there is no description).
    """

    def format_description(self, description):
        # Pass the text through untouched instead of re-wrapping it.
        return description + "\n" if description else ""
# ---- Command-line interface -------------------------------------------
# Option parsing happens at module import time; the resulting `options`
# and `args` are used as module-level globals by the rest of the script.
usage = "usage: %prog [options] --fuzz/--regtest/--checkdir/filetocheck"
desc = """Fuzz the solver with fuzz-generator: ./fuzz_test.py
"""
parser = optparse.OptionParser(usage=usage, description=desc,
                               formatter=PlainHelpFormatter())
parser.add_option("--exec", metavar="SOLVER", dest="solver",
                  default="../../build/cryptominisat4",
                  help="SAT solver executable. Default: %default")
parser.add_option("--extraopts", "-e", metavar="OPTS",
                  dest="extra_options", default="",
                  help="Extra options to give to SAT solver")
parser.add_option("--verbose", "-v", action="store_true", default=False,
                  dest="verbose", help="Print more output")
# for fuzz-testing
parser.add_option("--seed", dest="fuzz_seed_start",
                  help="Fuzz test start seed", type=int)
parser.add_option("--fuzzlim", dest="fuzz_test_lim", type=int,
                  help="Number of fuzz tests to run"
                  )
parser.add_option("--novalgrind", dest="novalgrind", default=False,
                  action="store_true", help="No valgrind installed")
parser.add_option("--small", dest="small", default=False,
                  action="store_true", help="Don't run 'large' fuzzer (may mem-out on smaller systems)")
parser.add_option("--sqlite", dest="sqlite", default=False,
                  action="store_true", help="Test SQLite dumping")
parser.add_option("--gauss", dest="test_gauss", default=False,
                  action="store_true", help="Test gauss too")
parser.add_option("--tout", "-t", dest="maxtime", type=int, default=80,
                  help="Max time to run")
parser.add_option("--textra", dest="maxtimediff", type=int, default=20,
                  help="Extra time on top of timeout for processing")
(options, args) = parser.parse_args()
def fuzzer_call_failed():
    """Report that the fuzz-generator executable could not be run, then abort."""
    messages = (
        "OOps, fuzzer executable call failed!",
        "Did you build with cmake -DENABLE_TESTING=ON? Did you do git submodules init & update?",
    )
    for line in messages:
        print(line)
    exit(-1)
class solution_parser:
def __init__(self):
pass
@staticmethod
def parse_solution_from_output(output_lines, ignoreNoSolution):
if len(output_lines) == 0:
print("Error! SAT solver output is empty!")
print("output lines: %s" % output_lines)
print("Error code 500")
exit(500)
# solution will be put here
satunsatfound = False
vlinefound = False
solution = {}
conflict = None
# parse in solution
for line in output_lines:
# skip comment
if re.match('^conflict ', line):
line = line.strip().split()[1:]
conflict = [int(elem) for elem in line]
continue
if (re.match('^c ', line)):
continue
# solution
if (re.match('^s ', line)):
if (satunsatfound):
print("ERROR: solution twice in solver output!")
exit(400)
if 'UNSAT' in line:
unsat = True
satunsatfound = True
continue
if 'SAT' in line:
unsat = False
satunsatfound = True
continue
print("ERROR: line starts with 's' but no SAT/UNSAT on line")
exit(400)
# parse in solution
if (re.match('^v ', line)):
vlinefound = True
myvars = line.split(' ')
for var in myvars:
var = var.strip()
if var == "" or var == 'v':
continue
if (int(var) == 0):
break
intvar = int(var)
solution[abs(intvar)] = (intvar >= 0)
# print("Parsed values:", solution)
if (ignoreNoSolution is False and
(satunsatfound is False or (
unsat is False and vlinefound is False))):
print("Error: Cannot find line starting with 's' or 'v' in output!")
print(output_lines)
print("Error code 500")
exit(500)
if (ignoreNoSolution is True and
(satunsatfound is False or (
unsat is False and vlinefound is False))):
print("Probably timeout, since no solution printed. Could, of course, be segfault/assert fault, etc.")
print("Making it look like an UNSAT, so no checks!")
return (True, [])
if (satunsatfound is False):
print("Error: Cannot find if SAT or UNSAT. Maybe didn't finish running?")
print(output_lines)
print("Error code 500")
exit(500)
if (unsat is False and vlinefound is False):
print("Error: Solution is SAT, but no 'v' line")
print (output_lines)
print("Error code 500")
exit(500)
return unsat, solution, conflict
@staticmethod
def check_regular_clause(line, solution):
    """Check that one DIMACS clause is satisfied by *solution*.

    line -- clause text: whitespace-separated literals terminated by 0
    solution -- dict mapping variable (positive int) -> bool
    Variables missing from *solution* cannot satisfy the clause and are
    skipped. Exits with code 100 if the clause is unsatisfied.
    """
    lits = line.split()
    satisfied = False
    for lit in lits:
        numlit = int(lit)
        if numlit == 0:
            continue
        if abs(numlit) not in solution:
            continue
        # BUGFIX: the original used bitwise '~' on a bool, which yields a
        # truthy int (-1 or -2) for BOTH values, so every negative literal
        # incorrectly "satisfied" the clause. Use logical negation.
        if numlit < 0:
            satisfied |= not solution[abs(numlit)]
        else:
            satisfied |= solution[numlit]
        if satisfied:
            break
    if not satisfied:
        print("Error: clause '%s' not satisfied." % line)
        print("Error code 100")
        exit(100)
@staticmethod
def check_xor_clause(line, solution):
    """Verify that an 'x'-prefixed XOR clause evaluates to True under *solution*.

    Exits with 200 if a referenced variable has no value, and with -1 if
    the XOR of all literals is False.
    """
    stripped = line.lstrip('x')
    parity = False
    for token in stripped.split():
        literal = int(token)
        if literal == 0:
            continue
        variable = abs(literal)
        if variable not in solution:
            print("Error: var %d not solved, but referred to in a xor-clause of the CNF" % variable)
            print("Error code 200")
            exit(200)
        # each literal flips the parity by its value; a negation flips once more
        parity ^= solution[variable]
        parity ^= literal < 0
    if parity is False:
        print("Error: xor-clause '%s' not satisfied." % stripped)
        exit(-1)
@staticmethod
def test_found_solution(solution, fname, debugLibPart=None):
    """Re-check *solution* against every clause of the CNF file *fname*.

    solution -- dict var -> bool, as produced by parse_solution_from_output
    debugLibPart -- when given, only clauses before the debugLibPart-th
        "Solver::solve" checkpoint comment are verified
    """
    if debugLibPart is None:
        print("Verifying solution for CNF file %s" % fname)
    else:
        print("Verifying solution for CNF file %s, part %d" %
              (fname, debugLibPart))
    # BUGFIX: open gzipped CNFs in text mode ("rt"); plain "r" means binary
    # for gzip under Python 3, which breaks the str comparisons below.
    if fnmatch.fnmatch(fname, '*.gz'):
        f = gzip.open(fname, "rt")
    else:
        f = open(fname, "r")
    clauses = 0
    thisDebugLibPart = 0
    for line in f:
        line = line.rstrip()
        # skip empty lines
        if len(line) == 0:
            continue
        # count debug lib parts
        if line[0] == 'c' and "Solver::solve" in line:
            thisDebugLibPart += 1
        # if we are over debugLibPart, exit
        if debugLibPart is not None and thisDebugLibPart >= debugLibPart:
            f.close()
            return
        # check solution against clause (skip comments and the "p" header)
        if line[0] != 'c' and line[0] != 'p':
            if line[0] != 'x':
                solution_parser.check_regular_clause(line, solution)
            else:
                solution_parser.check_xor_clause(line, solution)
            clauses += 1
    f.close()
    # BUGFIX: the summary string contained a mis-decoded '®' ("xor®ular",
    # an HTML "&reg" artifact); restore the intended "xor&regular".
    print("Verified %d original xor&regular clauses" % clauses)
class create_fuzz:
    """Helpers to generate fuzzed CNF input files via external fuzzer binaries."""

    @staticmethod
    def unique_file(fname_begin, fname_end=".cnf"):
        """Atomically create and return a file name that did not exist before.

        Uses O_CREAT|O_EXCL so concurrent fuzzer instances never collide.
        """
        counter = 1
        while 1:
            fname = fname_begin + '_' + str(counter) + fname_end
            try:
                fd = os.open(
                    fname, os.O_CREAT | os.O_EXCL, stat.S_IREAD | stat.S_IWRITE)
                os.fdopen(fd).close()
                return fname
            except OSError:
                # name already taken -- try the next counter value
                pass
            counter += 1

    def call_from_fuzzer(self, fuzzer, fname):
        """Build the shell command invoking *fuzzer*, redirected into *fname*.

        fuzzer -- list of 1-3 elements: the executable, optionally followed
        by a seed option name and (for 3 elements) a hash-bits option name.
        """
        seed = random.randint(0, 1000000)
        if len(fuzzer) == 1:
            call = "{0} {1} > {2}".format(fuzzer[0], seed, fname)
        elif len(fuzzer) == 2:
            call = "{0} {1} {2} > {3}".format(
                fuzzer[0], fuzzer[1], seed, fname)
        elif len(fuzzer) == 3:
            hashbits = (random.getrandbits(20) % 80) + 1
            call = "%s %s %d %s %d > %s" % (
                fuzzer[0], fuzzer[1], hashbits, fuzzer[2], seed, fname)
        else:
            # BUGFIX: the message claimed "at most 2 arguments" although the
            # branches above handle descriptions of 1 to 3 elements
            assert False, "Fuzzer description must have 1 to 3 elements"
        return call

    def create_fuzz_file(self, fuzzer, fuzzers, fname):
        """Create a fuzzed CNF at *fname*; return (call, temp_files).

        The "multipart.py special" pseudo-fuzzer concatenates the output of
        2-3 randomly chosen real fuzzers; the intermediate files it creates
        are returned so the caller can delete them afterwards.
        """
        # handle special fuzzer
        fnames_multi = []
        if len(fuzzer) == 2 and fuzzer[1] == "special":
            # sometimes just fuzz with all SAT problems
            fixed = random.getrandbits(1) == 1
            for i in range(random.randrange(2, 4)):
                fname2 = create_fuzz.unique_file("fuzzTest")
                fnames_multi.append(fname2)
                # choose a random fuzzer, but never multipart itself
                fuzzer2 = ["multipart.py", "special"]
                while os.path.basename(fuzzer2[0]) == "multipart.py":
                    fuzzer2 = choice(fuzzers)
                # sometimes fuzz with SAT problems only
                if (fixed):
                    fuzzer2 = fuzzers[0]
                print("fuzzer2 used: %s" % fuzzer2)
                call = self.call_from_fuzzer(fuzzer2, fname2)
                print("calling sub-fuzzer: %s" % call)
                status, _ = commands.getstatusoutput(call)
                if status != 0:
                    fuzzer_call_failed()
            # construct multi-fuzzer call
            call = ""
            call += fuzzer[0]
            call += " "
            for name in fnames_multi:
                call += " " + name
            call += " > " + fname
            return call, fnames_multi
        # handle normal fuzzer
        else:
            return self.call_from_fuzzer(fuzzer, fname), []
def setlimits():
    # Used as a subprocess preexec_fn: cap the child's CPU time at
    # options.maxtime seconds (soft == hard limit).
    # sys.stdout.write("Setting resource limit in child (pid %d): %d s\n" %
    # (os.getpid(), options.maxtime))
    resource.setrlimit(resource.RLIMIT_CPU, (options.maxtime, options.maxtime))
def file_exists(fname):
    """Return True when *fname* can be opened for reading, False otherwise."""
    try:
        handle = open(fname)
    except IOError:
        return False
    handle.close()
    return True
def print_version():
    # Run "<solver> --version" and echo the reported version string,
    # merging stderr into stdout so error text is captured too.
    command = options.solver + " --version"
    p = subprocess.Popen(command.rsplit(), stderr=subprocess.STDOUT, stdout=subprocess.PIPE)
    consoleOutput, err = p.communicate()
    print("Version values: %s" % consoleOutput.strip())
# CNF generators guaranteed not to emit XOR ("x ...") clauses, so their
# output can be DRAT-checked. cnf-fuzz-biere is listed many times to weight
# random.choice() towards it.
fuzzers_noxor = [
    ["../../build/tests/sha1-sat/sha1-gen --attack preimage --rounds 20",
     "--hash-bits", "--seed"],
    ["../../build/tests/sha1-sat/sha1-gen --attack preimage --zero --message-bits 400 --rounds 8 --hash-bits 60",
     "--seed"],
    # ["build/cnf-fuzz-nossum"],
    ["../../build/tests/cnf-utils/largefuzzer"],
    ["../../build/tests/cnf-utils/cnf-fuzz-biere"],
    ["../../build/tests/cnf-utils/cnf-fuzz-biere"],
    ["../../build/tests/cnf-utils/cnf-fuzz-biere"],
    ["../../build/tests/cnf-utils/cnf-fuzz-biere"],
    ["../../build/tests/cnf-utils/cnf-fuzz-biere"],
    ["../../build/tests/cnf-utils/cnf-fuzz-biere"],
    ["../../build/tests/cnf-utils/cnf-fuzz-biere"],
    ["../../build/tests/cnf-utils/cnf-fuzz-biere"],
    ["../../build/tests/cnf-utils/cnf-fuzz-biere"],
    ["../../build/tests/cnf-utils/sgen4 -unsat -n 50", "-s"],
    ["../../build/tests/cnf-utils//sgen4 -sat -n 50", "-s"],
    ["../../utils/cnf-utils/cnf-fuzz-brummayer.py", "-s"],
    ["../../utils/cnf-utils/cnf-fuzz-xor.py", "--seed"],
    ["../../utils/cnf-utils/multipart.py", "special"]
]
# Generators that may emit XOR clauses (cannot be DRAT-checked).
fuzzers_xor = [
    ["../../utils/cnf-utils/xortester.py", "--seed"],
    ["../../build/tests/sha1-sat/sha1-gen --xor --attack preimage --rounds 21",
     "--hash-bits", "--seed"],
]
class Tester:
def __init__(self):
    # when True, absent solver output is treated as a probable timeout
    # instead of a hard error
    self.ignoreNoSolution = False
    # options only mixed into the random option set when the solver binary
    # actually advertises them in its help output
    self.extra_options_if_supported = self.list_options_if_supported(
        ["xor", "autodisablegauss"])
def list_options_if_supported(self, tocheck):
    """Return the subset of option names in *tocheck* the solver understands."""
    return [name for name in tocheck if self.option_supported(name)]
def option_supported(self, option_name):
    """Return True if the solver's help output mentions "--<option_name>"."""
    command = options.solver
    # NOTE(review): "--hhelp" is presumably the solver's extended/full help
    # listing -- confirm against the CryptoMiniSat CLI.
    command += " --hhelp"
    p = subprocess.Popen(
        command.rsplit(), stderr=subprocess.STDOUT, stdout=subprocess.PIPE)
    consoleOutput, err = p.communicate()
    for l in consoleOutput.split("\n"):
        tmp_option_name = "--" + option_name
        if tmp_option_name in l:
            return True
    return False
def random_options(self, preproc=False):
    """Assemble a randomized solver option string for one fuzz run.

    preproc -- forwarded to add_schedule_options: when True only the
    preprocessing schedule (--preschedule) is emitted.
    """
    cmd = " --zero-exit-status "
    # half of the runs use defaults only; the other half randomize heavily
    if random.choice([True, False]):
        cmd += " --reconf %d " % random.choice([3, 6, 7, 12, 13, 14])
        cmd += " --reconfat %d " % random.randint(0, 2)
        cmd += "--burst %d " % random.choice([0, 100, random.randint(0, 10000)])
        cmd += "--keepguess %s " % random.randint(0, 10)
        cmd += "--restart %s " % random.choice(
            ["geom", "glue", "luby"])
        cmd += "--adjustglue %f " % random.choice([0, 0.5, 0.7, 1.0])
        cmd += "--gluehist %s " % random.randint(1, 500)
        cmd += "--updateglueonanalysis %s " % random.randint(0, 1)
        cmd += "--otfhyper %s " % random.randint(0, 1)
        # cmd += "--clean %s " % random.choice(["size", "glue", "activity",
        # "prconf"])
        cmd += "--cacheformoreminim %d " % random.choice([0, 1, 1, 1, 1])
        cmd += "--stampformoreminim %d " % random.choice([0, 1, 1, 1, 1])
        cmd += "--alwaysmoremin %s " % random.randint(0, 1)
        cmd += "--rewardotfsubsume %s " % random.randint(0, 100)
        cmd += "--bothprop %s " % random.randint(0, 1)
        cmd += "--probemaxm %s " % random.choice([0, 10, 100, 1000])
        cmd += "--cachesize %s " % random.randint(10, 100)
        cmd += "--cachecutoff %s " % random.randint(0, 2000)
        cmd += "--elimstrgy %s " % random.choice(["heuristic", "calculate"])
        cmd += "--elimcplxupd %s " % random.randint(0, 1)
        cmd += "--occredmax %s " % random.randint(0, 100)
        cmd += "--noextbinsubs %s " % random.randint(0, 1)
        cmd += "--extscc %s " % random.randint(0, 1)
        cmd += "--distill %s " % random.randint(0, 1)
        cmd += "--sortwatched %s " % random.randint(0, 1)
        cmd += "--recur %s " % random.randint(0, 1)
        cmd += "--compsfrom %d " % random.randint(0, 2)
        cmd += "--compsvar %d " % random.randint(20000, 500000)
        cmd += "--compslimit %d " % random.randint(0, 3000)
        cmd += "--implicitmanip %s " % random.randint(0, 1)
        cmd += "--occsimp %s " % random.randint(0, 1)
        cmd += "--occirredmaxmb %s " % random.randint(0, 10)
        cmd += "--occredmaxmb %s " % random.randint(0, 10)
        cmd += "--skipresol %d " % random.choice([1, 1, 1, 0])
        cmd += "--implsubsto %s " % random.choice([0, 10, 1000])
        cmd += "--sync %d " % random.choice([100, 1000, 6000, 100000])
        cmd += "-m %0.12f " % random.gammavariate(0.4, 2.0)
        # gammavariate gives us sometimes very low values, sometimes large
        if options.sqlite:
            cmd += "--sql 2 "
            cmd += "--sqlrestfull %d " % random.choice([0, 1])
            cmd += "--sqlresttime %d " % random.choice([0, 1])
        # the most buggy ones, don't turn them off much, please
        if random.choice([True, False]):
            opts = ["scc", "varelim", "comps", "strengthen", "probe", "intree",
                    "binpri", "stamp", "cache", "otfsubsume",
                    "renumber", "savemem", "moreminim", "gates", "bva",
                    "gorshort", "gandrem", "gateeqlit", "schedsimp", "presimp",
                    "elimcoststrategy"]
            opts.extend(self.extra_options_if_supported)
            for opt in opts:
                cmd += "--%s %d " % (opt, random.randint(0, 1))

    def create_rnd_sched(string_list):
        # pick a random multiset of schedule steps from the option string
        opts = string_list.split(",")
        opts = [a.strip(" ") for a in opts]
        opts = list(set(opts))
        if options.verbose:
            print("available schedule options: %s" % opts)
        sched = []
        for i in range(int(random.gammavariate(12, 0.7))):
            sched.append(random.choice(opts))
        if "autodisablegauss" in self.extra_options_if_supported and options.test_gauss:
            sched.append("occ-gauss")
        return sched

    cmd += self.add_schedule_options(create_rnd_sched, preproc)
    return cmd
def add_schedule_options(self, create_rnd_sched, preproc):
    """Build --schedule / --preschedule options from a schedule picker.

    create_rnd_sched -- callable turning a comma-separated step string into
    a list of schedule step names
    preproc -- when True, the main --schedule option is suppressed
    """
    all_steps = (
        "handle-comps,"
        "scc-vrepl, cache-clean, cache-tryboth,"
        "sub-impl, intree-probe, probe,"
        "sub-str-cls-with-bin, distill-cls, scc-vrepl, sub-impl,"
        "str-impl, cache-clean, sub-str-cls-with-bin, distill-cls, scc-vrepl,"
        "occ-backw-sub-str, occ-xor, occ-clean-implicit, occ-bve, occ-bva, occ-gates,"
        "check-cache-size, renumber"
    )
    cmd = ""
    schedule = ",".join(create_rnd_sched(all_steps))
    if schedule and not preproc:
        cmd += "--schedule %s " % schedule
    preschedule = ",".join(create_rnd_sched(all_steps))
    if preschedule:
        cmd += "--preschedule %s " % preschedule
    return cmd
def execute(self, fname, fname2=None, fixed_opts="", rnd_opts=None):
    """Run the solver on *fname*; return (consoleOutput, retcode).

    fname2 -- optional second file (DRAT proof or preproc output)
    fixed_opts / rnd_opts -- extra command-line options; rnd_opts defaults
    to a fresh random_options() mix.
    Aborts the whole fuzzer on a missing solver binary or on unexpected
    stderr output.
    """
    if os.path.isfile(options.solver) is not True:
        print("Error: Cannot find CryptoMiniSat executable.Searched in: '%s'" %
              options.solver)
        print("Error code 300")
        exit(300)
    # stale checkpoint dumps would confuse check_debug_lib
    for f in glob.glob("%s-debugLibPart*.output" % fname):
        os.unlink(f)
    # construct command
    command = ""
    if not options.novalgrind and random.randint(0, 10) == 0:
        command += "valgrind -q --leak-check=full --error-exitcode=9 "
    command += options.solver
    if rnd_opts is None:
        rnd_opts = self.random_options()
    command += rnd_opts
    if self.needDebugLib:
        command += "--debuglib %s " % fname
    if options.verbose is False:
        command += "--verb 0 "
    command += "--threads %d " % self.num_threads
    command += options.extra_options + " "
    command += fixed_opts + " "
    if fname is not None:
        command += fname
    if fname2:
        command += " %s --savedstate %s-savedstate.dat " % (fname2, fname2)
    print("Executing: %s " % command)
    # print time limit
    if options.verbose:
        print("CPU limit of parent (pid %d)" % os.getpid(), resource.getrlimit(resource.RLIMIT_CPU))
    # if need time limit, then limit
    err_fname = create_fuzz.unique_file("%s_err" % fname, ".out")
    err_file = open(err_fname, "w")
    p = subprocess.Popen(
        command.rsplit(), stderr=err_file, stdout=subprocess.PIPE, preexec_fn=setlimits)
    # print time limit after child startup
    if options.verbose:
        # BUGFIX: the original applied a 2-tuple to a format string with a
        # single %d, raising TypeError; print the rlimit as a separate
        # argument, matching the report above.
        print("CPU limit of parent (pid %d) after startup of child" %
              os.getpid(), resource.getrlimit(resource.RLIMIT_CPU))
    # Get solver output
    consoleOutput, err = p.communicate()
    retcode = p.returncode
    err_file.close()
    with open(err_fname, "r") as err_file:
        found_something = False
        for line in err_file:
            print("Error line while executing: %s" % line.strip())
            # known-noisy valgrind reports from these headers are ignored
            if "std::_Ios_Fmtflags" in line or "mzd.h" in line or "lexical_cast.hpp" in line:
                pass
            else:
                found_something = True
        if found_something:
            exit(-1)
    os.unlink(err_fname)
    if options.verbose:
        # BUGFIX: same tuple-vs-format mismatch as above
        print("CPU limit of parent (pid %d) after child finished executing" %
              os.getpid(), resource.getrlimit(resource.RLIMIT_CPU))
    return consoleOutput, retcode
def check_unsat(self, fname):
    """Cross-check an UNSAT verdict by solving *fname* with lingeling.

    Returns True/False for lingeling's UNSAT/SAT answer, or None when
    lingeling itself came too close to the time limit to be trusted.
    """
    # lingeling cannot read XOR clauses, so convert them away first
    a = XorToCNF()
    tmpfname = create_fuzz.unique_file("tmp_for_xor_to_cnf_convert")
    a.convert(fname, tmpfname)
    # execute with the other solver
    toexec = "lingeling -f %s" % tmpfname
    print("Solving with other solver: %s" % toexec)
    curr_time = time.time()
    try:
        p = subprocess.Popen(toexec.rsplit(),
                             stdout=subprocess.PIPE,
                             preexec_fn=setlimits)
    except OSError:
        print("ERROR: Probably you don't have lingeling installed!")
        raise
    consoleOutput2 = p.communicate()[0]
    os.unlink(tmpfname)
    # if other solver was out of time, then we can't say anything
    diff_time = time.time() - curr_time
    if diff_time > options.maxtime - options.maxtimediff:
        print("Other solver: too much time to solve, aborted!")
        return None
    # extract output from the other solver
    print("Checking other solver output...")
    otherSolverUNSAT, otherSolverSolution, _ = solution_parser.parse_solution_from_output(
        consoleOutput2.split("\n"), self.ignoreNoSolution)
    # check if the other solver agrees with us
    return otherSolverUNSAT
def extract_lib_part(self, fname, debug_num, assumps, tofile):
    """Write the prefix of CNF *fname* up to the debug_num-th
    "Solver::solve" checkpoint into *tofile*, appending *assumps* as
    unit clauses.

    Pass 1 counts clauses and the maximum variable for the "p cnf" header;
    pass 2 copies the clauses and adds the assumption units.
    """
    fromf = open(fname, "r")
    thisDebugLibPart = 0
    maxvar = 0
    numcls = 0
    for line in fromf:
        line = line.strip()
        # ignore empty strings and headers
        if not line or line[0] == "p":
            continue
        # process (potentially special) comments
        if line[0] == "c":
            if "Solver::solve" in line:
                thisDebugLibPart += 1
            continue
        # break out if we reached the debug lib part
        if thisDebugLibPart >= debug_num:
            break
        # count clauses and get max var number
        numcls += 1
        maxvar = max(maxvar, get_max_var_from_clause(line))
    fromf.close()
    # now we can create the new CNF file
    fromf = open(fname, "r")
    tof = open(tofile, "w")
    tof.write("p cnf %d %d\n" % (maxvar, numcls + len(assumps)))
    thisDebugLibPart = 0
    for line in fromf:
        line = line.strip()
        # skip empty lines and headers
        if not line or line[0] == "p":
            continue
        # parse up special header
        if line[0] == "c":
            if "Solver::solve" in line:
                thisDebugLibPart += 1
            continue
        # break out if we reached the debug lib part
        if thisDebugLibPart >= debug_num:
            break
        tof.write(line + '\n')
    # add assumptions
    for lit in assumps:
        tof.write("%d 0\n" % lit)
    fromf.close()
    tof.close()
def get_assumps(self, fname, debugLibPart):
    """Extract the assumption literals of the debugLibPart-th
    "Solver::solve(...)" checkpoint comment found in *fname*.
    """
    solveline = None
    seen = 0
    f = open(fname, "r")
    for line in f:
        if "Solver::solve" in line:
            seen += 1
            if seen == debugLibPart:
                solveline = line
                break
    f.close()
    assert solveline is not None
    match = re.match("c.*Solver::solve\((.*)\)", solveline)
    assert match is not None
    assumps = [int(tok) for tok in match.group(1).strip().split()]
    print("Assumptions: ", assumps)
    return assumps
def check_assumps_inside_conflict(self, assumps, conflict):
    """Every conflict literal must be the inverse of some assumption.

    Exits with -100 on the first violation.
    """
    for conf_lit in conflict:
        if -conf_lit not in assumps:
            print("ERROR: Final conflict contains %s but assumps is %s" %(conflict, assumps))
            print("ERROR: lit ", conf_lit, " is in conflict but its inverse is not is assumps!")
            exit(-100)
    print("OK, final conflict only contains elements from assumptions")
def check_assumps_inside_solution(self, assumps, solution):
    """Verify the printed solution does not contradict any assumption.

    Variables absent from *solution* are ignored. Exits with -100 on a
    contradiction.
    """
    for lit in assumps:
        var = abs(lit)
        val = lit > 0
        if var in solution:
            if solution[var] != val:
                # BUGFIX: typo "pinted" -> "printed" in the error message
                print("Solution printed has literal %s but assumptions contained the inverse: '%s'" % (-1 * lit, assumps))
                exit(-100)
    print("OK, all assumptions inside solution")
def find_largest_debuglib_part(self, fname):
    """Count "<fname>-debugLibPart*.output" dumps in the working directory."""
    pattern = "%s-debugLibPart*.output" % fname
    return sum(1 for entry in os.listdir(".")
               if fnmatch.fnmatch(entry, pattern))
def check_debug_lib(self, fname):
    """Verify every --debuglib checkpoint dump produced while solving *fname*.

    SAT checkpoints are checked against the CNF prefix; UNSAT checkpoints
    are cross-checked with another solver on the extracted sub-problem.
    """
    largestPart = self.find_largest_debuglib_part(fname)
    for debugLibPart in range(1, largestPart + 1):
        fname_debug = "%s-debugLibPart%d.output" % (fname, debugLibPart)
        print("Checking debug lib part %s -- %s " % (debugLibPart, fname_debug))
        if (os.path.isfile(fname_debug) is False):
            print("Error: Filename to be read '%s' is not a file!" % fname_debug)
            exit(-1)
        # take file into mem
        f = open(fname_debug, "r")
        text = f.read()
        output_lines = text.splitlines()
        f.close()
        unsat, solution, conflict = solution_parser.parse_solution_from_output(
            output_lines, self.ignoreNoSolution)
        assumps = self.get_assumps(fname, debugLibPart)
        if unsat is False:
            print("debugLib is SAT")
            self.check_assumps_inside_solution(assumps, solution)
            solution_parser.test_found_solution(solution, fname, debugLibPart)
        else:
            print("debugLib is UNSAT")
            assert conflict is not None, "debugLibPart must create a conflict in case of UNSAT"
            self.check_assumps_inside_conflict(assumps, conflict)
            tmpfname = create_fuzz.unique_file("tmp_for_extract_libpart")
            self.extract_lib_part(fname, debugLibPart, assumps, tmpfname)
            # check with other solver
            ret = self.check_unsat(tmpfname)
            if ret is None:
                print("Cannot check, other solver took too much time")
            elif ret is True:
                print("UNSAT verified by other solver")
            else:
                print("Grave bug: SAT-> UNSAT : Other solver found solution!!")
                exit(-1)
            os.unlink(tmpfname)
        os.unlink(fname_debug)
def check(self, fname, fname2=None,
          checkAgainst=None,
          fixed_opts="", dump_output_fname=None,
          rnd_opts=None):
    """Solve *fname* and verify the result.

    checkAgainst -- CNF to verify the solution against (defaults to fname)
    dump_output_fname -- preproc mode: write solver output here, return True
    Returns None on timeout; exits the process on verification failure.
    """
    consoleOutput = ""
    if checkAgainst is None:
        checkAgainst = fname
    curr_time = time.time()
    # Do we need to solve the problem, or is it already solved?
    consoleOutput, retcode = self.execute(
        fname, fname2=fname2,
        fixed_opts=fixed_opts, rnd_opts=rnd_opts)
    # if time was limited, we need to know if we were over the time limit
    # and that is why there is no solution
    diff_time = time.time() - curr_time
    if diff_time > (options.maxtime - options.maxtimediff) / self.num_threads:
        print("Too much time to solve, aborted!")
        return None
    print("Within time limit: %.2f s" % diff_time)
    print("filename: %s" % fname)
    # if library debug is set, check it
    if (self.needDebugLib):
        self.check_debug_lib(checkAgainst)
    if retcode != 0:
        print("Return code is not 0, error!")
        exit(-1)
    print("Checking console output...")
    unsat, solution, _ = solution_parser.parse_solution_from_output(
        consoleOutput.split("\n"), self.ignoreNoSolution)
    # preprocessing
    if dump_output_fname is not None:
        f = open(dump_output_fname, "w")
        f.write(consoleOutput)
        f.close()
        return True
    if not unsat:
        solution_parser.test_found_solution(solution, checkAgainst)
        return
    # it's UNSAT, let's check with DRAT
    if fname2:
        toexec = "drat-trim %s %s" % (fname, fname2)
        print("Checking DRAT...: ", toexec)
        p = subprocess.Popen(toexec.rsplit(), stdout=subprocess.PIPE)
        consoleOutput2 = p.communicate()[0]
        diff_time = time.time() - curr_time
        # find verification code
        foundVerif = False
        dratLine = ""
        for line in consoleOutput2.split('\n'):
            if len(line) > 1 and line[:2] == "s ":
                # print("verif: " , line)
                foundVerif = True
                if line[2:10] != "VERIFIED" and line[2:] != "TRIVIAL UNSAT":
                    print("DRAT verification error, it says: %s" % consoleOutput2)
                assert line[2:10] == "VERIFIED" or line[
                    2:] == "TRIVIAL UNSAT", "DRAT didn't verify problem!"
                dratLine = line
        # Check whether we have found a verification code
        if foundVerif is False:
            print("verifier error! It says: %s" % consoleOutput2)
            assert foundVerif, "Cannot find DRAT verification code!"
        else:
            print("OK, DRAT says: %s" % dratLine)
    # check with other solver
    ret = self.check_unsat(checkAgainst)
    if ret is None:
        print("Other solver time-outed, cannot check")
    elif ret is True:
        print("UNSAT verified by other solver")
    else:
        print("Grave bug: SAT-> UNSAT : Other solver found solution!!")
        # NOTE(review): exit() with no argument exits with status 0 even
        # though this is reported as a grave bug -- confirm intent.
        exit()
def fuzz_test_one(self):
    """Generate one fuzzed CNF, solve + verify it, then clean up temp files."""
    print("\n--- NORMAL TESTING ---")
    self.num_threads = random.choice([1, 2, 4])
    # DRAT proof checking only works single-threaded
    self.drat = self.num_threads == 1 and random.choice([True, False])
    if self.drat:
        fuzzers = fuzzers_drat
    else:
        fuzzers = fuzzers_nodrat
    fuzzer = random.choice(fuzzers)
    fname = create_fuzz.unique_file("fuzzTest")
    fname_drat = None
    if self.drat:
        fname_drat = "%s-drat" % fname
    # create the fuzz file
    cf = create_fuzz()
    call, todel = cf.create_fuzz_file(fuzzer, fuzzers, fname)
    print("calling %s" % call)
    status, _ = commands.getstatusoutput(call)
    if status != 0:
        fuzzer_call_failed()
    if not self.drat:
        # non-DRAT runs get Solver::solve checkpoints interspersed so the
        # debug-lib machinery has something to verify
        self.needDebugLib = True
        interspersed_fname = create_fuzz.unique_file("fuzzTest")
        seed_for_inters = random.randint(0, 1000000)
        intersperse(fname, interspersed_fname, seed_for_inters)
        print("Interspersed: ./intersperse.py %s %s %d" % (fname,
                                                           interspersed_fname,
                                                           seed_for_inters))
        os.unlink(fname)
    else:
        self.needDebugLib = False
        interspersed_fname = fname
    self.check(fname=interspersed_fname, fname2=fname_drat)
    # remove temporary filenames
    os.unlink(interspersed_fname)
    for name in todel:
        os.unlink(name)
    if fname_drat:
        os.unlink(fname_drat)
def delete_file_no_matter_what(self, fname):
    """Remove *fname*, silently ignoring a missing file.

    BUGFIX: narrowed the original bare "except:" (which also swallowed
    KeyboardInterrupt/SystemExit) to OSError, the only expected failure.
    """
    try:
        os.unlink(fname)
    except OSError:
        pass
def fuzz_test_preproc(self):
    """Test --preproc 1 (simplify) followed by --preproc 2 (reconstruction)."""
    print("\n--- PREPROC TESTING ---")
    tester.needDebugLib = False
    fuzzer = random.choice(fuzzers_drat)
    self.num_threads = 1
    fname = create_fuzz.unique_file("fuzzTest")
    self.drat = False
    # create the fuzz file
    cf = create_fuzz()
    call, todel = cf.create_fuzz_file(fuzzer, fuzzers_nodrat, fname)
    print("calling %s : %s" % (fuzzer, call))
    status, _ = commands.getstatusoutput(call)
    if status != 0:
        fuzzer_call_failed()
    rnd_opts = self.random_options(preproc=True)
    # preprocess
    simp = "%s-simplified.cnf" % fname
    self.delete_file_no_matter_what(simp)
    curr_time = time.time()
    console, retcode = self.execute(fname, fname2=simp,
                                    rnd_opts=rnd_opts,
                                    fixed_opts="--preproc 1")
    diff_time = time.time() - curr_time
    if diff_time > (options.maxtime - options.maxtimediff) / self.num_threads:
        print("Too much time to solve, aborted!")
    else:
        print("Within time limit: %.2f s" % diff_time)
    if retcode != 0:
        print("Return code is not 0, error!")
        exit(-1)
    # solve the simplified CNF, dumping output for reconstruction
    solution = "%s-solution.txt" % fname
    ret = self.check(fname=simp, dump_output_fname=solution)
    if ret is not None:
        # didn't time out, so let's reconstruct the solution
        savedstate = "%s-savedstate.dat" % simp
        self.check(fname=solution, checkAgainst=fname,
                   fixed_opts="--preproc 2 --savedstate %s" % savedstate,
                   rnd_opts=rnd_opts)
        os.unlink(savedstate)
        os.unlink(solution)
    # remove temporary filenames
    os.unlink(fname)
    for name in todel:
        os.unlink(name)
def filter_large_fuzzer(dat):
    """Return *dat* without the fuzzer entries that mention "large" anywhere."""
    return [entry for entry in dat
            if not any("large" in part for part in entry)]
# NOTE(review): module-level "global" statements are no-ops; kept as-is.
global fuzzers_drat
global fuzzers_nodrat
# DRAT-checkable runs must avoid the XOR-producing fuzzers
fuzzers_drat = fuzzers_noxor
fuzzers_nodrat = fuzzers_noxor + fuzzers_xor
if options.small:
    fuzzers_drat = filter_large_fuzzer(fuzzers_drat)
    fuzzers_nodrat = filter_large_fuzzer(fuzzers_nodrat)
print_version()
tester = Tester()
tester.needDebugLib = False
num = 0
rnd_seed = options.fuzz_seed_start
if rnd_seed is None:
    rnd_seed = random.randint(0, 1000*1000*100)
# main fuzz loop: each iteration is fully reproducible from its seed,
# which is printed so a failing case can be re-run
while True:
    toexec = "./fuzz_test.py --fuzzlim 1 --seed %d" % rnd_seed
    if options.novalgrind:
        toexec += " --novalgrind"
    if options.small:
        toexec += " --small"
    print("To re-create fuzz-test below: %s" % toexec)
    random.seed(rnd_seed)
    if random.choice([True, False]):
        tester.fuzz_test_preproc()
    else:
        tester.fuzz_test_one()
    rnd_seed += 1
    num += 1
    if options.fuzz_test_lim is not None and num >= options.fuzz_test_lim:
        exit(0)
|
unknown
|
codeparrot/codeparrot-clean
| ||
# Copyright 2014-2019 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Samragni Banerjee <samragnibanerjee4@gmail.com>
# Alexander Sokolov <alexander.y.sokolov@gmail.com>
#
import unittest
import numpy
import math
from pyscf import gto
from pyscf import scf
from pyscf import adc
# Water molecule: O at the origin, H positions computed from the bond
# length r (Angstrom) and an H-O-H angle of 104.468205 degrees.
mol = gto.Mole()
r = 0.957492
x = r * math.sin(104.468205 * math.pi/(2 * 180.0))
y = r * math.cos(104.468205* math.pi/(2 * 180.0))
mol.atom = [
    ['O', ( 0., 0. , 0)],
    ['H', ( 0., -x, y)],
    ['H', ( 0., x , y)],]
mol.basis = {'H': 'aug-cc-pVDZ',
             'O': 'aug-cc-pVDZ',}
mol.verbose = 0
mol.build()
# tight SCF convergence so the ADC reference energies are reproducible
mf = scf.RHF(mol)
mf.conv_tol = 1e-12
mf.kernel()
# shared ADC object; each test below sets myadc.method before running
myadc = adc.ADC(mf)
def tearDownModule():
    # free the shared fixtures once all tests in this module have run
    global mol, mf
    del mol, mf
class KnownValues(unittest.TestCase):
    """Reference EA-ADC energies and pole strengths for water/aug-cc-pVDZ."""

    def _assert_roots(self, energies, pole_strengths, ref_energies, ref_poles):
        """Compare each root energy/pole strength to 6 decimal places."""
        for got, want in zip(energies, ref_energies):
            self.assertAlmostEqual(got, want, 6)
        for got, want in zip(pole_strengths, ref_poles):
            self.assertAlmostEqual(got, want, 6)

    def test_ea_adc2(self):
        gs_energy, t_amp1, t_amp2 = myadc.kernel_gs()
        self.assertAlmostEqual(gs_energy, -0.2218560609876961, 6)
        myadcea = adc.radc.RADCEA(myadc)
        e, v, p, x = myadcea.kernel(nroots=3)
        self._assert_roots(
            e, p,
            [0.0287675413010661, 0.0553475511361251, 0.1643553780332306],
            [1.9868196915945326, 1.9941128865405613, 1.9760420333383126])

    def test_ea_adc2x(self):
        myadc.method = "adc(2)-x"
        gs_energy, t_amp1, t_amp2 = myadc.kernel_gs()
        self.assertAlmostEqual(gs_energy, -0.2218560609876961, 6)
        myadcea = adc.radc.RADCEA(myadc)
        e, v, p, x = myadcea.kernel(nroots=3)
        self._assert_roots(
            e, p,
            [0.0270276135717527, 0.0546446308721235, 0.1614552196278816],
            [1.9782643804856972, 1.9905409664546319, 1.9593142553574816])

    def test_ea_adc3(self):
        myadc.method = "adc(3)"
        gs_energy, t_amp1, t_amp2 = myadc.kernel_gs()
        self.assertAlmostEqual(gs_energy, -0.2263968409281272, 6)
        myadcea = adc.radc.RADCEA(myadc)
        e, v, p, x = myadcea.kernel(nroots=4)
        myadcea.analyze()
        self._assert_roots(
            e, p,
            [0.0277406670820452, 0.0551456657778995, 0.1620710279026066,
             0.1882010099486046],
            [1.9814233118436899, 1.9920778842193207, 1.9676462978544356,
             1.9743650630026532])
# run the unittest suite when executed directly
if __name__ == "__main__":
    print("EA calculations for different RADC methods for water molecule")
    unittest.main()
|
unknown
|
codeparrot/codeparrot-clean
| ||
# (c) 2020 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
import os
import pkgutil
import sys
import warnings
# This package exists to host vendored top-level Python packages for downstream packaging. Any Python packages
# installed beneath this one will be masked from the Ansible loader, and available from the front of sys.path.
# It is expected that the vendored packages will be loaded very early, so a warning will be fired on import of
# the top-level ansible package if any packages beneath this are already loaded at that point.
#
# Python packages may be installed here during downstream packaging using something like:
# pip install --upgrade -t (path to this dir) cryptography pyyaml packaging jinja2
# mask vendored content below this package from being accessed as an ansible subpackage
# (an empty __path__ means "ansible._vendor.X" imports can never resolve here)
__path__ = []
def _ensure_vendored_path_entry():
    """
    Ensure that any downstream-bundled content beneath this package is available at the top of sys.path
    """
    vendored_dir = os.path.dirname(__file__)
    bundled_names = set(m[1] for m in pkgutil.iter_modules([vendored_dir], ''))  # m[1] == m.name
    if not bundled_names:
        return
    # make vendored deps win over any other copy; on reload, drop the stale
    # entry (wherever it is) so this dir ends up at the front again
    if vendored_dir in sys.path:
        sys.path.remove(vendored_dir)
    sys.path.insert(0, vendored_dir)
    clashes = bundled_names.intersection(sys.modules)
    if clashes:
        warnings.warn('One or more Python packages bundled by this ansible-core distribution were already '
                      'loaded ({0}). This may result in undefined behavior.'.format(', '.join(sorted(clashes))))
# run at import time so vendored packages win before anything else loads
_ensure_vendored_path_entry()
|
python
|
github
|
https://github.com/ansible/ansible
|
lib/ansible/_vendor/__init__.py
|
"""
=========================================
Adapting gray-scale filters to RGB images
=========================================
There are many filters that are designed to work with gray-scale images but not
with color images. To simplify the process of creating functions that can adapt
to RGB images, scikit-image provides the ``adapt_rgb`` decorator.
To actually use the ``adapt_rgb`` decorator, you have to decide how you want to
adapt the RGB image for use with the gray-scale filter. There are two
pre-defined handlers:
``each_channel``
Pass each of the RGB channels to the filter one-by-one, and stitch the
results back into an RGB image.
``hsv_value``
Convert the RGB image to HSV and pass the value channel to the filter.
The filtered result is inserted back into the HSV image and converted
back to RGB.
Below, we demonstrate the use of ``adapt_rgb`` on a couple of gray-scale
filters:
"""
from skimage.color.adapt_rgb import adapt_rgb, each_channel, hsv_value
from skimage import filters
@adapt_rgb(each_channel)
def sobel_each(image):
    """Sobel edge magnitude, applied to each RGB channel independently."""
    return filters.sobel(image)
@adapt_rgb(hsv_value)
def sobel_hsv(image):
    """Sobel edge magnitude applied to the HSV value channel only."""
    return filters.sobel(image)
######################################################################
# We can use these functions as we would normally use them, but now they work
# with both gray-scale and color images. Let's plot the results with a color
# image:
from skimage import data
from skimage.exposure import rescale_intensity
import matplotlib.pyplot as plt

image = data.astronaut()
fig = plt.figure(figsize=(14, 7))
# BUGFIX: adjustable='box-forced' was removed in Matplotlib 2.2;
# 'box' now works together with shared axes.
ax_each = fig.add_subplot(121, adjustable='box')
ax_hsv = fig.add_subplot(122, sharex=ax_each, sharey=ax_each,
                         adjustable='box')

# We use 1 - sobel_each(image)
# but this will not work if image is not normalized
ax_each.imshow(rescale_intensity(1 - sobel_each(image)))
ax_each.set_xticks([]), ax_each.set_yticks([])
ax_each.set_title("Sobel filter computed\n on individual RGB channels")

# We use 1 - sobel_hsv(image) but this will not work if image is not normalized
ax_hsv.imshow(rescale_intensity(1 - sobel_hsv(image)))
ax_hsv.set_xticks([]), ax_hsv.set_yticks([])
ax_hsv.set_title("Sobel filter computed\n on (V)alue converted image (HSV)")
######################################################################
# Notice that the result for the value-filtered image preserves the color of
# the original image, but the channel-filtered image combines the channels
# in a more surprising way. In other common cases, smoothing for example,
# the channel-filtered image will produce a better result than the
# value-filtered image.
#
# You can also create your own handler functions for ``adapt_rgb``. To do so,
# just create a function with the following signature::
#
# def handler(image_filter, image, *args, **kwargs):
# # Manipulate RGB image here...
# image = image_filter(image, *args, **kwargs)
# # Manipulate filtered image here...
# return image
#
# Note that ``adapt_rgb`` handlers are written for filters where the image is
# the first argument.
#
# As a very simple example, we can just convert any RGB image to grayscale
# and then return the filtered result:
from skimage.color import rgb2gray
def as_gray(image_filter, image, *args, **kwargs):
    """adapt_rgb handler: convert the RGB input to gray, then run the filter."""
    return image_filter(rgb2gray(image), *args, **kwargs)
######################################################################
# It's important to create a signature that uses ``*args`` and ``**kwargs``
# to pass arguments along to the filter so that the decorated function is
# allowed to have any number of positional and keyword arguments.
#
# Finally, we can use this handler with ``adapt_rgb`` just as before:
@adapt_rgb(as_gray)
def sobel_gray(image):
    """Sobel edge magnitude on a grayscale conversion of the input."""
    return filters.sobel(image)
fig = plt.figure(figsize=(7, 7))
# Share axes with the earlier comparison figure for consistent framing.
ax = fig.add_subplot(111, sharex=ax_each, sharey=ax_each,
                     adjustable='box-forced')

# We use 1 - sobel_gray(image)
# but this will not work if image is not normalized
ax.imshow(rescale_intensity(1 - sobel_gray(image)), cmap=plt.cm.gray)
ax.set_xticks([]), ax.set_yticks([])
ax.set_title("Sobel filter computed\n on the converted grayscale image")

plt.show()
######################################################################
#
# .. note::
#
# A very simple check of the array shape is used for detecting RGB
# images, so ``adapt_rgb`` is not recommended for functions that support
# 3D volumes or color images in non-RGB spaces.
|
unknown
|
codeparrot/codeparrot-clean
| ||
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
// MakeCleanup(f) returns an RAII cleanup object that calls 'f' in its
// destructor. The easiest way to use MakeCleanup is with a lambda argument,
// capturing the return value in an 'auto' local variable. Most users will not
// need more sophisticated syntax than that.
//
// Example:
// void func() {
// FILE* fp = fopen("data.txt", "r");
// if (fp == nullptr) return;
// auto fp_cleaner = gtl::MakeCleanup([fp] { fclose(fp); });
// // No matter what, fclose(fp) will happen.
// DataObject d;
// while (ReadDataObject(fp, &d)) {
// if (d.IsBad()) {
// LOG(ERROR) << "Bad Data";
// return;
// }
// PushGoodData(d);
// }
// }
//
// You can use Cleanup<F> directly, instead of using MakeCleanup and auto,
// but there's rarely a reason to do that.
//
// You can call 'release()' on a Cleanup object to cancel the cleanup.
#ifndef TENSORFLOW_C_EXPERIMENTAL_FILESYSTEM_PLUGINS_GCS_CLEANUP_H_
#define TENSORFLOW_C_EXPERIMENTAL_FILESYSTEM_PLUGINS_GCS_CLEANUP_H_
#include <type_traits>
#include <utility>
namespace tf_gcs_filesystem {
// A move-only RAII object that calls a stored cleanup functor when
// destroyed. Cleanup<F> is the return type of gtl::MakeCleanup(F).
template <typename F>
class Cleanup {
 public:
  // A default-constructed Cleanup is inert: released_ starts true, so the
  // destructor never invokes the value-initialized functor.
  Cleanup() : released_(true), f_() {}

  // Wraps functor `f`; it runs on destruction unless release() is called.
  template <typename G>
  explicit Cleanup(G&& f)          // NOLINT
      : f_(std::forward<G>(f)) {}  // NOLINT(build/c++11)

  // Move constructor: takes over the source's pending cleanup (if any);
  // src.release() marks the source as released so it won't also fire.
  Cleanup(Cleanup&& src)  // NOLINT
      : released_(src.is_released()), f_(src.release()) {}

  // Implicitly move-constructible from any compatible Cleanup<G>.
  // The source will be released as if src.release() were called.
  // A moved-from Cleanup can be safely destroyed or reassigned.
  template <typename G>
  Cleanup(Cleanup<G>&& src)  // NOLINT
      : released_(src.is_released()), f_(src.release()) {}

  // Assignment to a Cleanup object behaves like destroying it
  // and making a new one in its place, analogous to unique_ptr
  // semantics.
  Cleanup& operator=(Cleanup&& src) {  // NOLINT
    if (!released_) f_();  // run our own pending cleanup first
    released_ = src.released_;
    f_ = src.release();
    return *this;
  }

  ~Cleanup() {
    if (!released_) f_();
  }

  // Releases the cleanup function instead of running it.
  // Hint: use c.release()() to run early.
  F release() {
    released_ = true;
    return std::move(f_);
  }

  bool is_released() const { return released_; }

 private:
  static_assert(!std::is_reference<F>::value, "F must not be a reference");

  bool released_ = false;
  F f_;
};
// Factory that deduces the decayed functor type so callers can write
// `auto c = MakeCleanup([...]{ ... });` without spelling out Cleanup<F>.
// The leading int&... pack acts as a barrier against explicitly supplying
// template arguments positionally (per its name).
template <int&... ExplicitParameterBarrier, typename F,
          typename DecayF = typename std::decay<F>::type>
Cleanup<DecayF> MakeCleanup(F&& f) {
  return Cleanup<DecayF>(std::forward<F>(f));
}
} // namespace tf_gcs_filesystem
#endif // TENSORFLOW_C_EXPERIMENTAL_FILESYSTEM_PLUGINS_GCS_CLEANUP_H_
|
c
|
github
|
https://github.com/tensorflow/tensorflow
|
tensorflow/c/experimental/filesystem/plugins/gcs/cleanup.h
|
// Code generated by "stringer -type Quality"; DO NOT EDIT.
package plans
import "strconv"
// Compile-time guard emitted by stringer: building fails with an
// "invalid array index" error if the constant values drift from the
// values this file was generated against.
func _() {
	// An "invalid array index" compiler error signifies that the constant values have changed.
	// Re-run the stringer command to generate them again.
	var x [1]struct{}
	_ = x[Errored-0]
	_ = x[NoChanges-1]
}

// _Quality_name concatenates every value name; _Quality_index holds the
// start offset of each name within that string (last entry is the total length).
const _Quality_name = "ErroredNoChanges"

var _Quality_index = [...]uint8{0, 7, 16}

// String returns the declared name of the Quality value, or
// "Quality(<n>)" for values outside the generated range.
func (i Quality) String() string {
	idx := int(i) - 0
	if i < 0 || idx >= len(_Quality_index)-1 {
		return "Quality(" + strconv.FormatInt(int64(i), 10) + ")"
	}
	return _Quality_name[_Quality_index[idx]:_Quality_index[idx+1]]
}
|
go
|
github
|
https://github.com/hashicorp/terraform
|
internal/plans/quality_string.go
|
use rustc_data_structures::fx::FxIndexMap;
use rustc_hir::def_id::DefId;
use rustc_middle::ty::{self, OutlivesPredicate, TyCtxt};
use super::utils::*;
/// Cache mapping a `DefId` to the outlives-relevant explicit predicates
/// written on that item, converted into `RequiredPredicates` form.
#[derive(Debug)]
pub(crate) struct ExplicitPredicatesMap<'tcx> {
    map: FxIndexMap<DefId, ty::EarlyBinder<'tcx, RequiredPredicates<'tcx>>>,
}

impl<'tcx> ExplicitPredicatesMap<'tcx> {
    pub(crate) fn new() -> ExplicitPredicatesMap<'tcx> {
        ExplicitPredicatesMap { map: FxIndexMap::default() }
    }

    /// Returns (computing and caching on first use) the explicit
    /// outlives predicates of `def_id`.
    pub(crate) fn explicit_predicates_of(
        &mut self,
        tcx: TyCtxt<'tcx>,
        def_id: DefId,
    ) -> &ty::EarlyBinder<'tcx, RequiredPredicates<'tcx>> {
        self.map.entry(def_id).or_insert_with(|| {
            // Local items and external items go through different queries.
            let predicates = if def_id.is_local() {
                tcx.explicit_predicates_of(def_id)
            } else {
                tcx.predicates_of(def_id)
            };
            let mut required_predicates = RequiredPredicates::default();

            // process predicates and convert to `RequiredPredicates` entry, see below
            for &(predicate, span) in predicates.predicates {
                match predicate.kind().skip_binder() {
                    // `T: 'a` — record the type/region pair.
                    ty::ClauseKind::TypeOutlives(OutlivesPredicate(ty, reg)) => {
                        insert_outlives_predicate(
                            tcx,
                            ty.into(),
                            reg,
                            span,
                            &mut required_predicates,
                        )
                    }
                    // `'a: 'b` — record the region/region pair.
                    ty::ClauseKind::RegionOutlives(OutlivesPredicate(reg1, reg2)) => {
                        insert_outlives_predicate(
                            tcx,
                            reg1.into(),
                            reg2,
                            span,
                            &mut required_predicates,
                        )
                    }
                    // All other clause kinds carry no outlives information.
                    ty::ClauseKind::Trait(_)
                    | ty::ClauseKind::Projection(_)
                    | ty::ClauseKind::ConstArgHasType(_, _)
                    | ty::ClauseKind::WellFormed(_)
                    | ty::ClauseKind::ConstEvaluatable(_)
                    | ty::ClauseKind::UnstableFeature(_)
                    | ty::ClauseKind::HostEffect(..) => {}
                }
            }

            ty::EarlyBinder::bind(required_predicates)
        })
    }
}
|
rust
|
github
|
https://github.com/rust-lang/rust
|
compiler/rustc_hir_analysis/src/outlives/explicit.rs
|
from django.db import models
from django.contrib.auth.models import AbstractUser
class Guild(models.Model):
    # Guild name; unique and indexed for fast lookup by name.
    name = models.CharField(max_length=40, unique=True, db_index=True)
    # String reference because Hero is defined later in this module.
    # NOTE(review): no on_delete argument, so this targets Django < 2.0
    # (implicit CASCADE) — confirm before upgrading.
    owner = models.ForeignKey('Hero')

    def __unicode__(self):
        # Python-2-era str representation (matches the other models here).
        return self.name
class Title(models.Model):
    # Display name of an earnable title; unique and indexed.
    name = models.CharField(max_length=40, unique=True, db_index=True)

    def __unicode__(self):
        return self.name
class Level(models.Model):
    # Experience threshold for this level; the level number itself is the
    # implicit auto pk (see Hero.level_no, which uses self.level.id).
    exp = models.IntegerField()

    def __unicode__(self):
        # e.g. "3 [1500]" — pk followed by its exp threshold.
        return "%s [%s]" % (self.id, self.exp)
class Hero(AbstractUser):
    """Player account: Django's user model extended with game progression.

    ``level`` is derived from ``exp`` against the ``Level`` table rather
    than stored, so it can never drift out of sync with experience.
    """

    exp = models.IntegerField(default=0)
    credits = models.IntegerField(default=0)
    # NOTE(review): null=True has no effect on ManyToManyField; kept so the
    # declaration stays identical for migration purposes.
    titles = models.ManyToManyField(Title, null=True, blank=True)
    guilds = models.ManyToManyField(Guild, null=True, blank=True)

    @property
    def level(self):
        """Highest Level whose exp threshold is <= this hero's exp,
        or ``None`` when no such Level row exists."""
        # order_by('-exp') is equivalent to order_by('exp').reverse().
        levels = Level.objects.filter(exp__lte=self.exp).order_by('-exp')
        if levels:
            return levels[0]
        return None

    @property
    def level_no(self):
        """Numeric level (the Level pk), or 0 when unleveled."""
        current = self.level  # evaluate the property once: a single query
        if current is not None:
            return current.id
        return 0

    @property
    def next_level(self):
        """The Level after the current one, or the current Level (possibly
        ``None``) when there is no higher level defined."""
        next_lvl = Level.objects.filter(pk=int(self.level_no) + 1)
        if next_lvl.exists():
            return next_lvl[0]
        return self.level

    @property
    def exp_percentage(self):
        """Progress toward the next level as an integer percentage.

        Returns 0 when the hero has no exp, when no Level rows exist
        (``next_level`` is ``None``), or when the next threshold is 0 —
        the previous implementation raised AttributeError or
        ZeroDivisionError in those cases.
        """
        if not self.exp:
            return 0
        nxt = self.next_level
        if nxt is None or not nxt.exp:
            return 0
        return int((float(self.exp) / float(nxt.exp)) * 100)
|
unknown
|
codeparrot/codeparrot-clean
| ||
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible import __version__
from ansible.errors import AnsibleError
from distutils.version import LooseVersion
from operator import eq, ge, gt
from sys import version_info
# Ansible exposes its shared console display via __main__ when run from the
# CLI; fall back to building one (e.g. when this module is imported directly).
try:
    from __main__ import display
except ImportError:
    from ansible.utils.display import Display
    display = Display()

# Supported Ansible version window for Trellis.
version_requirement = '2.8.0'
version_tested_max = '2.9.10'
# Minimum Ansible version required when running under Python 3.
python3_required_version = '2.5.3'

# Hard failure: Python 3 with an Ansible too old to support it.
if version_info[0] == 3 and not ge(LooseVersion(__version__), LooseVersion(python3_required_version)):
    raise AnsibleError(('Ansible >= {} is required when using Python 3.\n'
                        'Either downgrade to Python 2 or update your Ansible version to {}.').format(python3_required_version, python3_required_version))

# Hard failure below the minimum; soft warning above the tested maximum.
if not ge(LooseVersion(__version__), LooseVersion(version_requirement)):
    raise AnsibleError(('Trellis no longer supports Ansible {}.\n'
                        'Please upgrade to Ansible {} or higher.').format(__version__, version_requirement))
elif gt(LooseVersion(__version__), LooseVersion(version_tested_max)):
    display.warning(u'Your Ansible version is {} but this version of Trellis has only been tested for '
                    u'compatability with Ansible {} -> {}. It is advisable to check for Trellis updates or '
                    u'downgrade your Ansible version.'.format(__version__, version_requirement, version_tested_max))

# Known-noisy release: warn so users understand spurious messages they may see.
if eq(LooseVersion(__version__), LooseVersion('2.5.0')):
    display.warning(u'Your Ansible version is {}. Consider upgrading your Ansible version to avoid '
                    u'erroneous warnings such as `Removed restricted key from module data...`'.format(__version__))
# Import BaseVarsPlugin after Ansible version check.
# Otherwise import error for Ansible versions older than 2.4 would prevent display of version check message.
from ansible.plugins.vars import BaseVarsPlugin
class VarsModule(BaseVarsPlugin):
    """Vars plugin whose only purpose is the version check performed at
    module import time; it contributes no variables itself."""

    def get_vars(self, loader, path, entities, cache=True):
        # Intentionally empty: importing this module already ran the
        # Ansible version compatibility checks above.
        return {}
|
unknown
|
codeparrot/codeparrot-clean
| ||
/* Copyright 2021 - 2025 R. Thomas
* Copyright 2021 - 2025 Quarkslab
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "LIEF/PE/signature/RsaInfo.hpp"
#include <mbedtls/bignum.h>
#include <mbedtls/md.h>
#include <mbedtls/rsa.h>
#include <utility>
namespace LIEF {
namespace PE {
RsaInfo::RsaInfo() = default;

// Builds an owning deep copy of the caller's mbedtls RSA context; the
// copy is freed in the destructor.
RsaInfo::RsaInfo(const RsaInfo::rsa_ctx_handle ctx) {
  const auto* pctx = reinterpret_cast<const mbedtls_rsa_context*>(ctx);
  auto* local_ctx = new mbedtls_rsa_context{};
  mbedtls_rsa_init(local_ctx);
  // Mirror the source's padding/hash configuration on the new context.
  mbedtls_rsa_set_padding(local_ctx, pctx->private_padding,
                          static_cast<mbedtls_md_type_t>(pctx->private_hash_id));
  mbedtls_rsa_copy(local_ctx, pctx);
  mbedtls_rsa_complete(local_ctx);
  ctx_ = reinterpret_cast<RsaInfo::rsa_ctx_handle>(local_ctx);
}
// Copy constructor: deep-copies the other object's RSA context. An empty
// source leaves ctx_ at its in-class default (presumably nullptr — the
// header is not visible here; confirm).
RsaInfo::RsaInfo(const RsaInfo& other)
{
  if (other.ctx_ != nullptr) {
    const auto* octx = reinterpret_cast<const mbedtls_rsa_context*>(other.ctx_);
    auto* local_ctx = new mbedtls_rsa_context{};
    mbedtls_rsa_init(local_ctx);
    // Mirror the source's padding/hash configuration on the new context.
    mbedtls_rsa_set_padding(local_ctx, octx->private_padding,
                            static_cast<mbedtls_md_type_t>(octx->private_hash_id));
    mbedtls_rsa_copy(local_ctx, octx);
    mbedtls_rsa_complete(local_ctx);
    ctx_ = reinterpret_cast<RsaInfo::rsa_ctx_handle>(local_ctx);
  }
}
// Move constructor: steal the context handle and null out the source.
// The original left other.ctx_ pointing at the same mbedtls context, so
// destroying the moved-from object freed it a second time (double free);
// std::exchange (from <utility>, already included) fixes that.
RsaInfo::RsaInfo(RsaInfo&& other) :
  ctx_{std::exchange(other.ctx_, nullptr)}
{}
// Copy-and-swap assignment: `other` arrives by value (already copied or
// moved at the call site); swapping hands us the new state and lets
// `other`'s destructor release our previous context.
RsaInfo& RsaInfo::operator=(RsaInfo other) {
  swap(other);
  return *this;
}

void RsaInfo::swap(RsaInfo& other) {
  std::swap(ctx_, other.ctx_);
}
// True when this object holds an RSA context.
RsaInfo::operator bool() const {
  return ctx_ != nullptr;
}

// NOTE(review): both checks dereference ctx_ unchecked; callers appear
// expected to test operator bool() first — confirm at call sites.
bool RsaInfo::has_public_key() const {
  auto* lctx = reinterpret_cast<mbedtls_rsa_context*>(ctx_);
  return mbedtls_rsa_check_pubkey(lctx) == 0;
}

bool RsaInfo::has_private_key() const {
  auto* lctx = reinterpret_cast<mbedtls_rsa_context*>(ctx_);
  return mbedtls_rsa_check_privkey(lctx) == 0;
}
// Each accessor below exports one RSA parameter (modulus N, public
// exponent E, private exponent D, primes P and Q) as a byte vector sized
// to the parameter's length (big-endian, per mbedtls_mpi_write_binary).
// NOTE(review): ctx_ is dereferenced unchecked, as in the key checks above.
RsaInfo::bignum_wrapper_t RsaInfo::N() const {
  auto* lctx = reinterpret_cast<mbedtls_rsa_context*>(ctx_);
  bignum_wrapper_t N(mbedtls_mpi_size(&lctx->private_N));
  mbedtls_mpi_write_binary(&lctx->private_N, N.data(), N.size());
  return N;
}

RsaInfo::bignum_wrapper_t RsaInfo::E() const {
  auto* lctx = reinterpret_cast<mbedtls_rsa_context*>(ctx_);
  bignum_wrapper_t E(mbedtls_mpi_size(&lctx->private_E));
  mbedtls_mpi_write_binary(&lctx->private_E, E.data(), E.size());
  return E;
}

RsaInfo::bignum_wrapper_t RsaInfo::D() const {
  auto* lctx = reinterpret_cast<mbedtls_rsa_context*>(ctx_);
  bignum_wrapper_t D(mbedtls_mpi_size(&lctx->private_D));
  mbedtls_mpi_write_binary(&lctx->private_D, D.data(), D.size());
  return D;
}

RsaInfo::bignum_wrapper_t RsaInfo::P() const {
  auto* lctx = reinterpret_cast<mbedtls_rsa_context*>(ctx_);
  bignum_wrapper_t P(mbedtls_mpi_size(&lctx->private_P));
  mbedtls_mpi_write_binary(&lctx->private_P, P.data(), P.size());
  return P;
}

RsaInfo::bignum_wrapper_t RsaInfo::Q() const {
  auto* lctx = reinterpret_cast<mbedtls_rsa_context*>(ctx_);
  bignum_wrapper_t Q(mbedtls_mpi_size(&lctx->private_Q));
  mbedtls_mpi_write_binary(&lctx->private_Q, Q.data(), Q.size());
  return Q;
}
// Key size in bits (mbedtls reports the modulus length in bytes).
size_t RsaInfo::key_size() const {
  auto* lctx = reinterpret_cast<mbedtls_rsa_context*>(ctx_);
  return mbedtls_rsa_get_len(lctx) * 8;
}
// Frees the owned mbedtls context, if any: mbedtls_rsa_free releases the
// internal bignums, then delete releases the struct allocated with new.
RsaInfo::~RsaInfo() {
  if (ctx_ != nullptr) {
    auto* lctx = reinterpret_cast<mbedtls_rsa_context*>(ctx_);
    mbedtls_rsa_free(lctx);
    delete lctx;
  }
}
// Stream output: only the empty case is implemented so far.
std::ostream& operator<<(std::ostream& os, const RsaInfo& info) {
  if (!info) {
    os << "<Empty>";
  } else {
    // TODO
  }
  return os;
}
}
}
|
cpp
|
github
|
https://github.com/nodejs/node
|
deps/LIEF/src/PE/signature/RsaInfo.cpp
|
from __future__ import annotations
import importlib.util
import os
import sys
import typing as t
from datetime import datetime
from functools import cache
from functools import update_wrapper
import werkzeug.utils
from werkzeug.exceptions import abort as _wz_abort
from werkzeug.utils import redirect as _wz_redirect
from werkzeug.wrappers import Response as BaseResponse
from .globals import _cv_app
from .globals import app_ctx
from .globals import current_app
from .globals import request
from .globals import session
from .signals import message_flashed
if t.TYPE_CHECKING: # pragma: no cover
from .wrappers import Response
def get_debug_flag() -> bool:
    """Get whether debug mode should be enabled for the app, indicated by the
    :envvar:`FLASK_DEBUG` environment variable. The default is ``False``.
    """
    raw = os.environ.get("FLASK_DEBUG")

    # Unset/empty means disabled; otherwise anything except an explicit
    # "0"/"false"/"no" (case-insensitive) enables debug mode.
    if not raw:
        return False

    return raw.lower() not in {"0", "false", "no"}
def get_load_dotenv(default: bool = True) -> bool:
    """Get whether the user has disabled loading default dotenv files by
    setting :envvar:`FLASK_SKIP_DOTENV`. The default is ``True``, load
    the files.

    :param default: What to return if the env var isn't set.
    """
    raw = os.environ.get("FLASK_SKIP_DOTENV")

    # Unset/empty: keep the caller-supplied default. Otherwise the env var
    # names "skip", so "0"/"false"/"no" means do NOT skip (i.e. load).
    return default if not raw else raw.lower() in ("0", "false", "no")
@t.overload
def stream_with_context(
    generator_or_function: t.Iterator[t.AnyStr],
) -> t.Iterator[t.AnyStr]: ...
@t.overload
def stream_with_context(
    generator_or_function: t.Callable[..., t.Iterator[t.AnyStr]],
) -> t.Callable[[t.Iterator[t.AnyStr]], t.Iterator[t.AnyStr]]: ...
def stream_with_context(
    generator_or_function: t.Iterator[t.AnyStr] | t.Callable[..., t.Iterator[t.AnyStr]],
) -> t.Iterator[t.AnyStr] | t.Callable[[t.Iterator[t.AnyStr]], t.Iterator[t.AnyStr]]:
    """Wrap a response generator function so that it runs inside the current
    request context. This keeps :data:`.request`, :data:`.session`, and :data:`.g`
    available, even though at the point the generator runs the request context
    will typically have ended.

    Use it as a decorator on a generator function:

    .. code-block:: python

        from flask import stream_with_context, request, Response

        @app.get("/stream")
        def streamed_response():
            @stream_with_context
            def generate():
                yield "Hello "
                yield request.args["name"]
                yield "!"

            return Response(generate())

    Or use it as a wrapper around a created generator:

    .. code-block:: python

        from flask import stream_with_context, request, Response

        @app.get("/stream")
        def streamed_response():
            def generate():
                yield "Hello "
                yield request.args["name"]
                yield "!"

            return Response(stream_with_context(generate()))

    .. versionadded:: 0.9
    """
    try:
        gen = iter(generator_or_function)  # type: ignore[arg-type]
    except TypeError:
        # Not an iterator, so this was used as a decorator on a generator
        # *function*: defer — wrap each call's generator individually.
        def decorator(*args: t.Any, **kwargs: t.Any) -> t.Any:
            gen = generator_or_function(*args, **kwargs)  # type: ignore[operator]
            return stream_with_context(gen)

        return update_wrapper(decorator, generator_or_function)  # type: ignore[arg-type]

    def generator() -> t.Iterator[t.AnyStr]:
        # Capture the active context at *creation* time (see the priming
        # `next()` below); fail loudly if there is none to capture.
        if (ctx := _cv_app.get(None)) is None:
            raise RuntimeError(
                "'stream_with_context' can only be used when a request"
                " context is active, such as in a view function."
            )

        with ctx:
            # Sentinel yield: execution pauses here while the context is
            # pushed, so later iteration runs inside `with ctx`.
            yield None  # type: ignore[misc]

            try:
                yield from gen
            finally:
                # Clean up in case the user wrapped a WSGI iterator.
                if hasattr(gen, "close"):
                    gen.close()

    # Execute the generator to the sentinel value. This captures the current
    # context and pushes it to preserve it. Further iteration will yield from
    # the original iterator.
    wrapped_g = generator()
    next(wrapped_g)
    return wrapped_g
def make_response(*args: t.Any) -> Response:
    """Coerce the given arguments into a response object via
    :meth:`flask.Flask.make_response`, so a view (or decorator) can attach
    headers or otherwise modify the response before returning it::

        def index():
            response = make_response(render_template('index.html', foo=42))
            response.headers['X-Parachutes'] = 'parachutes are cool'
            return response

    Accepts exactly what a view function may return, e.g. a body plus a
    status code::

        response = make_response(render_template('not_found.html'), 404)

    Behavior:

    - no arguments: a new, empty ``current_app.response_class`` instance
    - one argument: passed to :meth:`flask.Flask.make_response` as-is
    - several arguments: passed as a tuple

    .. versionadded:: 0.6
    """
    if len(args) == 0:
        return current_app.response_class()

    payload = args[0] if len(args) == 1 else args
    return current_app.make_response(payload)
def url_for(
    endpoint: str,
    *,
    _anchor: str | None = None,
    _method: str | None = None,
    _scheme: str | None = None,
    _external: bool | None = None,
    **values: t.Any,
) -> str:
    """Generate a URL for *endpoint* with the given values.

    Requires an active request or application context; delegates to
    :meth:`current_app.url_for() <flask.Flask.url_for>` — see that method
    for full documentation.

    :param endpoint: The endpoint name; a leading ``.`` resolves against
        the current blueprint.
    :param _anchor: If given, append this as ``#anchor`` to the URL.
    :param _method: If given, generate the URL associated with this
        method for the endpoint.
    :param _scheme: If given, the URL will have this scheme if it is
        external.
    :param _external: Prefer an internal (False) or require an external
        (True) URL. External URLs include scheme and domain; outside an
        active request, URLs are external by default.
    :param values: Values for the variable parts of the URL rule; unknown
        keys become query-string arguments.

    .. versionchanged:: 2.2
        Calls ``current_app.url_for``, allowing an app to override the
        behavior.

    .. versionchanged:: 0.10
        The ``_scheme`` parameter was added.

    .. versionchanged:: 0.9
        The ``_anchor`` and ``_method`` parameters were added.

    .. versionchanged:: 0.9
        Calls ``app.handle_url_build_error`` on build errors.
    """
    app = current_app
    return app.url_for(
        endpoint,
        _anchor=_anchor,
        _method=_method,
        _scheme=_scheme,
        _external=_external,
        **values,
    )
def redirect(
    location: str, code: int = 303, Response: type[BaseResponse] | None = None
) -> BaseResponse:
    """Create a redirect response object.

    Uses :meth:`~flask.Flask.redirect` when :data:`~flask.current_app` is
    available, otherwise falls back to :func:`werkzeug.utils.redirect`.

    :param location: The URL to redirect to.
    :param code: The status code for the redirect.
    :param Response: The response class to use. Not used when
        ``current_app`` is active, which uses ``app.response_class``.

    .. versionchanged:: 3.2
        ``code`` defaults to ``303`` instead of ``302``.

    .. versionadded:: 2.2
        Calls ``current_app.redirect`` if available instead of always
        using Werkzeug's default ``redirect``.
    """
    ctx = _cv_app.get(None)
    if ctx is None:
        # No active app context: plain Werkzeug redirect.
        return _wz_redirect(location, code=code, Response=Response)
    return ctx.app.redirect(location, code=code)
def abort(code: int | BaseResponse, *args: t.Any, **kwargs: t.Any) -> t.NoReturn:
    """Raise an :exc:`~werkzeug.exceptions.HTTPException` for the given
    status code.

    Uses the app's :attr:`~flask.Flask.aborter` when
    :data:`~flask.current_app` is available, otherwise
    :func:`werkzeug.exceptions.abort`.

    :param code: The status code for the exception, which must be
        registered in ``app.aborter``.
    :param args: Passed to the exception.
    :param kwargs: Passed to the exception.

    .. versionadded:: 2.2
        Calls ``current_app.aborter`` if available instead of always
        using Werkzeug's default ``abort``.
    """
    ctx = _cv_app.get(None)
    if ctx is not None:
        ctx.app.aborter(code, *args, **kwargs)

    # Reached only when no app is active (the aborter raises).
    _wz_abort(code, *args, **kwargs)
def get_template_attribute(template_name: str, attribute: str) -> t.Any:
    """Load a macro (or variable) that a template exports, so it can be
    invoked from Python code. Given a template :file:`_cider.html`:

    .. sourcecode:: html+jinja

       {% macro hello(name) %}Hello {{ name }}!{% endmacro %}

    you can call it like this::

        hello = get_template_attribute('_cider.html', 'hello')
        return hello('World')

    .. versionadded:: 0.2

    :param template_name: the name of the template
    :param attribute: the name of the variable of macro to access
    """
    template = current_app.jinja_env.get_template(template_name)
    return getattr(template.module, attribute)
def flash(message: str, category: str = "message") -> None:
    """Flash *message* to the next request; templates retrieve and clear it
    via :func:`get_flashed_messages`.

    .. versionchanged:: 0.3
        `category` parameter added.

    :param message: the message to be flashed.
    :param category: the category for the message, e.g. ``'message'``,
        ``'error'``, ``'info'`` or ``'warning'`` — any string works.
    """
    # Deliberately read-modify-write the whole list instead of
    # session.setdefault('_flashes', []).append(...): session backends with
    # external storage do not observe in-place mutation of stored values,
    # so the explicit reassignment is what persists the change.
    pending = session.get("_flashes", [])
    pending.append((category, message))
    session["_flashes"] = pending

    app = current_app._get_current_object()
    message_flashed.send(
        app,
        _async_wrapper=app.ensure_sync,
        message=message,
        category=category,
    )
def get_flashed_messages(
    with_categories: bool = False, category_filter: t.Iterable[str] = ()
) -> list[str] | list[tuple[str, str]]:
    """Pull all flashed messages from the session and return them.
    Repeated calls within the same request return the same messages
    (they are cached on the app context after the first pull).

    :param with_categories: set to ``True`` to get ``(category, message)``
        tuples instead of bare message strings.
    :param category_filter: iterable of categories; when non-empty, only
        messages whose category is listed are returned. Useful for
        rendering categories in separate html blocks.

    See :doc:`/patterns/flashing` for examples.

    .. versionchanged:: 0.3
        `with_categories` parameter added.

    .. versionchanged:: 0.9
        `category_filter` parameter added.
    """
    flashes = app_ctx._flashes
    if flashes is None:
        # First access this request: drain the session (only touch the
        # session when the key exists) and cache on the app context.
        if "_flashes" in session:
            flashes = session.pop("_flashes")
        else:
            flashes = []
        app_ctx._flashes = flashes

    if category_filter:
        flashes = [f for f in flashes if f[0] in category_filter]

    if with_categories:
        return flashes
    return [message for _, message in flashes]
def _prepare_send_file_kwargs(**kwargs: t.Any) -> dict[str, t.Any]:
    """Fill in the Flask-specific arguments that
    :func:`werkzeug.utils.send_file` needs, from the active app context."""
    ctx = app_ctx._get_current_object()

    if kwargs.get("max_age") is None:
        kwargs["max_age"] = ctx.app.get_send_file_max_age

    # These always come from the active context, overriding anything the
    # caller passed under the same keys.
    kwargs["environ"] = ctx.request.environ
    kwargs["use_x_sendfile"] = ctx.app.config["USE_X_SENDFILE"]
    kwargs["response_class"] = ctx.app.response_class
    kwargs["_root_path"] = ctx.app.root_path
    return kwargs
def send_file(
    path_or_file: os.PathLike[t.AnyStr] | str | t.IO[bytes],
    mimetype: str | None = None,
    as_attachment: bool = False,
    download_name: str | None = None,
    conditional: bool = True,
    etag: bool | str = True,
    last_modified: datetime | int | float | None = None,
    max_age: None | (int | t.Callable[[str | None], int | None]) = None,
) -> Response:
    """Send the contents of a file to the client.

    The first argument can be a file path or a file-like object. Paths
    are preferred in most cases because Werkzeug can manage the file and
    get extra information from the path. Passing a file-like object
    requires that the file is opened in binary mode, and is mostly
    useful when building a file in memory with :class:`io.BytesIO`.

    Never pass file paths provided by a user. The path is assumed to be
    trusted, so a user could craft a path to access a file you didn't
    intend. Use :func:`send_from_directory` to safely serve
    user-requested paths from within a directory.

    If the WSGI server sets a ``file_wrapper`` in ``environ``, it is
    used, otherwise Werkzeug's built-in wrapper is used. Alternatively,
    if the HTTP server supports ``X-Sendfile``, configuring Flask with
    ``USE_X_SENDFILE = True`` will tell the server to send the given
    path, which is much more efficient than reading it in Python.

    :param path_or_file: The path to the file to send, relative to the
        current working directory if a relative path is given.
        Alternatively, a file-like object opened in binary mode. Make
        sure the file pointer is seeked to the start of the data.
    :param mimetype: The MIME type to send for the file. If not
        provided, it will try to detect it from the file name.
    :param as_attachment: Indicate to a browser that it should offer to
        save the file instead of displaying it.
    :param download_name: The default name browsers will use when saving
        the file. Defaults to the passed file name.
    :param conditional: Enable conditional and range responses based on
        request headers. Requires passing a file path and ``environ``.
    :param etag: Calculate an ETag for the file, which requires passing
        a file path. Can also be a string to use instead.
    :param last_modified: The last modified time to send for the file,
        in seconds. If not provided, it will try to detect it from the
        file path.
    :param max_age: How long the client should cache the file, in
        seconds. If set, ``Cache-Control`` will be ``public``, otherwise
        it will be ``no-cache`` to prefer conditional caching.

    .. versionchanged:: 2.0
        ``download_name`` replaces the ``attachment_filename``
        parameter. If ``as_attachment=False``, it is passed with
        ``Content-Disposition: inline`` instead.

    .. versionchanged:: 2.0
        ``max_age`` replaces the ``cache_timeout`` parameter.
        ``conditional`` is enabled and ``max_age`` is not set by
        default.

    .. versionchanged:: 2.0
        ``etag`` replaces the ``add_etags`` parameter. It can be a
        string to use instead of generating one.

    .. versionchanged:: 2.0
        Passing a file-like object that inherits from
        :class:`~io.TextIOBase` will raise a :exc:`ValueError` rather
        than sending an empty file.

    .. versionadded:: 2.0
        Moved the implementation to Werkzeug. This is now a wrapper to
        pass some Flask-specific arguments.

    .. versionchanged:: 1.1
        ``filename`` may be a :class:`~os.PathLike` object.

    .. versionchanged:: 1.1
        Passing a :class:`~io.BytesIO` object supports range requests.

    .. versionchanged:: 1.0.3
        Filenames are encoded with ASCII instead of Latin-1 for broader
        compatibility with WSGI servers.

    .. versionchanged:: 1.0
        UTF-8 filenames as specified in :rfc:`2231` are supported.

    .. versionchanged:: 0.12
        The filename is no longer automatically inferred from file
        objects. If you want to use automatic MIME and etag support,
        pass a filename via ``filename_or_fp`` or
        ``attachment_filename``.

    .. versionchanged:: 0.12
        ``attachment_filename`` is preferred over ``filename`` for MIME
        detection.

    .. versionchanged:: 0.9
        ``cache_timeout`` defaults to
        :meth:`Flask.get_send_file_max_age`.

    .. versionchanged:: 0.7
        MIME guessing and etag support for file-like objects was
        removed because it was unreliable. Pass a filename if you are
        able to, otherwise attach an etag yourself.

    .. versionchanged:: 0.5
        The ``add_etags``, ``cache_timeout`` and ``conditional``
        parameters were added. The default behavior is to add etags.

    .. versionadded:: 0.2
    """
    # ``environ`` is not passed here: ``_prepare_send_file_kwargs`` sets it
    # (and the other Flask-specific defaults) from the active app context
    # itself, unconditionally overwriting any caller-supplied value, so
    # passing ``request.environ`` explicitly was redundant.
    return werkzeug.utils.send_file(  # type: ignore[return-value]
        **_prepare_send_file_kwargs(
            path_or_file=path_or_file,
            mimetype=mimetype,
            as_attachment=as_attachment,
            download_name=download_name,
            conditional=conditional,
            etag=etag,
            last_modified=last_modified,
            max_age=max_age,
        )
    )
def send_from_directory(
    directory: os.PathLike[str] | str,
    path: os.PathLike[str] | str,
    **kwargs: t.Any,
) -> Response:
    """Send a file from within a directory using :func:`send_file`.

    .. code-block:: python

        @app.route("/uploads/<path:name>")
        def download_file(name):
            return send_from_directory(
                app.config['UPLOAD_FOLDER'], name, as_attachment=True
            )

    This is a secure way to serve files from a folder, such as static
    files or uploads: :func:`~werkzeug.security.safe_join` ensures the
    client-supplied path cannot escape ``directory``. A 404
    :exc:`~werkzeug.exceptions.NotFound` is raised when the resolved path
    is not an existing regular file.

    :param directory: The directory that ``path`` must be located under,
        relative to the current application's root path. This *must not*
        be a value provided by the client, otherwise it becomes insecure.
    :param path: The path to the file to send, relative to ``directory``.
    :param kwargs: Arguments to pass to :func:`send_file`.

    .. versionchanged:: 2.0
        ``path`` replaces the ``filename`` parameter.

    .. versionadded:: 2.0
        Moved the implementation to Werkzeug. This is now a wrapper to
        pass some Flask-specific arguments.

    .. versionadded:: 0.5
    """
    send_kwargs = _prepare_send_file_kwargs(**kwargs)
    return werkzeug.utils.send_from_directory(  # type: ignore[return-value]
        directory, path, **send_kwargs
    )
def get_root_path(import_name: str) -> str:
    """Find the root path of a package, or the path that contains a
    module. If it cannot be found, returns the current working
    directory.

    Not to be confused with the value returned by :func:`find_package`.

    :meta private:
    """
    # Fast path: the module is already imported and knows its file.
    loaded = sys.modules.get(import_name)
    if loaded is not None and getattr(loaded, "__file__", None) is not None:
        return os.path.dirname(os.path.abspath(loaded.__file__))

    # Otherwise ask the import system for a spec; a missing or broken spec
    # leaves us without a loader.
    loader = None
    try:
        spec = importlib.util.find_spec(import_name)
    except (ImportError, ValueError):
        spec = None
    if spec is not None:
        loader = spec.loader

    # No loader: unloaded __main__ or an interactive session — fall back
    # to the current working directory.
    if loader is None:
        return os.getcwd()

    get_filename = getattr(loader, "get_filename", None)
    if get_filename is not None:
        filepath = get_filename(import_name)  # pyright: ignore
    else:
        # Fall back to importing the module and reading ``__file__``.
        __import__(import_name)
        filepath = getattr(sys.modules[import_name], "__file__", None)

    # No file path at all: likely a namespace package or an exotic import
    # hook; the caller must supply the root path explicitly.
    if filepath is None:
        raise RuntimeError(
            "No root path can be found for the provided module"
            f" {import_name!r}. This can happen because the module"
            " came from an import hook that does not provide file"
            " name information or because it's a namespace package."
            " In this case the root path needs to be explicitly"
            " provided."
        )

    # filepath is import_name.py for a module, or __init__.py for a package.
    return os.path.dirname(os.path.abspath(filepath))  # type: ignore[no-any-return]
@cache
def _split_blueprint_path(name: str) -> list[str]:
out: list[str] = [name]
if "." in name:
out.extend(_split_blueprint_path(name.rpartition(".")[0]))
return out
|
python
|
github
|
https://github.com/pallets/flask
|
src/flask/helpers.py
|
# Owner(s): ["module: unknown"]
from torch.testing import FileCheck
from torch.testing._internal.common_utils import run_tests, TestCase
class TestFileCheck(TestCase):
    """Exercises the Python bindings of torch's FileCheck utility."""

    def test_not_run(self):
        # Deleting a FileCheck that was never .run() should emit a warning
        # listing its pending checks; run that in a subprocess and capture
        # the output, then verify the warning text with FileCheck itself.
        stdout, _ = self.run_process_no_exception(
            """\
from torch.testing import FileCheck
file_check = FileCheck().check("not run")
del file_check
""",
        )
        FileCheck().check("You have not run this instance of FileCheck!").check_next(
            "FileCheck checks:"
        ).check_next("\tCHECK: not run").run(stdout)

    def test_all_python_api(self):
        # A single input string constructed to satisfy every check_* method.
        test_string = """
check check_same
check_next
check_count
check_dag
check_source_highlighted
~~~~~~~~~~~~~~~~~~~~~~~~
check_regex
"""
        # Chain every public check_* API once over the string above.
        FileCheck().check("check").check_not("check_not").check_same(
            "check_same"
        ).check_next("check_next").check_count("check_count", 1).check_dag(
            "check_dag"
        ).check_source_highlighted("check_source_highlighted").check_regex(
            r"check_.+"
        ).run(test_string)
        # The same checks expressed as embedded CHECK directives.
        FileCheck().run(
            """
# CHECK: check
# CHECK-NOT: check_not
# CHECK-SAME: check_same
# CHECK-NEXT: check_next
# CHECK-DAG: check_dag
# CHECK-SOURCE-HIGHLIGHTED: check_source_highlighted
# CHECK-REGEX: check_.+
""",
            test_string,
        )
# Run the test suite when this file is executed directly.
if __name__ == "__main__":
    run_tests()
|
python
|
github
|
https://github.com/pytorch/pytorch
|
test/test_file_check.py
|
# frozen_string_literal: true
require "concurrent/map"
require "active_support/core_ext/module/attribute_accessors"
require "action_view/template/resolver"
module ActionView
  # = Action View Lookup Context
  #
  # <tt>LookupContext</tt> is the object responsible for holding all information
  # required for looking up templates, i.e. view paths and details.
  # <tt>LookupContext</tt> is also responsible for generating a key, given to
  # view paths, used in the resolver cache lookup. Since this key is generated
  # only once during the request, it speeds up all cache accesses.
  class LookupContext # :nodoc:
    attr_accessor :prefixes

    singleton_class.attr_accessor :registered_details
    self.registered_details = []

    # Registers a lookup detail (e.g. +:locale+, +:formats+) and metaprograms
    # a reader and a writer for it on the Accessors module. +block+ computes
    # the detail's default value.
    def self.register_detail(name, &block)
      registered_details << name
      Accessors::DEFAULT_PROCS[name] = block

      Accessors.define_method(:"default_#{name}", &block)
      Accessors.module_eval <<-METHOD, __FILE__, __LINE__ + 1
        def #{name}
          @details[:#{name}] || []
        end

        def #{name}=(value)
          value = value.present? ? Array(value) : default_#{name}
          _set_detail(:#{name}, value) if value != @details[:#{name}]
        end
      METHOD
    end

    # Holds accessors for the registered details.
    module Accessors # :nodoc:
      DEFAULT_PROCS = {}
    end

    register_detail(:locale) do
      locales = [I18n.locale]
      locales.concat(I18n.fallbacks[I18n.locale]) if I18n.respond_to? :fallbacks
      locales << I18n.default_locale
      locales.uniq!
      locales
    end
    register_detail(:formats) { ActionView::Base.default_formats || [:html, :text, :js, :css, :xml, :json] }
    register_detail(:variants) { [] }
    register_detail(:handlers) { Template::Handlers.extensions }

    # Process-wide caches mapping a details hash to its cache key and to a
    # per-details digest cache.
    class DetailsKey # :nodoc:
      alias :eql? :equal?

      @details_keys = Concurrent::Map.new
      @digest_cache = Concurrent::Map.new
      @view_context_mutex = Mutex.new

      def self.digest_cache(details)
        @digest_cache[details_cache_key(details)] ||= Concurrent::Map.new
      end

      def self.details_cache_key(details)
        @details_keys.fetch(details) do
          if formats = details[:formats]
            unless Template::Types.valid_symbols?(formats)
              # Restrict formats to the known template types before caching.
              details = details.dup
              details[:formats] &= Template::Types.symbols
            end
          end
          @details_keys[details] ||= TemplateDetails::Requested.new(**details)
        end
      end

      # Clears all resolver caches plus the keys/digest caches above.
      def self.clear
        ActionView::PathRegistry.all_resolvers.each do |resolver|
          resolver.clear_cache
        end
        @view_context_class = nil
        @details_keys.clear
        @digest_cache.clear
      end

      def self.digest_caches
        @digest_cache.values
      end

      def self.view_context_class
        @view_context_mutex.synchronize do
          @view_context_class ||= ActionView::Base.with_empty_template_cache
        end
      end
    end

    # Add caching behavior on top of Details.
    module DetailsCache
      attr_accessor :cache

      # Calculate the details key. Remove the handlers from calculation to improve performance
      # since the user cannot modify it explicitly.
      def details_key # :nodoc:
        @details_key ||= DetailsKey.details_cache_key(@details) if @cache
      end

      # Temporary skip passing the details_key forward.
      def disable_cache
        old_value, @cache = @cache, false
        yield
      ensure
        @cache = old_value
      end

    private
      # Mutates a single detail, invalidating the memoized key and digest
      # cache (and copy-on-writing @details if a key had been handed out).
      def _set_detail(key, value) # :doc:
        @details = @details.dup if @digest_cache || @details_key
        @digest_cache = nil
        @details_key = nil
        @details[key] = value
      end
    end

    # Helpers related to template lookup using the lookup context information.
    module ViewPaths
      attr_reader :view_paths, :html_fallback_for_js

      def find(name, prefixes = [], partial = false, keys = [], options = {})
        name, prefixes = normalize_name(name, prefixes)
        details, details_key = detail_args_for(options)
        @view_paths.find(name, prefixes, partial, details, details_key, keys)
      end
      alias :find_template :find

      def find_all(name, prefixes = [], partial = false, keys = [], options = {})
        name, prefixes = normalize_name(name, prefixes)
        details, details_key = detail_args_for(options)
        @view_paths.find_all(name, prefixes, partial, details, details_key, keys)
      end

      def exists?(name, prefixes = [], partial = false, keys = [], **options)
        name, prefixes = normalize_name(name, prefixes)
        details, details_key = detail_args_for(options)
        @view_paths.exists?(name, prefixes, partial, details, details_key, keys)
      end
      alias :template_exists? :exists?

      # Like #exists?, but matches any variant (see detail_args_for_any).
      def any?(name, prefixes = [], partial = false)
        name, prefixes = normalize_name(name, prefixes)
        details, details_key = detail_args_for_any
        @view_paths.exists?(name, prefixes, partial, details, details_key, [])
      end
      alias :any_templates? :any?

      def append_view_paths(paths)
        @view_paths = build_view_paths(@view_paths.to_a + paths)
      end

      def prepend_view_paths(paths)
        @view_paths = build_view_paths(paths + @view_paths.to_a)
      end

    private
      # Whenever setting view paths, makes a copy so that we can manipulate them in
      # instance objects as we wish.
      def build_view_paths(paths)
        if ActionView::PathSet === paths
          paths
        else
          ActionView::PathSet.new(Array(paths))
        end
      end

      # Compute details hash and key according to user options (e.g. passed from #render).
      def detail_args_for(options) # :doc:
        return @details, details_key if options.empty? # most common path.
        user_details = @details.merge(options)

        if @cache
          details_key = DetailsKey.details_cache_key(user_details)
        else
          details_key = nil
        end

        [user_details, details_key]
      end

      # Details/key pair used by #any?: defaults everywhere, :variants => :any.
      def detail_args_for_any
        @detail_args_for_any ||= begin
          details = {}

          LookupContext.registered_details.each do |k|
            if k == :variants
              details[k] = :any
            else
              details[k] = Accessors::DEFAULT_PROCS[k].call
            end
          end

          if @cache
            [details, DetailsKey.details_cache_key(details)]
          else
            [details, nil]
          end
        end
      end

      # Fix when prefix is specified as part of the template name
      def normalize_name(name, prefixes)
        name = name.to_s
        idx = name.rindex("/")
        return name, prefixes.presence || [""] unless idx

        path_prefix = name[0, idx]
        path_prefix = path_prefix.from(1) if path_prefix.start_with?("/")
        name = name.from(idx + 1)

        if !prefixes || prefixes.empty?
          prefixes = [path_prefix]
        else
          prefixes = prefixes.map { |p| "#{p}/#{path_prefix}" }
        end

        return name, prefixes
      end
    end

    include Accessors
    include DetailsCache
    include ViewPaths

    def initialize(view_paths, details = {}, prefixes = [])
      @details_key = nil
      @digest_cache = nil
      @cache = true
      @prefixes = prefixes

      @details = initialize_details({}, details)
      @view_paths = build_view_paths(view_paths)
    end

    def digest_cache
      @digest_cache ||= DetailsKey.digest_cache(@details)
    end

    # Returns a copy of this context whose requested formats are +formats+.
    def with_prepended_formats(formats)
      details = @details.dup
      details[:formats] = formats

      self.class.new(@view_paths, details, @prefixes)
    end

    # Fills +target+ with each registered detail, falling back to defaults.
    def initialize_details(target, details)
      LookupContext.registered_details.each do |k|
        target[k] = details[k] || Accessors::DEFAULT_PROCS[k].call
      end
      target
    end
    private :initialize_details

    # Override formats= to expand ["*/*"] values and automatically
    # add :html as fallback to :js.
    def formats=(values)
      if values
        values = values.dup
        values.concat(default_formats) if values.delete "*/*"
        values.uniq!

        unless Template::Types.valid_symbols?(values)
          invalid_values = values - Template::Types.symbols
          raise ArgumentError, "Invalid formats: #{invalid_values.map(&:inspect).join(", ")}"
        end

        if (values.length == 1) && (values[0] == :js)
          values << :html
          @html_fallback_for_js = true
        end
      end
      super(values)
    end

    # Override locale to return a symbol instead of array.
    def locale
      @details[:locale].first
    end

    # Overload locale= to also set the I18n.locale. If the current I18n.config object responds
    # to original_config, it means that it has a copy of the original I18n configuration and it's
    # acting as proxy, which we need to skip.
    def locale=(value)
      if value
        config = I18n.config.respond_to?(:original_config) ? I18n.config.original_config : I18n.config
        config.locale = value
      end

      super(default_locale)
    end
  end
end
|
ruby
|
github
|
https://github.com/rails/rails
|
actionview/lib/action_view/lookup_context.rb
|
# coding: utf8
from __future__ import unicode_literals
from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS
from .stop_words import STOP_WORDS
from .lex_attrs import LEX_ATTRS
from ..tokenizer_exceptions import BASE_EXCEPTIONS
from ..norm_exceptions import BASE_NORMS
from ...util import update_exc, add_lookups
from ...language import Language
from ...lookups import Lookups
from ...attrs import LANG, NORM
from .lemmatizer import UkrainianLemmatizer
class UkrainianDefaults(Language.Defaults):
    """Language defaults (lexical attributes, tokenizer exceptions, stop
    words, lemmatizer) for Ukrainian."""

    # Copy the shared getters so the base class mapping is not mutated.
    lex_attr_getters = dict(Language.Defaults.lex_attr_getters)
    # Language code reported for every doc produced by this language.
    lex_attr_getters[LANG] = lambda text: "uk"
    # Layer the shared base norm exceptions under the NORM getter.
    lex_attr_getters[NORM] = add_lookups(
        Language.Defaults.lex_attr_getters[NORM], BASE_NORMS
    )
    lex_attr_getters.update(LEX_ATTRS)
    # Ukrainian-specific exceptions take precedence over the base set.
    tokenizer_exceptions = update_exc(BASE_EXCEPTIONS, TOKENIZER_EXCEPTIONS)
    stop_words = STOP_WORDS

    @classmethod
    def create_lemmatizer(cls, nlp=None, lookups=None):
        """Return a UkrainianLemmatizer, creating empty Lookups if none given."""
        if lookups is None:
            lookups = Lookups()
        return UkrainianLemmatizer(lookups)
class Ukrainian(Language):
    """spaCy Language subclass for Ukrainian."""

    # ISO 639-1 code for Ukrainian.
    lang = "uk"
    Defaults = UkrainianDefaults


# Public API of this module.
__all__ = ["Ukrainian"]
|
unknown
|
codeparrot/codeparrot-clean
| ||
from __future__ import division, print_function, absolute_import
#from pnet.vzlog import default as vz
import numpy as np
import amitgroup as ag
import itertools as itr
import sys
import os
import pnet
import time
def test(ims, labels, net):
    """Classify ``ims`` with ``net`` and return the elementwise match
    against ``labels`` (boolean array)."""
    predicted = net.classify(ims)
    return predicted == labels
# NOTE(review): this script targets Python 2 (`itr.izip` below) and references
# several names never defined in this file — `gv`, `training_seed`,
# `start0`/`end0`, `maxdepth`, `mnist_data` — so it raises NameError on those
# paths as written. Flagged inline; confirm against the original project.
if pnet.parallel.main(__name__):
    ag.set_verbose(True)
    print("1")
    import argparse

    # Command-line interface: model file, data file, label file, and the
    # number of mixture components for the classification layer.
    parser = argparse.ArgumentParser()
    #parser.add_argument('seed', metavar='<seed>', type=int, help='Random seed')
    #parser.add_argument('param', metavar='<param>', type=string)
    parser.add_argument('model',metavar='<model file>',type=argparse.FileType('rb'), help='Filename of model file')
    print("ohhh")
    parser.add_argument('data',metavar='<mnist data file>',type=argparse.FileType('rb'),help='Filename of data file')
    parser.add_argument('label',metavar='<mnist data file>',type=argparse.FileType('rb'),help='Filename of data file')
    parser.add_argument('numOfClassModel',metavar='<numOfClassModel>', type=int, help='num Of Class Model')
    args = parser.parse_args()
    param = args.model
    numOfClassModel = args.numOfClassModel
    # NOTE(review): `param` is immediately overwritten — the assignment from
    # args.model above is dead code.
    param = args.data
    data = np.load(param)
    label = np.load(args.label)
    net = pnet.PartsNet.load(args.model)

    # Accumulators across classifier/rotspread configurations.
    unsup_training_times = []
    sup_training_times = []
    testing_times = []
    error_rates = []
    all_num_parts = []

    # First 10k samples train the supervised layer; next 2k are held out.
    ims10k = data[:10000]
    label10k = np.array(label[:10000])
    np.save('a.npy',label10k)
    ims2k = data[10000:12000]
    label2k = np.array(label[10000:12000])
    np.save('b.npy',label2k)
    print(ims2k.shape)
    digits = range(10)
    sup_ims = []
    sup_labels = []
    # Load supervised training data
    for d in digits:
        ims0 = ims10k[label10k == d]
        sup_ims.append(ims0)
        sup_labels.append(d * np.ones(len(ims0), dtype=np.int64))
    sup_ims = np.concatenate(sup_ims, axis=0)
    sup_labels = np.concatenate(sup_labels, axis=0)
    print("=================")
    print(sup_ims.shape)
    print(sup_labels)

    # Evaluate each classifier type with and without rotational spreading.
    for classifier in 'mixture', 'svm':
        for rotspread in [0, 1]:
            net.layers[0]._settings['rotation_spreading_radius'] = rotspread
            print('Classifier:', classifier, 'Rotational spreading:', rotspread)
            if classifier == 'mixture':
                cl = pnet.MixtureClassificationLayer(n_components=numOfClassModel, min_prob=1e-5)
            elif classifier == 'svm':
                cl = pnet.SVMClassificationLayer(C=None)
            clnet = pnet.PartsNet([net, cl])
            start1 = time.time()
            print('Training supervised...')
            print(sup_ims.shape)
            clnet.train(sup_ims, sup_labels)
            print('Done.')
            end1 = time.time()
            #print("Now testing...")
            ### Test ######################################################################
            corrects = 0
            total = 0
            if 0:
                # NOTE(review): dead branch — `mnist_data` is undefined here.
                test_ims, test_labels = mnist_data['test_image'], mnist_data['test_label']
            else:
                test_ims = ims2k
                test_labels = label2k
            # NOTE(review): `gv` is never imported — this line raises NameError.
            with gv.Timer("Split to batches"):
                ims_batches = np.array_split(test_ims, 10)
                labels_batches = np.array_split(test_labels, 10)
            def format_error_rate(pr):
                return "{:.2f}%".format(100*(1-pr))
            #with gv.Timer('Testing'):
            start2 = time.time()
            # NOTE(review): `itr.izip` is Python 2 only (use zip on Python 3).
            args = (tup+(clnet,) for tup in itr.izip(ims_batches, labels_batches))
            for i, res in enumerate(pnet.parallel.starmap(test, args)):
                corrects += res.sum()
                total += res.size
                pr = corrects / total
            end2 = time.time()
            error_rate = 1.0 - pr
            num_parts = 0#net.layers[1].num_parts
            error_rates.append(error_rate)
            # NOTE(review): `training_seed` is undefined — NameError here.
            print(training_seed, 'error rate', error_rate * 100, 'num parts', num_parts)#, 'num parts 2', net.layers[3].num_parts)
            # NOTE(review): `end0`/`start0` are never assigned — NameError here.
            unsup_training_times.append(end0 - start0)
            sup_training_times.append(end1 - start1)
            testing_times.append(end2 - start2)
            #print('times', end0-start0, end1-start1, end2-start2)
            all_num_parts.append(num_parts)
            #vz.section('MNIST')
            #gv.img.save_image(vz.generate_filename(), test_ims[0])
            #gv.img.save_image(vz.generate_filename(), test_ims[1])
            #gv.img.save_image(vz.generate_filename(), test_ims[2])
            # Vz
            #net.infoplot(vz)
            if 0:
                # NOTE(review): dead branch — `maxdepth` is undefined here.
                print(r"{ppl} & {depth} & {num_parts} & {unsup_time:.1f} & {test_time:.1f} & ${rate:.2f} \pm {std:.2f}$ \\".format(
                    ppl=2,
                    depth=maxdepth,
                    num_parts=r'${:.0f} \pm {:.0f}$'.format(np.mean(all_num_parts), np.std(all_num_parts)),
                    unsup_time=np.median(unsup_training_times) / 60,
                    #sup_time=np.median(sup_training_times),
                    test_time=np.median(testing_times) / 60,
                    rate=100*np.mean(error_rates),
                    std=100*np.std(error_rates)))
                print(r"{ppl} {depth} {num_parts} {unsup_time} {test_time} {rate} {std}".format(
                    ppl=2,
                    depth=maxdepth,
                    num_parts=r'${:.0f} \pm {:.0f}$'.format(np.mean(all_num_parts), np.std(all_num_parts)),
                    unsup_time=np.median(unsup_training_times) / 60,
                    #sup_time=np.median(sup_training_times),
                    test_time=np.median(testing_times) / 60,
                    rate=100*np.mean(error_rates),
                    std=100*np.std(error_rates)))
            #np.savez('gdata2-{}-{}-{}.npz'.format(maxdepth, split_criterion, split_entropy), all_num_parts=all_num_parts, unsup_time=unsup_training_times, test_time=testing_times, rates=error_rates)
    print('mean error rate', np.mean(error_rates) * 100)
    #net.save(args.model)
|
unknown
|
codeparrot/codeparrot-clean
| ||
---
title: Webhooks
# can make a quick how-to on creating a webhook, this was copy/pasted from another doc, needs to be reviewed first
hidden: true
---
# Webhooks
Resource routes can be used to handle webhooks. For example, you can create a webhook that receives notifications from GitHub when a new commit is pushed to a repository:
```tsx
import type { Route } from "./+types/github";
import crypto from "node:crypto";
export const action = async ({
request,
}: Route.ActionArgs) => {
if (request.method !== "POST") {
return Response.json(
{ message: "Method not allowed" },
{
status: 405,
},
);
}
const payload = await request.json();
/* Validate the webhook */
const signature = request.headers.get(
"X-Hub-Signature-256",
);
const generatedSignature = `sha256=${crypto
.createHmac("sha256", process.env.GITHUB_WEBHOOK_SECRET)
.update(JSON.stringify(payload))
.digest("hex")}`;
if (signature !== generatedSignature) {
return Response.json(
{ message: "Signature mismatch" },
{
status: 401,
},
);
}
/* process the webhook (e.g. enqueue a background job) */
return Response.json({ success: true });
};
```
|
unknown
|
github
|
https://github.com/remix-run/react-router
|
docs/how-to/webhook.md
|
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import gettext
import fcntl
import hmac
import os
import pipes
import pty
import pwd
import random
import re
import select
import shlex
import subprocess
import time
from hashlib import sha1
from ansible import constants as C
from ansible.errors import AnsibleError, AnsibleConnectionFailure, AnsibleFileNotFound
from ansible.plugins.connections import ConnectionBase
class Connection(ConnectionBase):
''' ssh based connections '''
has_pipelining = True
become_methods = frozenset(C.BECOME_METHODS).difference(['runas'])
    def __init__(self, *args, **kwargs):
        # SSH connection specific init stuff
        self._common_args = []
        # Prefix OpenSSH uses to mark hashed known_hosts entries.
        self.HASHED_KEY_MAGIC = "|1|"

        # FIXME: move the lockfile locations to ActionBase?
        #fcntl.lockf(self.runner.process_lockfile, fcntl.LOCK_EX)
        #self.cp_dir = utils.prepare_writeable_dir('$HOME/.ansible/cp',mode=0700)
        # Directory holding the ControlPath multiplexing sockets.
        self._cp_dir = '/tmp'
        #fcntl.lockf(self.runner.process_lockfile, fcntl.LOCK_UN)

        super(Connection, self).__init__(*args, **kwargs)

        self.host = self._play_context.remote_addr
    @property
    def transport(self):
        ''' used to identify this connection object from other classes;
        always the literal string "ssh" for this plugin '''
        return 'ssh'
    def _connect(self):
        ''' connect to the remote host

        Builds the ssh command-line option list (self._common_args) from the
        play context and global config; no process is spawned here. '''

        self._display.vvv("ESTABLISH SSH CONNECTION FOR USER: {0}".format(self._play_context.remote_user), host=self._play_context.remote_addr)

        # Args are built once; subsequent calls reuse them.
        if self._connected:
            return self

        extra_args = C.ANSIBLE_SSH_ARGS
        if extra_args is not None:
            # make sure there is no empty string added as this can produce weird errors
            self._common_args += [x.strip() for x in shlex.split(extra_args) if x.strip()]
        else:
            # Default to connection multiplexing (ControlMaster) for speed.
            self._common_args += (
                "-o", "ControlMaster=auto",
                "-o", "ControlPersist=60s",
                "-o", "ControlPath=\"{0}\"".format(C.ANSIBLE_SSH_CONTROL_PATH % dict(directory=self._cp_dir)),
            )

        # If user-supplied args enable ControlPersist without a ControlPath,
        # supply our own socket path.
        cp_in_use = False
        cp_path_set = False
        for arg in self._common_args:
            if "ControlPersist" in arg:
                cp_in_use = True
            if "ControlPath" in arg:
                cp_path_set = True

        if cp_in_use and not cp_path_set:
            self._common_args += ("-o", "ControlPath=\"{0}\"".format(
                C.ANSIBLE_SSH_CONTROL_PATH % dict(directory=self._cp_dir))
            )

        if not C.HOST_KEY_CHECKING:
            self._common_args += ("-o", "StrictHostKeyChecking=no")

        if self._play_context.port is not None:
            self._common_args += ("-o", "Port={0}".format(self._play_context.port))
        if self._play_context.private_key_file is not None:
            self._common_args += ("-o", "IdentityFile=\"{0}\"".format(os.path.expanduser(self._play_context.private_key_file)))
        if self._play_context.password:
            # Password auth via sshpass: disable mechanisms that would bypass it.
            self._common_args += ("-o", "GSSAPIAuthentication=no",
                                  "-o", "PubkeyAuthentication=no")
        else:
            self._common_args += ("-o", "KbdInteractiveAuthentication=no",
                                  "-o", "PreferredAuthentications=gssapi-with-mic,gssapi-keyex,hostbased,publickey",
                                  "-o", "PasswordAuthentication=no")
        # Only pass -o User= when it differs from the local user.
        if self._play_context.remote_user is not None and self._play_context.remote_user != pwd.getpwuid(os.geteuid())[0]:
            self._common_args += ("-o", "User={0}".format(self._play_context.remote_user))
        self._common_args += ("-o", "ConnectTimeout={0}".format(self._play_context.timeout))

        self._connected = True

        return self
    def _run(self, cmd, indata):
        ''' Spawn the ssh subprocess; returns (process, stdin_handle). '''
        if indata:
            # do not use pseudo-pty when piping data to stdin
            p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            stdin = p.stdin
        else:
            # try to use a pseudo-pty
            try:
                # Make sure stdin is a proper (pseudo) pty to avoid: tcgetattr errors
                master, slave = pty.openpty()
                p = subprocess.Popen(cmd, stdin=slave, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
                # NOTE(review): bufsize=0 with mode 'w' is Python-2 behavior;
                # Python 3 rejects unbuffered text mode here.
                stdin = os.fdopen(master, 'w', 0)
                os.close(slave)
            except:
                # pty allocation failed — fall back to a plain pipe
                p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
                stdin = p.stdin

        return (p, stdin)
    def _password_cmd(self):
        ''' Return the sshpass command prefix for password auth, or [] when
        no password is configured. Raises AnsibleError if sshpass is not
        installed. '''
        if self._play_context.password:
            try:
                # Probe that the sshpass binary exists before relying on it.
                p = subprocess.Popen(["sshpass"], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
                p.communicate()
            except OSError:
                raise AnsibleError("to use the 'ssh' connection type with passwords, you must install the sshpass program")
            # Feed the password through an inherited pipe (-d <fd>) so it
            # never appears on the command line.
            (self.rfd, self.wfd) = os.pipe()
            return ["sshpass", "-d{0}".format(self.rfd)]
        return []
    def _send_password(self):
        ''' Write the connection password into the pipe opened by
        _password_cmd, then close both ends. '''
        if self._play_context.password:
            os.close(self.rfd)
            # NOTE(review): os.write with a str is Python-2 behavior; Python 3
            # requires bytes here.
            os.write(self.wfd, "{0}\n".format(self._play_context.password))
            os.close(self.wfd)
    def _communicate(self, p, stdin, indata, sudoable=True):
        ''' Pump ``indata`` into the subprocess and collect stdout/stderr
        until it exits; returns (returncode, stdout, stderr). '''
        # Restore blocking mode on the pipes for the select loop below.
        fcntl.fcntl(p.stdout, fcntl.F_SETFL, fcntl.fcntl(p.stdout, fcntl.F_GETFL) & ~os.O_NONBLOCK)
        fcntl.fcntl(p.stderr, fcntl.F_SETFL, fcntl.fcntl(p.stderr, fcntl.F_GETFL) & ~os.O_NONBLOCK)
        # We can't use p.communicate here because the ControlMaster may have stdout open as well
        stdout = ''
        stderr = ''
        rpipes = [p.stdout, p.stderr]
        if indata:
            try:
                stdin.write(indata)
                stdin.close()
            except:
                raise AnsibleConnectionFailure('SSH Error: data could not be sent to the remote host. Make sure this host can be reached over ssh')
        # Read stdout/stderr from process
        while True:
            rfd, wfd, efd = select.select(rpipes, [], rpipes, 1)

            # fail early if the become password is wrong
            if self._play_context.become and sudoable:
                if self._play_context.become_pass:
                    self.check_incorrect_password(stdout)
                elif self.check_password_prompt(stdout):
                    raise AnsibleError('Missing %s password' % self._play_context.become_method)

            if p.stderr in rfd:
                dat = os.read(p.stderr.fileno(), 9000)
                stderr += dat
                # Empty read means EOF on this pipe — stop selecting on it.
                if dat == '':
                    rpipes.remove(p.stderr)
            elif p.stdout in rfd:
                dat = os.read(p.stdout.fileno(), 9000)
                stdout += dat
                if dat == '':
                    rpipes.remove(p.stdout)

            # only break out if no pipes are left to read or
            # the pipes are completely read and
            # the process is terminated
            if (not rpipes or not rfd) and p.poll() is not None:
                break
            # No pipes are left to read but process is not yet terminated
            # Only then it is safe to wait for the process to be finished
            # NOTE: Actually p.poll() is always None here if rpipes is empty
            # NOTE(review): prefer `is None` over `== None`.
            elif not rpipes and p.poll() == None:
                p.wait()
                # The process is terminated. Since no pipes to read from are
                # left, there is no need to call select() again.
                break
        # close stdin after process is terminated and stdout/stderr are read
        # completely (see also issue #848)
        stdin.close()
        return (p.returncode, stdout, stderr)
    def not_in_host_file(self, host):
        ''' Return True when ``host`` has no entry (plain or hashed) in any
        known_hosts file checked below. '''
        if 'USER' in os.environ:
            user_host_file = os.path.expandvars("~${USER}/.ssh/known_hosts")
        else:
            user_host_file = "~/.ssh/known_hosts"
        user_host_file = os.path.expanduser(user_host_file)

        # Check the user file plus the two system-wide locations.
        host_file_list = []
        host_file_list.append(user_host_file)
        host_file_list.append("/etc/ssh/ssh_known_hosts")
        host_file_list.append("/etc/ssh/ssh_known_hosts2")

        hfiles_not_found = 0
        for hf in host_file_list:
            if not os.path.exists(hf):
                hfiles_not_found += 1
                continue
            try:
                host_fh = open(hf)
            except IOError as e:
                hfiles_not_found += 1
                continue
            else:
                data = host_fh.read()
                host_fh.close()

            for line in data.split("\n"):
                if line is None or " " not in line:
                    continue
                tokens = line.split()
                if not tokens:
                    continue
                if isinstance(tokens, list) and tokens: # skip invalid hostlines
                    if tokens[0].find(self.HASHED_KEY_MAGIC) == 0:
                        # this is a hashed known host entry
                        try:
                            (kn_salt,kn_host) = tokens[0][len(self.HASHED_KEY_MAGIC):].split("|",2)
                            # NOTE(review): str.decode('base64') is Python-2
                            # only; Python 3 needs the base64 module.
                            hash = hmac.new(kn_salt.decode('base64'), digestmod=sha1)
                            hash.update(host)
                            if hash.digest() == kn_host.decode('base64'):
                                return False
                        except:
                            # invalid hashed host key, skip it
                            continue
                    else:
                        # standard host file entry
                        if host in tokens[0]:
                            return False

        if (hfiles_not_found == len(host_file_list)):
            self._display.vvv("EXEC previous known host file not found for {0}".format(host))
        return True
    def lock_host_keys(self, lock):
        ''' Intended to serialize known_hosts prompting across workers;
        currently a no-op because the lockf calls are commented out. '''
        if C.HOST_KEY_CHECKING and self.not_in_host_file(self.host):
            if lock:
                action = fcntl.LOCK_EX
            else:
                action = fcntl.LOCK_UN
            # NOTE(review): `action` is computed but unused while the lockf
            # calls below remain commented out.
            # lock around the initial SSH connectivity so the user prompt about whether to add
            # the host to known hosts is not intermingled with multiprocess output.
            # FIXME: move the locations of these lock files, same as init above, these came from runner, probably need to be in task_executor
            # fcntl.lockf(self.process_lockfile, action)
            # fcntl.lockf(self.output_lockfile, action)
def exec_command(self, *args, **kwargs):
"""
Wrapper around _exec_command to retry in the case of an ssh failure
Will retry if:
* an exception is caught
* ssh returns 255
Will not retry if
* remaining_tries is <2
* retries limit reached
"""
remaining_tries = int(C.ANSIBLE_SSH_RETRIES) + 1
cmd_summary = "%s..." % args[0]
for attempt in xrange(remaining_tries):
try:
return_tuple = self._exec_command(*args, **kwargs)
# 0 = success
# 1-254 = remote command return code
# 255 = failure from the ssh command itself
if return_tuple[0] != 255 or attempt == (remaining_tries - 1):
break
else:
raise AnsibleConnectionFailure("Failed to connect to the host via ssh.")
except (AnsibleConnectionFailure, Exception) as e:
if attempt == remaining_tries - 1:
raise e
else:
pause = 2 ** attempt - 1
if pause > 30:
pause = 30
if isinstance(e, AnsibleConnectionFailure):
msg = "ssh_retry: attempt: %d, ssh return code is 255. cmd (%s), pausing for %d seconds" % (attempt, cmd_summary, pause)
else:
msg = "ssh_retry: attempt: %d, caught exception(%s) from cmd (%s), pausing for %d seconds" % (attempt, e, cmd_summary, pause)
self._display.vv(msg)
time.sleep(pause)
continue
return return_tuple
    def _exec_command(self, cmd, tmp_path, in_data=None, sudoable=True):
        ''' run a command on the remote host

        Returns (returncode, '', stdout, stderr); privilege-escalation
        prompts are answered before handing off to _communicate. '''

        super(Connection, self).exec_command(cmd, tmp_path, in_data=in_data, sudoable=sudoable)

        ssh_cmd = self._password_cmd()
        ssh_cmd += ("ssh", "-C")
        if not in_data:
            # we can only use tty when we are not pipelining the modules. piping data into /usr/bin/python
            # inside a tty automatically invokes the python interactive-mode but the modules are not
            # compatible with the interactive-mode ("unexpected indent" mainly because of empty lines)
            ssh_cmd.append("-tt")
        if self._play_context.verbosity > 3:
            ssh_cmd.append("-vvv")
        else:
            ssh_cmd.append("-q")
        ssh_cmd += self._common_args
        ssh_cmd.append(self.host)
        ssh_cmd.append(cmd)
        self._display.vvv("EXEC {0}".format(' '.join(ssh_cmd)), host=self.host)

        self.lock_host_keys(True)

        # create process
        (p, stdin) = self._run(ssh_cmd, in_data)

        self._send_password()

        no_prompt_out = ''
        no_prompt_err = ''
        if self._play_context.prompt:
            '''
            Several cases are handled for privileges with password
            * NOPASSWD (tty & no-tty): detect success_key on stdout
            * without NOPASSWD:
              * detect prompt on stdout (tty)
              * detect prompt on stderr (no-tty)
            '''
            self._display.debug("Handling privilege escalation password prompt.")
            if self._play_context.become and self._play_context.become_pass:
                # Switch to non-blocking reads while waiting for the prompt.
                fcntl.fcntl(p.stdout, fcntl.F_SETFL, fcntl.fcntl(p.stdout, fcntl.F_GETFL) | os.O_NONBLOCK)
                fcntl.fcntl(p.stderr, fcntl.F_SETFL, fcntl.fcntl(p.stderr, fcntl.F_GETFL) | os.O_NONBLOCK)
                become_output = ''
                become_errput = ''
                passprompt = False
                while True:
                    self._display.debug('Waiting for Privilege Escalation input')
                    if self.check_become_success(become_output + become_errput):
                        self._display.debug('Succeded!')
                        break
                    elif self.check_password_prompt(become_output) or self.check_password_prompt(become_errput):
                        self._display.debug('Password prompt!')
                        passprompt = True
                        break

                    self._display.debug('Read next chunks')
                    rfd, wfd, efd = select.select([p.stdout, p.stderr], [], [p.stdout], self._play_context.timeout)
                    if not rfd:
                        # timeout. wrap up process communication
                        stdout, stderr = p.communicate()
                        raise AnsibleError('Connection error waiting for privilege escalation password prompt: %s' % become_output)
                    elif p.stderr in rfd:
                        chunk = p.stderr.read()
                        become_errput += chunk
                        self._display.debug('stderr chunk is: %s' % chunk)
                        # Wrong become password fails fast here.
                        self.check_incorrect_password(become_errput)
                    elif p.stdout in rfd:
                        chunk = p.stdout.read()
                        become_output += chunk
                        self._display.debug('stdout chunk is: %s' % chunk)
                        # Empty chunk means the stream closed before a prompt.
                        if not chunk:
                            break
                            #raise AnsibleError('Connection closed waiting for privilege escalation password prompt: %s ' % become_output)

                if passprompt:
                    self._display.debug("Sending privilege escalation password.")
                    stdin.write(self._play_context.become_pass + '\n')
                else:
                    # Prompt never appeared (e.g. NOPASSWD); keep what was read
                    # so it is prepended to the final output below.
                    no_prompt_out = become_output
                    no_prompt_err = become_errput

        (returncode, stdout, stderr) = self._communicate(p, stdin, in_data, sudoable=sudoable)

        self.lock_host_keys(False)

        controlpersisterror = 'Bad configuration option: ControlPersist' in stderr or 'unknown configuration option: ControlPersist' in stderr

        if C.HOST_KEY_CHECKING:
            # sshpass exits 6 on an unknown host key when checking is enabled.
            if ssh_cmd[0] == "sshpass" and p.returncode == 6:
                raise AnsibleError('Using a SSH password instead of a key is not possible because Host Key checking is enabled and sshpass does not support this. Please add this host\'s fingerprint to your known_hosts file to manage this host.')

        if p.returncode != 0 and controlpersisterror:
            raise AnsibleError('using -c ssh on certain older ssh versions may not support ControlPersist, set ANSIBLE_SSH_ARGS="" (or ssh_args in [ssh_connection] section of the config file) before running again')
        # FIXME: module name isn't in runner
        #if p.returncode == 255 and (in_data or self.runner.module_name == 'raw'):
        if p.returncode == 255 and in_data:
            raise AnsibleConnectionFailure('SSH Error: data could not be sent to the remote host. Make sure this host can be reached over ssh')

        return (p.returncode, '', no_prompt_out + stdout, no_prompt_err + stderr)
def put_file(self, in_path, out_path):
    """Transfer a file from the local machine to the remote host.

    Uses ``scp`` when C.DEFAULT_SCP_IF_SSH is set, otherwise drives an
    ``sftp`` session via stdin.  Raises AnsibleFileNotFound if the local
    file is missing and AnsibleError if the transfer exits non-zero.

    NOTE(review): source formatting was destroyed (indentation stripped);
    this body reconstructs the canonical nesting of the original logic.
    """
    super(Connection, self).put_file(in_path, out_path)
    self._display.vvv("PUT {0} TO {1}".format(in_path, out_path), host=self.host)
    if not os.path.exists(in_path):
        raise AnsibleFileNotFound("file or module does not exist: {0}".format(in_path))
    cmd = self._password_cmd()
    # scp and sftp require square brackets for IPv6 addresses, but
    # accept them for hostnames and IPv4 addresses too.
    bracketed_host = '[%s]' % self.host
    if C.DEFAULT_SCP_IF_SSH:
        cmd.append('scp')
        cmd.extend(self._common_args)
        cmd.extend([in_path, '{0}:{1}'.format(bracketed_host, pipes.quote(out_path))])
        indata = None
    else:
        cmd.append('sftp')
        cmd.extend(self._common_args)
        cmd.append(bracketed_host)
        # sftp reads its commands from stdin.
        indata = "put {0} {1}\n".format(pipes.quote(in_path), pipes.quote(out_path))
    (p, stdin) = self._run(cmd, indata)
    self._send_password()
    (returncode, stdout, stderr) = self._communicate(p, stdin, indata)
    if returncode != 0:
        raise AnsibleError("failed to transfer file to {0}:\n{1}\n{2}".format(out_path, stdout, stderr))
def fetch_file(self, in_path, out_path):
    """Fetch a file from the remote host to the local machine.

    Mirrors put_file: ``scp`` when C.DEFAULT_SCP_IF_SSH is set, otherwise
    an ``sftp`` session driven via stdin.  Raises AnsibleError on a
    non-zero exit from the transfer process.

    NOTE(review): unlike put_file, the host is NOT wrapped in square
    brackets here, so a bare IPv6 address may misparse — confirm whether
    upstream intends the asymmetry before changing it.
    NOTE(review): source formatting was destroyed (indentation stripped);
    this body reconstructs the canonical nesting of the original logic.
    """
    super(Connection, self).fetch_file(in_path, out_path)
    self._display.vvv("FETCH {0} TO {1}".format(in_path, out_path), host=self.host)
    cmd = self._password_cmd()
    if C.DEFAULT_SCP_IF_SSH:
        cmd.append('scp')
        cmd.extend(self._common_args)
        cmd.extend(['{0}:{1}'.format(self.host, in_path), out_path])
        indata = None
    else:
        cmd.append('sftp')
        # sftp batch mode allows us to correctly catch failed transfers,
        # but can be disabled if for some reason the client side doesn't
        # support the option
        if C.DEFAULT_SFTP_BATCH_MODE:
            cmd.append('-b')
            cmd.append('-')
        cmd.extend(self._common_args)
        cmd.append(self.host)
        indata = "get {0} {1}\n".format(in_path, out_path)
    p = subprocess.Popen(cmd, stdin=subprocess.PIPE,
                         stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    self._send_password()
    stdout, stderr = p.communicate(indata)
    if p.returncode != 0:
        raise AnsibleError("failed to transfer file from {0}:\n{1}\n{2}".format(in_path, stdout, stderr))
def close(self):
    """Mark the connection closed; stop the ssh ControlMaster if one is in use.

    There is no persistent channel to tear down (we exec openssh binaries
    per operation), so this only asks a ControlMaster — when the common
    args configured one — to stop, then clears the connected flag.

    NOTE(review): source formatting was destroyed (indentation stripped);
    this body reconstructs the canonical nesting of the original logic.
    """
    if not self._connected:
        return
    if 'ControlMaster' in self._common_args:
        stop_cmd = ['ssh', '-O', 'stop']
        stop_cmd.extend(self._common_args)
        stop_cmd.append(self._play_context.remote_addr)
        proc = subprocess.Popen(stop_cmd, stdin=subprocess.PIPE,
                                stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        # Output is ignored; stopping the master is best-effort.
        proc.communicate()
    self._connected = False
|
unknown
|
codeparrot/codeparrot-clean
| ||
# Building Swift SDK for Android on Windows
Visual Studio 2019 or newer is needed to build the Swift SDK for Android on
Windows.
## 1. Install Dependencies
- Install the latest version of [Visual Studio](https://www.visualstudio.com/downloads/)
- Make sure to include the android NDK in your installation.
## 1. Clone the repositories
1. Configure git to work with Unix file endings
1. Clone `apple/swift-llvm` into a directory named `llvm`
1. Clone `apple/swift-corelibs-libdispatch` into a directory named `swift-corelibs-libdispatch`
1. Clone `apple/swift-corelibs-foundation` into a directory named `swift-corelibs-foundation`
1. Clone `apple/swift-corelibs-xctest` into a directory named `swift-corelibs-xctest`
1. Clone `compnerd/swift-build` into a directory named `swift-build`
- Currently, other repositories in the Swift project have not been tested and
may not be supported.
This guide assumes that your sources live at the root of `S:`. If your sources
live elsewhere, you can create a substitution for this:
```cmd
subst S: <path to sources>
```
```cmd
S:
git clone https://github.com/apple/swift-llvm llvm
git clone https://github.com/apple/swift-corelibs-libdispatch swift-corelibs-libdispatch
git clone https://github.com/apple/swift-corelibs-foundation swift-corelibs-foundation
git clone https://github.com/apple/swift-corelibs-xctest swift-corelibs-xctest
git clone https://github.com/compnerd/swift-build swift-build
```
## 1. Acquire the latest toolchain and dependencies
1. Download the toolchain, ICU, libxml2, and curl for android from
[Azure](https://dev.azure.com/compnerd/swift-build) into `S:\b\a\Library`.
- You can alternatively use `swift-build.py` from
[compnerd/swift-build](https://www.github.com/compnerd/swift-build) under
the utilities directory.
## 1. Configure LLVM
```cmd
md S:\b\a\llvm
cd S:\b\a\llvm
cmake -C S:\swift-build\cmake\caches\android-armv7.cmake ^
-G Ninja ^
-DCMAKE_BUILD_TYPE=Release ^
-DCMAKE_TOOLCHAIN_FILE=S:\swift-build\cmake\toolchains\android.toolchain.cmake ^
-DANDROID_ALTERNATE_TOOLCHAIN=S:/b/a/Library/Developer/Toolchains/unknown-Asserts-development.xctoolchain/usr ^
-DLLVM_HOST_TRIPLE=armv7-unknown-linux-androideabi ^
S:/llvm
```
## 1. Build and install the standard library
- We must build and install the standard library to build the remainder of the
SDK
```cmd
md S:\b\a\stdlib
cd S:\b\a\stdlib
cmake -C S:\swift-build\cmake\caches\android-armv7.cmake ^
-C S:\swift-build\cmake\caches\swift-stdlib-android-armv7.cmake ^
-G Ninja ^
-DCMAKE_BUILD_TYPE=RelWithDebInfo ^
-DCMAKE_INSTALL_PREFIX=S:/b/a/Library/Developer/Platforms/android.platform/Developer/SDKs/android.sdk/usr ^
-DCMAKE_TOOLCHAIN_FILE=S:\swift-build\cmake\toolchains\android.toolchain.cmake ^
-DANDROID_ALTERNATE_TOOLCHAIN=S:/b/a/Library/Developer/Toolchains/unknown-Asserts-development.xctoolchain/usr ^
-DLLVM_DIR=S:/b/a/llvm/lib/cmake/llvm ^
-DSWIFT_NATIVE_SWIFT_TOOLS_PATH=S:/b/a/Library/Developer/Toolchains/unknown-Asserts-development.xctoolchain/usr/bin ^
S:/swift
ninja
ninja install
```
## 1. Build libdispatch
- We *cannot* install libdispatch until after all builds are complete as that
will cause the Dispatch module to be imported twice and fail to build.
```cmd
md S:\b\a\libdispatch
cd S:\b\a\libdispatch
cmake -C S:\swift-build\cmake\caches\android-armv7.cmake ^
-DSWIFT_ANDROID_SDK=S:/b/a/Library/Developer/Platforms/android.platform/Developer/SDKs/android.sdk ^
-C S:\swift-build\cmake\caches\android-armv7-swift-flags.cmake ^
-G Ninja ^
-DCMAKE_BUILD_TYPE=RelWithDebInfo ^
-DCMAKE_INSTALL_PREFIX=S:/b/a/Library/Developer/Platforms/android.platform/Developer/SDKs/android.sdk/usr ^
-DCMAKE_SWIFT_COMPILER=S:/b/a/Library/Developer/Toolchains/unknown-Asserts-development.xctoolchain/usr/bin/swiftc.exe ^
-DCMAKE_TOOLCHAIN_FILE=S:\swift-build\cmake\toolchains\android.toolchain.cmake ^
-DANDROID_ALTERNATE_TOOLCHAIN=S:/b/a/Library/Developer/Toolchains/unknown-Asserts-development.xctoolchain/usr ^
-DENABLE_SWIFT=YES ^
-DENABLE_TESTING=NO ^
S:/swift-corelibs-libdispatch
ninja
```
## 1. Build foundation
```cmd
md S:\b\a\foundation
cd S:\b\a\foundation
cmake -C S:\swift-build\cmake\caches\android-armv7.cmake ^
-DSWIFT_ANDROID_SDK=S:/b/a/Library/Developer/Platforms/android.platform/Developer/SDKs/android.sdk ^
-C S:\swift-build\cmake\caches\android-armv7-swift-flags.cmake ^
-G Ninja ^
-DCMAKE_BUILD_TYPE=RelWithDebInfo ^
-DCMAKE_INSTALL_PREFIX=S:/b/a/Library/Developer/Platforms/android.platform/Developer/SDKs/android.sdk/usr ^
-DCMAKE_SWIFT_COMPILER=S:/b/a/Library/Developer/Toolchains/unknown-Asserts-development.xctoolchain/usr/bin/swiftc.exe ^
-DCMAKE_TOOLCHAIN_FILE=S:\swift-build\cmake\toolchains\android.toolchain.cmake ^
-DANDROID_ALTERNATE_TOOLCHAIN=S:/b/a/Library/Developer/Toolchains/unknown-Asserts-development.xctoolchain/usr ^
-DCURL_LIBRARY=S:/b/a/Library/libcurl-development/usr/lib/libcurl.a ^
-DCURL_INCLUDE_DIR=S:/b/a/Library/libcurl-development/usr/include ^
-DICU_INCLUDE_DIR=S:/b/a/Library/icu-64/usr/include ^
-DICU_UC_LIBRARY=S:/b/a/Library/icu-64/usr/lib/libicuuc64.so ^
-DICU_UC_LIBRARY_RELEASE=S:/b/a/Library/icu-64/usr/lib/libicuuc64.so ^
-DICU_I18N_LIBRARY=S:/b/a/Library/icu-64/usr/lib/libicuin64.so ^
-DICU_I18N_LIBRARY_RELEASE=S:/b/a/Library/icu-64/usr/lib/libicuin64.so ^
-DLIBXML2_LIBRARY=S:/b/a/Library/libxml2-development/usr/lib/libxml2.a ^
-DLIBXML2_INCLUDE_DIR=S:/b/a/Library/libxml2-development/usr/include/libxml2 ^
-DFOUNDATION_PATH_TO_LIBDISPATCH_SOURCE=S:/swift-corelibs-libdispatch ^
-DFOUNDATION_PATH_TO_LIBDISPATCH_BUILD=S:/b/a/libdispatch ^
S:/swift-corelibs-foundation
ninja
```
## 1. Build XCTest
```cmd
md S:\b\a\xctest
cd S:\b\a\xctest
cmake -C S:\swift-build\cmake\caches\android-armv7.cmake ^
-C S:\swift-build\cmake\caches\android-armv7-swift-flags.cmake ^
-G Ninja ^
-DCMAKE_BUILD_TYPE=RelWithDebInfo ^
-DCMAKE_INSTALL_PREFIX=S:/b/a/Library/Developer/Platforms/android.platform/Developer/SDKs/android.sdk/usr ^
-DCMAKE_SWIFT_COMPILER=S:/b/a/Library/Developer/Toolchains/unknown-Asserts-development.xctoolchain/usr/bin/swiftc.exe ^
-DCMAKE_TOOLCHAIN_FILE=S:\swift-build\cmake\toolchains\android.toolchain.cmake ^
-DANDROID_ALTERNATE_TOOLCHAIN=S:/b/a/Library/Developer/Toolchains/unknown-Asserts-development.xctoolchain/usr ^
-DSWIFT_ANDROID_SDK=S:/b/a/Library/Developer/Platforms/android.platform/Developer/SDKs/android.sdk ^
-DXCTEST_PATH_TO_FOUNDATION_BUILD=S:/b/a/foundation ^
-DXCTEST_PATH_TO_LIBDISPATCH_SOURCE=S:/swift-corelibs-libdispatch ^
-DXCTEST_PATH_TO_LIBDISPATCH_BUILD=S:/b/a/libdispatch ^
-DENABLE_TESTING=NO ^
S:/swift-corelibs-xctest
ninja
```
## 1. Install libdispatch
```cmd
cd S:\b\a\libdispatch
ninja install
```
## 1. Install Foundation
```cmd
cd S:\b\a\foundation
ninja install
```
## 1. Install XCTest
```cmd
cd S:\b\a\xctest
ninja install
```
|
unknown
|
github
|
https://github.com/apple/swift
|
docs/AndroidBuild.md
|
import os, re, logging, time, socket
from autotest.client.shared import error, utils
from autotest.client.shared.barrier import listen_server
from autotest.client.shared.syncdata import SyncData
from virttest import utils_test, utils_misc
def run_migration_multi_host_with_speed_measurement(test, params, env):
"""
KVM multi-host migration test with migration speed measurement.

1) Get a live VM and clone it.
2) Verify that the source VM supports migration; if it does, proceed.
3) Start a memory load inside the guest.
4) Set the configured migration speed.
5) Start the migration and collect migration-speed statistics.
6) Check that the guest stresser was not slowed down so much that fewer
page changes occur than this test requires (that would mean the
migration speed is set too high for the current CPU).
7) Kill both VMs and print the migration statistics.

@param test: kvm test object.
@param params: Dictionary with test parameters.
@param env: Dictionary with the test environment.
"""
# Pick the migration flavor; "tcp" is the default protocol.
mig_protocol = params.get("mig_protocol", "tcp")
base_class = utils_test.MultihostMigration
if mig_protocol == "fd":
base_class = utils_test.MultihostMigrationFd
if mig_protocol == "exec":
base_class = utils_test.MultihostMigrationExec
install_path = params.get("cpuflags_install_path", "/tmp")
vm_mem = int(params.get("mem", "512"))
# NOTE(review): pattern is not a raw string; "\d" survives only because
# Python leaves unrecognized escapes intact — prefer r"..." if touched.
get_mig_speed = re.compile("^transferred ram: (\d+) kbytes$",
re.MULTILINE)
mig_speed = params.get("mig_speed", "1G")
mig_speed_accuracy = float(params.get("mig_speed_accuracy", "0.2"))
# Poll "info migrate" once per second for 30 s and record the per-second
# throughput (MB/s) into a Statistic object.
def get_migration_statistic(vm):
last_transfer_mem = 0
transfered_mem = 0
mig_stat = utils.Statistic()
for _ in range(30):
o = vm.monitor.info("migrate")
warning_msg = ("Migration already ended. Migration speed is"
" probably too high and will block vm while"
" filling its memory.")
fail_msg = ("Could not determine the transferred memory from"
" monitor data: %s" % o)
# Human monitor returns a text blob; QMP returns a dict.
if isinstance(o, str):
if not "status: active" in o:
raise error.TestWarn(warning_msg)
try:
transfered_mem = int(get_mig_speed.search(o).groups()[0])
except (IndexError, ValueError):
raise error.TestFail(fail_msg)
else:
if o.get("status") != "active":
raise error.TestWarn(warning_msg)
try:
# QMP reports bytes; convert to kbytes to match the text path.
transfered_mem = o.get("ram").get("transferred") / (1024)
except (IndexError, ValueError):
raise error.TestFail(fail_msg)
# Delta of cumulative kbytes over one second -> MB/s.
real_mig_speed = (transfered_mem - last_transfer_mem) / 1024
last_transfer_mem = transfered_mem
logging.debug("Migration speed: %s MB/s" % (real_mig_speed))
mig_stat.record(real_mig_speed)
time.sleep(1)
return mig_stat
class TestMultihostMigration(base_class):
def __init__(self, test, params, env):
super(TestMultihostMigration, self).__init__(test, params, env)
self.mig_stat = None
self.srchost = self.params.get("hosts")[0]
self.dsthost = self.params.get("hosts")[1]
self.id = {'src': self.srchost,
'dst': self.dsthost,
"type": "speed_measurement"}
self.link_speed = 0
def check_vms(self, mig_data):
"""
Check vms after migrate (intentionally a no-op for this test).

@param mig_data: object with migration data.
"""
pass
def migrate_vms_src(self, mig_data):
"""
Migrate vms source and collect the speed statistics while doing so.

@param mig_data: Data for migration.

To change the way the machine migrates, reimplement this method.
"""
super_cls = super(TestMultihostMigration, self)
super_cls.migrate_vms_src(mig_data)
vm = mig_data.vms[0]
self.mig_stat = get_migration_statistic(vm)
def migration_scenario(self):
sync = SyncData(self.master_id(), self.hostid, self.hosts,
self.id, self.sync_server)
srchost = self.params.get("hosts")[0]
dsthost = self.params.get("hosts")[1]
vms = [params.get("vms").split()[0]]
# Start the in-guest memory stresser before migration begins.
def worker(mig_data):
vm = mig_data.vms[0]
session = vm.wait_for_login(timeout=self.login_timeout)
utils_misc.install_cpuflags_util_on_vm(test, vm, install_path,
extra_flags="-msse3 -msse2")
cmd = ("%s/cpuflags-test --stressmem %d,%d" %
(os.path.join(install_path, "test_cpu_flags"),
vm_mem * 4, vm_mem / 2))
logging.debug("Sending command: %s" % (cmd))
session.sendline(cmd)
# Master side: measure real link bandwidth by receiving from the
# slave for 30 s, then cap link_speed at the requested mig_speed.
if self.master_id() == self.hostid:
server_port = utils_misc.find_free_port(5200, 6000)
server = listen_server(port=server_port)
data_len = 0
sync.sync(server_port, timeout=120)
client = server.socket.accept()[0]
endtime = time.time() + 30
while endtime > time.time():
data_len += len(client.recv(2048))
client.close()
server.close()
self.link_speed = data_len / (30 * 1024 * 1024)
logging.info("Link speed %d MB/s" % (self.link_speed))
ms = utils.convert_data_size(mig_speed, 'M')
if (ms > data_len / 30):
logging.warn("Migration speed %s MB/s is set faster than "
"real link speed %d MB/s" % (mig_speed,
self.link_speed))
else:
self.link_speed = ms / (1024 * 1024)
else:
# Slave side: blast data at the master for ~10 s so it can
# estimate the usable link bandwidth.
data = ""
for _ in range(10000):
data += "i"
server_port = sync.sync(timeout=120)[self.master_id()]
sock = socket.socket(socket.AF_INET,
socket.SOCK_STREAM)
sock.connect((self.master_id(), server_port))
try:
endtime = time.time() + 10
while endtime > time.time():
sock.sendall(data)
sock.close()
except:
# Bare except: any socket error ends this best-effort probe.
pass
self.migrate_wait(vms, srchost, dsthost, worker)
mig = TestMultihostMigration(test, params, env)
#Start migration
mig.run()
#If machine is migration master check migration statistic.
if mig.master_id() == mig.hostid:
mig_speed = utils.convert_data_size(mig_speed, "M")
mig_stat = mig.mig_stat
# convert_data_size returns bytes; reduce to MB for comparisons below.
mig_speed = mig_speed / (1024 * 1024)
real_speed = mig_stat.get_average()
# Tolerated divergence is a fraction of the measured link speed.
ack_speed = mig.link_speed * mig_speed_accuracy
logging.info("Target migration speed: %d MB/s", mig_speed)
logging.info("Real Link speed: %d MB/s", mig.link_speed)
logging.info("Average migration speed: %d MB/s", mig_stat.get_average())
logging.info("Minimum migration speed: %d MB/s", mig_stat.get_min())
logging.info("Maximum migration speed: %d MB/s", mig_stat.get_max())
logging.info("Maximum tolerable divergence: %3.1f%%",
mig_speed_accuracy*100)
if real_speed < mig_speed - ack_speed:
divergence = (1 - float(real_speed)/float(mig_speed)) * 100
raise error.TestWarn("Average migration speed (%s MB/s) "
"is %3.1f%% lower than target (%s MB/s)" %
(real_speed, divergence, mig_speed))
if real_speed > mig_speed + ack_speed:
divergence = (1 - float(mig_speed)/float(real_speed)) * 100
raise error.TestWarn("Average migration speed (%s MB/s) "
"is %3.1f %% higher than target (%s MB/s)" %
(real_speed, divergence, mig_speed))
|
unknown
|
codeparrot/codeparrot-clean
| ||
""" version info, help messages, tracing configuration. """
import py
import pytest
import os, sys
def pytest_addoption(parser):
    """Register debugging/configuration-introspection command line options.

    Adds --version, -h/--help, -p (early plugin load), --traceconfig and
    --debug to the 'debugconfig' option group.

    Fixes vs. original: the stray trailing comma after the --traceconfig
    call (which turned the statement into a useless one-element tuple) is
    removed, and the destroyed indentation is reconstructed.
    """
    group = parser.getgroup('debugconfig')
    group.addoption('--version', action="store_true",
                    help="display pytest lib version and import information.")
    group._addoption("-h", "--help", action="store_true", dest="help",
                     help="show help message and configuration info")
    group._addoption('-p', action="append", dest="plugins", default=[],
                     metavar="name",
                     help="early-load given plugin (multi-allowed). "
                          "To avoid loading of plugins, use the `no:` prefix, e.g. "
                          "`no:doctest`.")
    group.addoption('--traceconfig', '--trace-config',
                    action="store_true", default=False,
                    help="trace considerations of conftest.py files.")
    group.addoption('--debug',
                    action="store_true", dest="debug", default=False,
                    help="store internal tracing debug information in 'pytestdebug.log'.")
@pytest.hookimpl(hookwrapper=True)
def pytest_cmdline_parse():
"""Hook wrapper: after cmdline parsing, enable --debug tracing if requested.

Waits for the real pytest_cmdline_parse to produce the config, then —
when --debug was given — opens pytestdebug.log in the cwd, writes a
version/args header, routes internal trace output into it, and registers
a cleanup that closes the file and restores tracing on unconfigure.
"""
outcome = yield
config = outcome.get_result()
if config.option.debug:
path = os.path.abspath("pytestdebug.log")
debugfile = open(path, 'w')
debugfile.write("versions pytest-%s, py-%s, "
"python-%s\ncwd=%s\nargs=%s\n\n" %(
pytest.__version__, py.__version__,
".".join(map(str, sys.version_info)),
os.getcwd(), config._origargs))
# All internal trace messages now go to the debug file.
config.trace.root.setwriter(debugfile.write)
undo_tracing = config.pluginmanager.enable_tracing()
sys.stderr.write("writing pytestdebug information to %s\n" % path)
def unset_tracing():
debugfile.close()
sys.stderr.write("wrote pytestdebug information to %s\n" %
debugfile.name)
config.trace.root.setwriter(None)
undo_tracing()
config.add_cleanup(unset_tracing)
def pytest_cmdline_main(config):
    """Short-circuit the test run for --version and --help.

    Returns 0 (success exit code) after printing version/plugin info or
    the help screen; returns None otherwise so the normal run proceeds.

    NOTE(review): source formatting was destroyed (indentation stripped);
    this body reconstructs the canonical nesting of the original logic.
    """
    if config.option.version:
        p = py.path.local(pytest.__file__)
        sys.stderr.write("This is pytest version %s, imported from %s\n" %
                         (pytest.__version__, p))
        plugininfo = getpluginversioninfo(config)
        if plugininfo:
            for line in plugininfo:
                sys.stderr.write(line + "\n")
        return 0
    elif config.option.help:
        # Help needs a configured session to show plugin-added options.
        config._do_configure()
        showhelp(config)
        config._ensure_unconfigure()
        return 0
def showhelp(config):
    """Write the full --help screen to the terminal reporter.

    Prints the optparse-formatted options, the [pytest] ini-option table,
    the recognized environment variables, pointers to --markers/--fixtures
    and any warnings collected so far.

    Fixes vs. original: destroyed indentation reconstructed; locals that
    shadowed the builtins `help`, `type` and `vars` renamed; a line of
    commented-out code removed.
    """
    reporter = config.pluginmanager.get_plugin('terminalreporter')
    tw = reporter._tw
    tw.write(config._parser.optparser.format_help())
    tw.line()
    tw.line()
    tw.line("[pytest] ini-options in the next "
            "pytest.ini|tox.ini|setup.cfg file:")
    tw.line()
    for name in config._parser._ininames:
        help_text, ini_type, default = config._parser._inidict[name]
        if ini_type is None:
            ini_type = "string"
        spec = "%s (%s)" % (name, ini_type)
        line = " %-24s %s" % (spec, help_text)
        # Clip to the terminal width.
        tw.line(line[:tw.fullwidth])
    tw.line()
    tw.line("environment variables:")
    env_vars = [
        ("PYTEST_ADDOPTS", "extra command line options"),
        ("PYTEST_PLUGINS", "comma-separated plugins to load during startup"),
        ("PYTEST_DEBUG", "set to enable debug tracing of pytest's internals")
    ]
    for name, help_text in env_vars:
        tw.line(" %-24s %s" % (name, help_text))
    tw.line()
    tw.line()
    tw.line("to see available markers type: py.test --markers")
    tw.line("to see available fixtures type: py.test --fixtures")
    tw.line("(shown according to specified file_or_dir or current dir "
            "if not specified)")
    for warningreport in reporter.stats.get('warnings', []):
        tw.line("warning : " + warningreport.message, red=True)
    return
# (name, description) pairs of conftest.py variables reported by --traceconfig.
conftest_options = [
('pytest_plugins', 'list of plugin names to load'),
]
def getpluginversioninfo(config):
    """Return report lines for setuptools-registered plugin distributions.

    Produces a header line followed by one "name-version at location" line
    per plugin; returns an empty list when no distributions are registered.

    NOTE(review): source formatting was destroyed (indentation stripped);
    this body reconstructs the canonical nesting of the original logic.
    """
    lines = []
    plugininfo = config.pluginmanager.list_plugin_distinfo()
    if plugininfo:
        lines.append("setuptools registered plugins:")
        for plugin, dist in plugininfo:
            # Fall back to repr() for plugins without a __file__ (e.g. classes).
            loc = getattr(plugin, '__file__', repr(plugin))
            content = "%s-%s at %s" % (dist.project_name, dist.version, loc)
            lines.append(" " + content)
    return lines
def pytest_report_header(config):
    """Contribute header lines when --debug or --traceconfig is active.

    With either flag set, reports the pytest/py versions plus registered
    plugin distributions; with --traceconfig also lists every active
    plugin and its location.  Returns an empty list otherwise.

    NOTE(review): source formatting was destroyed (indentation stripped);
    this body reconstructs the canonical nesting of the original logic.
    """
    lines = []
    if config.option.debug or config.option.traceconfig:
        lines.append("using: pytest-%s pylib-%s" %
                     (pytest.__version__, py.__version__))
        verinfo = getpluginversioninfo(config)
        if verinfo:
            lines.extend(verinfo)
    if config.option.traceconfig:
        lines.append("active plugins:")
        items = config.pluginmanager.list_name_plugin()
        for name, plugin in items:
            if hasattr(plugin, '__file__'):
                r = plugin.__file__
            else:
                r = repr(plugin)
            lines.append(" %-20s: %s" % (name, r))
    return lines
|
unknown
|
codeparrot/codeparrot-clean
| ||
---
c: Copyright (C) Daniel Stenberg, <daniel@haxx.se>, et al.
SPDX-License-Identifier: curl
Title: CURLOPT_MAIL_RCPT
Section: 3
Source: libcurl
See-also:
- CURLOPT_MAIL_AUTH (3)
- CURLOPT_MAIL_FROM (3)
Protocol:
- SMTP
Added-in: 7.20.0
---
# NAME
CURLOPT_MAIL_RCPT - list of SMTP mail recipients
# SYNOPSIS
~~~c
#include <curl/curl.h>
CURLcode curl_easy_setopt(CURL *handle, CURLOPT_MAIL_RCPT,
struct curl_slist *rcpts);
~~~
# DESCRIPTION
Pass a pointer to a linked list of recipients to pass to the server in your
SMTP mail request. The linked list should be a fully valid list of
**struct curl_slist** structs properly filled in. Use curl_slist_append(3) to
create the list and curl_slist_free_all(3) to clean up an entire list.
libcurl does not copy the list, it needs to be kept around until after the
transfer has completed.
When performing a mail transfer, each recipient should be specified within a
pair of angled brackets (\<\>), however, should you not use an angled bracket
as the first character libcurl assumes you provided a single email address and
encloses that address within brackets for you.
In order to specify DSN parameters (as per RFC 3461), the address has to be
written in angled brackets, followed by the parameters.
When performing an address verification (**VRFY** command), each recipient
should be specified as the username or username plus domain (as per Section
3.5 of RFC 5321).
When performing a mailing list expand (**EXPN** command), each recipient
should be specified using the mailing list name, such as `Friends` or
`London-Office`.
Using this option multiple times makes the last set list override the previous
ones. Set it to NULL to disable its use again.
# DEFAULT
NULL
# %PROTOCOLS%
# EXAMPLE
~~~c
int main(void)
{
CURL *curl = curl_easy_init();
if(curl) {
CURLcode result;
struct curl_slist *list;
list = curl_slist_append(NULL, "root@localhost");
list = curl_slist_append(list, "person@example.com");
list = curl_slist_append(list, "<other@example.com> NOTIFY=SUCCESS");
curl_easy_setopt(curl, CURLOPT_URL, "smtp://example.com/");
curl_easy_setopt(curl, CURLOPT_MAIL_RCPT, list);
result = curl_easy_perform(curl);
curl_slist_free_all(list);
curl_easy_cleanup(curl);
}
}
~~~
# %AVAILABILITY%
# RETURN VALUE
curl_easy_setopt(3) returns a CURLcode indicating success or error.
CURLE_OK (0) means everything was OK, non-zero means an error occurred, see
libcurl-errors(3).
|
unknown
|
github
|
https://github.com/curl/curl
|
docs/libcurl/opts/CURLOPT_MAIL_RCPT.md
|
import datetime
import warnings
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.db.models.loading import get_model
import haystack
from haystack.backends import BaseEngine, BaseSearchBackend, BaseSearchQuery, log_query
from haystack.constants import ID, DJANGO_CT, DJANGO_ID, DEFAULT_OPERATOR
from haystack.exceptions import MissingDependency, MoreLikeThisError
from haystack.inputs import PythonData, Clean, Exact
from haystack.models import SearchResult
from haystack.utils import get_identifier
from haystack.utils import log as logging
try:
import requests
except ImportError:
raise MissingDependency("The 'elasticsearch' backend requires the installation of 'requests'.")
try:
import pyelasticsearch
except ImportError:
raise MissingDependency("The 'elasticsearch' backend requires the installation of 'pyelasticsearch'. Please refer to the documentation.")
class ElasticsearchSearchBackend(BaseSearchBackend):
"""Haystack search backend talking to Elasticsearch via pyelasticsearch.

Class-level constants define the words/characters Elasticsearch treats
specially and the default index settings (n-gram / edge-n-gram
analyzers) used when the index is created in setup().
"""
# Word reserved by Elasticsearch for special use.
RESERVED_WORDS = (
'AND',
'NOT',
'OR',
'TO',
)
# Characters reserved by Elasticsearch for special use.
# The '\\' must come first, so as not to overwrite the other slash replacements.
RESERVED_CHARACTERS = (
'\\', '+', '-', '&&', '||', '!', '(', ')', '{', '}',
'[', ']', '^', '"', '~', '*', '?', ':',
)
# Settings to add an n-gram & edge n-gram analyzer.
# Passed to create_index() in setup() when the mapping changes.
DEFAULT_SETTINGS = {
'settings': {
"analysis": {
"analyzer": {
"ngram_analyzer": {
"type": "custom",
"tokenizer": "lowercase",
"filter": ["haystack_ngram"]
},
"edgengram_analyzer": {
"type": "custom",
"tokenizer": "lowercase",
"filter": ["haystack_edgengram"]
}
},
# NOTE(review): these tokenizers are defined but the analyzers
# above use the built-in "lowercase" tokenizer — confirm intent.
"tokenizer": {
"haystack_ngram_tokenizer": {
"type": "nGram",
"min_gram": 3,
"max_gram": 15,
},
"haystack_edgengram_tokenizer": {
"type": "edgeNGram",
"min_gram": 2,
"max_gram": 15,
"side": "front"
}
},
"filter": {
"haystack_ngram": {
"type": "nGram",
"min_gram": 3,
"max_gram": 15
},
"haystack_edgengram": {
"type": "edgeNGram",
"min_gram": 2,
"max_gram": 15
}
}
}
}
}
def __init__(self, connection_alias, **connection_options):
    """Validate connection options and create the Elasticsearch client.

    Requires 'URL' and 'INDEX_NAME' keys in connection_options; raises
    ImproperlyConfigured when either is missing.  Mapping setup is
    deferred to setup().

    NOTE(review): source formatting was destroyed (indentation stripped);
    this body reconstructs the canonical nesting and replaces the
    unidiomatic ``not 'X' in d`` tests with ``'X' not in d``.
    """
    super(ElasticsearchSearchBackend, self).__init__(connection_alias, **connection_options)
    if 'URL' not in connection_options:
        raise ImproperlyConfigured("You must specify a 'URL' in your settings for connection '%s'." % connection_alias)
    if 'INDEX_NAME' not in connection_options:
        raise ImproperlyConfigured("You must specify a 'INDEX_NAME' in your settings for connection '%s'." % connection_alias)
    self.conn = pyelasticsearch.ElasticSearch(connection_options['URL'], timeout=self.timeout)
    self.index_name = connection_options['INDEX_NAME']
    self.log = logging.getLogger('haystack')
    # Lazily initialized by setup(); see also existing_mapping caching there.
    self.setup_complete = False
    self.existing_mapping = {}
def setup(self):
"""
Defers loading until needed.

Fetches and caches the current index mapping, builds the schema from
the unified index, and (re)creates index + mapping when they differ.
Exceptions are swallowed when silently_fail is set.
"""
# Get the existing mapping & cache it. We'll compare it
# during the ``update`` & if it doesn't match, we'll put the new
# mapping.
try:
self.existing_mapping = self.conn.get_mapping(index=self.index_name)
except Exception:
if not self.silently_fail:
raise
unified_index = haystack.connections[self.connection_alias].get_unified_index()
self.content_field_name, field_mapping = self.build_schema(unified_index.all_searchfields())
current_mapping = {
'modelresult': {
'properties': field_mapping
}
}
# Only touch the server when the computed mapping differs from the cached one.
if current_mapping != self.existing_mapping:
try:
# Make sure the index is there first.
self.conn.create_index(self.index_name, self.DEFAULT_SETTINGS)
self.conn.put_mapping(self.index_name, 'modelresult', current_mapping)
self.existing_mapping = current_mapping
except Exception:
if not self.silently_fail:
raise
# Marked complete even if the mapping push failed silently above.
self.setup_complete = True
def update(self, index, iterable, commit=True):
    """Bulk-index the given objects into Elasticsearch.

    Each object is prepared via the index, converted field-by-field with
    conn.from_python(), and sent in one bulk_index call.  With
    silently_fail set, failures are logged and skipped.

    Fixes vs. original: Python-3-invalid ``except E, e`` replaced with
    ``except E as e`` (also valid on Python 2.6+); ``e.__name__`` — an
    AttributeError at runtime, since exception *instances* have no
    __name__ — replaced with ``e.__class__.__name__``; destroyed
    indentation reconstructed.
    """
    if not self.setup_complete:
        try:
            self.setup()
        except (requests.RequestException, pyelasticsearch.ElasticHttpError) as e:
            if not self.silently_fail:
                raise
            self.log.error("Failed to add documents to Elasticsearch: %s", e)
            return
    prepped_docs = []
    for obj in iterable:
        try:
            prepped_data = index.full_prepare(obj)
            final_data = {}
            # Convert the data to make sure it's happy.
            for key, value in prepped_data.items():
                final_data[key] = self.conn.from_python(value)
            prepped_docs.append(final_data)
        except (requests.RequestException, pyelasticsearch.ElasticHttpError) as e:
            if not self.silently_fail:
                raise
            # We'll log the object identifier but won't include the actual object
            # to avoid the possibility of that generating encoding errors while
            # processing the log message:
            self.log.error(u"%s while preparing object for update" % e.__class__.__name__, exc_info=True, extra={
                "data": {
                    "index": index,
                    "object": get_identifier(obj)
                }
            })
    self.conn.bulk_index(self.index_name, 'modelresult', prepped_docs, id_field=ID)
    if commit:
        self.conn.refresh(index=self.index_name)
def remove(self, obj_or_string, commit=True):
    """Delete a single document (by its Haystack identifier) from the index.

    With silently_fail set, failures during setup or deletion are logged
    instead of raised.

    Fixes vs. original: Python-3-invalid ``except E, e`` replaced with
    ``except E as e`` (also valid on Python 2.6+); destroyed indentation
    reconstructed.
    """
    doc_id = get_identifier(obj_or_string)
    if not self.setup_complete:
        try:
            self.setup()
        except (requests.RequestException, pyelasticsearch.ElasticHttpError) as e:
            if not self.silently_fail:
                raise
            self.log.error("Failed to remove document '%s' from Elasticsearch: %s", doc_id, e)
            return
    try:
        self.conn.delete(self.index_name, 'modelresult', doc_id)
        if commit:
            self.conn.refresh(index=self.index_name)
    except (requests.RequestException, pyelasticsearch.ElasticHttpError) as e:
        if not self.silently_fail:
            raise
        self.log.error("Failed to remove document '%s' from Elasticsearch: %s", doc_id, e)
def clear(self, models=None, commit=True):
    """Clear the whole index, or only the documents of the given models.

    With no models the entire index is deleted; otherwise a
    delete-by-query removes the matching django_ct values.

    Fixes vs. original: the mutable default argument ``models=[]``
    replaced with ``None`` (same falsy semantics, no shared-state
    pitfall); Python-3-invalid ``except E, e`` replaced with
    ``except E as e``; destroyed indentation reconstructed.
    """
    models = models or []
    # We actually don't want to do this here, as mappings could be
    # very different.
    # if not self.setup_complete:
    #     self.setup()
    try:
        if not models:
            self.conn.delete_index(self.index_name)
        else:
            models_to_delete = []
            for model in models:
                models_to_delete.append("%s:%s.%s" % (DJANGO_CT, model._meta.app_label, model._meta.module_name))
            # Delete by query in Elasticsearch assumes you're dealing with
            # a ``query`` root object. :/
            query = {'query_string': {'query': " OR ".join(models_to_delete)}}
            self.conn.delete_by_query(self.index_name, 'modelresult', query)
        if commit:
            self.conn.refresh(index=self.index_name)
    except (requests.RequestException, pyelasticsearch.ElasticHttpError) as e:
        if not self.silently_fail:
            raise
        if len(models):
            self.log.error("Failed to clear Elasticsearch index of models '%s': %s", ','.join(models_to_delete), e)
        else:
            self.log.error("Failed to clear Elasticsearch index: %s", e)
def build_search_kwargs(self, query_string, sort_by=None, start_offset=0, end_offset=None,
fields='', highlight=False, facets=None,
date_facets=None, query_facets=None,
narrow_queries=None, spelling_query=None,
within=None, dwithin=None, distance_point=None,
models=None, limit_to_registered_models=None,
result_class=None):
"""Translate Haystack search parameters into an Elasticsearch body dict.

Builds a (possibly filtered) query, then layers on sorting, highlight,
facet, model-restriction and geo ("within"/"dwithin") clauses.
NOTE(review): ``spelling_query``, ``start_offset``/``end_offset`` and
``result_class`` are accepted but never referenced in this body —
offsets are applied by the caller instead (see commented block below).
"""
index = haystack.connections[self.connection_alias].get_unified_index()
content_field = index.document_field
# '*:*' is Haystack's match-everything sentinel.
if query_string == '*:*':
kwargs = {
'query': {
'filtered': {
'query': {
"match_all": {}
},
},
},
}
else:
kwargs = {
'query': {
'filtered': {
'query': {
'query_string': {
'default_field': content_field,
'default_operator': DEFAULT_OPERATOR,
'query': query_string,
'analyze_wildcard': True,
'auto_generate_phrase_queries': True,
},
},
},
},
}
if fields:
if isinstance(fields, (list, set)):
fields = " ".join(fields)
kwargs['fields'] = fields
if sort_by is not None:
order_list = []
for field, direction in sort_by:
if field == 'distance' and distance_point:
# Do the geo-enabled sort.
lng, lat = distance_point['point'].get_coords()
sort_kwargs = {
"_geo_distance": {
distance_point['field']: [lng, lat],
"order": direction,
"unit": "km"
}
}
else:
if field == 'distance':
warnings.warn("In order to sort by distance, you must call the '.distance(...)' method.")
# Regular sorting.
sort_kwargs = {field: {'order': direction}}
order_list.append(sort_kwargs)
kwargs['sort'] = order_list
# From/size offsets don't seem to work right in Elasticsearch's DSL. :/
# if start_offset is not None:
# kwargs['from'] = start_offset
# if end_offset is not None:
# kwargs['size'] = end_offset - start_offset
if highlight is True:
kwargs['highlight'] = {
'fields': {
content_field: {'store': 'yes'},
}
}
if self.include_spelling is True:
warnings.warn("Elasticsearch does not handle spelling suggestions.", Warning, stacklevel=2)
# NOTE(review): narrow_queries is defaulted here and again further
# below before use — the second check makes this one redundant.
if narrow_queries is None:
narrow_queries = set()
if facets is not None:
kwargs.setdefault('facets', {})
for facet_fieldname in facets:
kwargs['facets'][facet_fieldname] = {
'terms': {
'field': facet_fieldname,
'size': 100,
},
}
if date_facets is not None:
kwargs.setdefault('facets', {})
for facet_fieldname, value in date_facets.items():
# Need to detect on gap_by & only add amount if it's more than one.
interval = value.get('gap_by').lower()
# Need to detect on amount (can't be applied on months or years).
if value.get('gap_amount', 1) != 1 and not interval in ('month', 'year'):
# Just the first character is valid for use.
interval = "%s%s" % (value['gap_amount'], interval[:1])
kwargs['facets'][facet_fieldname] = {
'date_histogram': {
'field': facet_fieldname,
'interval': interval,
},
'facet_filter': {
"range": {
facet_fieldname: {
'from': self.conn.from_python(value.get('start_date')),
'to': self.conn.from_python(value.get('end_date')),
}
}
}
}
if query_facets is not None:
kwargs.setdefault('facets', {})
for facet_fieldname, value in query_facets:
kwargs['facets'][facet_fieldname] = {
'query': {
'query_string': {
'query': value,
}
},
}
if limit_to_registered_models is None:
limit_to_registered_models = getattr(settings, 'HAYSTACK_LIMIT_TO_REGISTERED_MODELS', True)
if models and len(models):
model_choices = sorted(['%s.%s' % (model._meta.app_label, model._meta.module_name) for model in models])
elif limit_to_registered_models:
# Using narrow queries, limit the results to only models handled
# with the current routers.
model_choices = self.build_models_list()
else:
model_choices = []
# Model restriction is expressed as an extra narrow query on django_ct.
if len(model_choices) > 0:
if narrow_queries is None:
narrow_queries = set()
narrow_queries.add('%s:(%s)' % (DJANGO_CT, ' OR '.join(model_choices)))
if narrow_queries:
kwargs['query'].setdefault('filtered', {})
kwargs['query']['filtered'].setdefault('filter', {})
kwargs['query']['filtered']['filter'] = {
'fquery': {
'query': {
'query_string': {
'query': u' AND '.join(list(narrow_queries)),
},
},
'_cache': True,
}
}
# Bounding-box filter; combined with any existing filter via "and".
if within is not None:
from haystack.utils.geo import generate_bounding_box
((min_lat, min_lng), (max_lat, max_lng)) = generate_bounding_box(within['point_1'], within['point_2'])
within_filter = {
"geo_bounding_box": {
within['field']: {
"top_left": {
"lat": max_lat,
"lon": min_lng
},
"bottom_right": {
"lat": min_lat,
"lon": max_lng
}
}
},
}
kwargs['query'].setdefault('filtered', {})
kwargs['query']['filtered'].setdefault('filter', {})
if kwargs['query']['filtered']['filter']:
compound_filter = {
"and": [
kwargs['query']['filtered']['filter'],
within_filter,
]
}
kwargs['query']['filtered']['filter'] = compound_filter
else:
kwargs['query']['filtered']['filter'] = within_filter
# Radius filter; same "and"-combination strategy as "within" above.
if dwithin is not None:
lng, lat = dwithin['point'].get_coords()
dwithin_filter = {
"geo_distance": {
"distance": dwithin['distance'].km,
dwithin['field']: {
"lat": lat,
"lon": lng
}
}
}
kwargs['query'].setdefault('filtered', {})
kwargs['query']['filtered'].setdefault('filter', {})
if kwargs['query']['filtered']['filter']:
compound_filter = {
"and": [
kwargs['query']['filtered']['filter'],
dwithin_filter
]
}
kwargs['query']['filtered']['filter'] = compound_filter
else:
kwargs['query']['filtered']['filter'] = dwithin_filter
# Remove the "filtered" key if we're not filtering. Otherwise,
# Elasticsearch will blow up.
if not kwargs['query']['filtered'].get('filter'):
kwargs['query'] = kwargs['query']['filtered']['query']
return kwargs
    @log_query
    def search(self, query_string, **kwargs):
        """Run ``query_string`` against Elasticsearch and return processed results.

        Returns a dict with ``results``, ``hits``, ``facets`` and
        ``spelling_suggestion`` keys (see ``_process_results``). An empty
        query short-circuits without hitting the backend.
        """
        # Empty query: nothing to search for, skip the round-trip entirely.
        if len(query_string) == 0:
            return {
                'results': [],
                'hits': 0,
            }
        if not self.setup_complete:
            self.setup()
        search_kwargs = self.build_search_kwargs(query_string, **kwargs)
        search_kwargs['from'] = kwargs.get('start_offset', 0)
        # Detect whether results are ordered by geo distance so that
        # _process_results can attach per-result distance values.
        order_fields = set()
        for order in search_kwargs.get('sort', []):
            for key in order.keys():
                order_fields.add(key)
        geo_sort = '_geo_distance' in order_fields
        end_offset = kwargs.get('end_offset')
        start_offset = kwargs.get('start_offset', 0)
        if end_offset is not None and end_offset > start_offset:
            search_kwargs['size'] = end_offset - start_offset
        try:
            raw_results = self.conn.search(search_kwargs,
                                           index=self.index_name,
                                           doc_type='modelresult')
        except (requests.RequestException, pyelasticsearch.ElasticHttpError), e:
            # Honor silently_fail: log and fall back to an empty response
            # instead of propagating transport/HTTP errors.
            if not self.silently_fail:
                raise
            self.log.error("Failed to query Elasticsearch using '%s': %s", query_string, e)
            raw_results = {}
        return self._process_results(raw_results,
                                     highlight=kwargs.get('highlight'),
                                     result_class=kwargs.get('result_class', SearchResult),
                                     distance_point=kwargs.get('distance_point'), geo_sort=geo_sort)
    def more_like_this(self, model_instance, additional_query_string=None,
                       start_offset=0, end_offset=None, models=None,
                       limit_to_registered_models=None, result_class=None, **kwargs):
        """Fetch documents similar to ``model_instance`` via the ES
        More Like This API, based on the index's content field.

        NOTE(review): ``additional_query_string``, ``models`` and
        ``limit_to_registered_models`` are accepted for API compatibility
        but are not used in this implementation.
        """
        from haystack import connections
        if not self.setup_complete:
            self.setup()
        # Deferred models will have a different class ("RealClass_Deferred_fieldname")
        # which won't be in our registry:
        model_klass = model_instance._meta.concrete_model
        index = connections[self.connection_alias].get_unified_index().get_index(model_klass)
        field_name = index.get_content_field()
        params = {}
        if start_offset is not None:
            params['search_from'] = start_offset
        if end_offset is not None:
            params['search_size'] = end_offset - start_offset
        doc_id = get_identifier(model_instance)
        try:
            raw_results = self.conn.more_like_this(self.index_name, 'modelresult', doc_id, [field_name], **params)
        except (requests.RequestException, pyelasticsearch.ElasticHttpError), e:
            # Best-effort when silently_fail is set: log and return no results.
            if not self.silently_fail:
                raise
            self.log.error("Failed to fetch More Like This from Elasticsearch for document '%s': %s", doc_id, e)
            raw_results = {}
        return self._process_results(raw_results, result_class=result_class)
    def _process_results(self, raw_results, highlight=False,
                         result_class=None, distance_point=None,
                         geo_sort=False):
        """Convert a raw Elasticsearch response into haystack's result dict.

        Returns ``{'results': [...], 'hits': int, 'facets': {...},
        'spelling_suggestion': None}``. Hits whose model is not registered
        with haystack are dropped and subtracted from the hit count.
        """
        from haystack import connections
        results = []
        hits = raw_results.get('hits', {}).get('total', 0)
        facets = {}
        spelling_suggestion = None
        if result_class is None:
            result_class = SearchResult
        if 'facets' in raw_results:
            facets = {
                'fields': {},
                'dates': {},
                'queries': {},
            }
            # Route each facet payload into the terms / date_histogram /
            # query bucket based on its reported '_type'.
            for facet_fieldname, facet_info in raw_results['facets'].items():
                if facet_info.get('_type', 'terms') == 'terms':
                    facets['fields'][facet_fieldname] = [(individual['term'], individual['count']) for individual in facet_info['terms']]
                elif facet_info.get('_type', 'terms') == 'date_histogram':
                    # Elasticsearch provides UTC timestamps with an extra three
                    # decimals of precision, which datetime barfs on.
                    facets['dates'][facet_fieldname] = [(datetime.datetime.utcfromtimestamp(individual['time'] / 1000), individual['count']) for individual in facet_info['entries']]
                elif facet_info.get('_type', 'terms') == 'query':
                    facets['queries'][facet_fieldname] = facet_info['count']
        unified_index = connections[self.connection_alias].get_unified_index()
        indexed_models = unified_index.get_indexed_models()
        content_field = unified_index.document_field
        for raw_result in raw_results.get('hits', {}).get('hits', []):
            source = raw_result['_source']
            app_label, model_name = source[DJANGO_CT].split('.')
            additional_fields = {}
            model = get_model(app_label, model_name)
            if model and model in indexed_models:
                # Convert each stored value back to Python, preferring the
                # field's own convert() when the index defines one.
                for key, value in source.items():
                    index = unified_index.get_index(model)
                    string_key = str(key)
                    if string_key in index.fields and hasattr(index.fields[string_key], 'convert'):
                        additional_fields[string_key] = index.fields[string_key].convert(value)
                    else:
                        additional_fields[string_key] = self.conn.to_python(value)
                # Internal bookkeeping fields are not part of the result payload.
                del(additional_fields[DJANGO_CT])
                del(additional_fields[DJANGO_ID])
                if 'highlight' in raw_result:
                    additional_fields['highlighted'] = raw_result['highlight'].get(content_field, '')
                if distance_point:
                    additional_fields['_point_of_origin'] = distance_point
                    if geo_sort and raw_result.get('sort'):
                        from haystack.utils.geo import Distance
                        # First sort value is the distance (in km) when
                        # sorting by _geo_distance.
                        additional_fields['_distance'] = Distance(km=float(raw_result['sort'][0]))
                    else:
                        additional_fields['_distance'] = None
                result = result_class(app_label, model_name, source[DJANGO_ID], raw_result['_score'], **additional_fields)
                results.append(result)
            else:
                # Unregistered model: skip the hit and correct the total.
                hits -= 1
        return {
            'results': results,
            'hits': hits,
            'facets': facets,
            'spelling_suggestion': spelling_suggestion,
        }
def build_schema(self, fields):
content_field_name = ''
mapping = {}
for field_name, field_class in fields.items():
field_mapping = {
'boost': field_class.boost,
'index': 'analyzed',
'store': 'yes',
'type': 'string',
}
if field_class.document is True:
content_field_name = field_class.index_fieldname
# DRL_FIXME: Perhaps move to something where, if none of these
# checks succeed, call a custom method on the form that
# returns, per-backend, the right type of storage?
if field_class.field_type in ['date', 'datetime']:
field_mapping['type'] = 'date'
elif field_class.field_type == 'integer':
field_mapping['type'] = 'long'
elif field_class.field_type == 'float':
field_mapping['type'] = 'float'
elif field_class.field_type == 'boolean':
field_mapping['type'] = 'boolean'
elif field_class.field_type == 'ngram':
field_mapping['analyzer'] = "ngram_analyzer"
elif field_class.field_type == 'edge_ngram':
field_mapping['analyzer'] = "edgengram_analyzer"
elif field_class.field_type == 'location':
field_mapping['type'] = 'geo_point'
# The docs claim nothing is needed for multivalue...
# if field_class.is_multivalued:
# field_data['multi_valued'] = 'true'
if field_class.stored is False:
field_mapping['store'] = 'no'
# Do this last to override `text` fields.
if field_class.indexed is False or hasattr(field_class, 'facet_for'):
field_mapping['index'] = 'not_analyzed'
if field_mapping['type'] == 'string' and field_class.indexed:
field_mapping["term_vector"] = "with_positions_offsets"
if not hasattr(field_class, 'facet_for') and not field_class.field_type in('ngram', 'edge_ngram'):
field_mapping["analyzer"] = "snowball"
mapping[field_class.index_fieldname] = field_mapping
return (content_field_name, mapping)
# NOTE: this is almost an exact copy of what's in the Solr backend,
# but we can't share the implementation due to import dependencies.
class ElasticsearchSearchQuery(BaseSearchQuery):
    """SearchQuery implementation that renders Lucene query-string syntax
    for the Elasticsearch backend."""
    def matching_all_fragment(self):
        """Return the fragment that matches every document."""
        return '*:*'
    def add_spatial(self, lat, lon, sfield, distance, filter='bbox'):
        """Adds spatial query parameters to search query"""
        kwargs = {
            'lat': lat,
            # BUG FIX: this previously stored the Python 2 builtin ``long``
            # (the type object) instead of the ``lon`` argument, so the
            # longitude never reached the backend.
            'long': lon,
            'sfield': sfield,
            'distance': distance,
        }
        self.spatial_query.update(kwargs)
    def add_order_by_distance(self, lat, long, sfield):
        """Orders the search result by distance from point."""
        kwargs = {
            'lat': lat,
            'long': long,
            'sfield': sfield,
        }
        self.order_by_distance.update(kwargs)
    def build_query_fragment(self, field, filter_type, value):
        """Render a single ``field``/``filter_type``/``value`` clause as a
        Lucene query-string fragment.

        ``value`` may be a raw Python value or a haystack ``InputType``;
        raw values are wrapped in ``Clean``/``PythonData`` first.
        """
        from haystack import connections
        query_frag = ''
        if not hasattr(value, 'input_type_name'):
            # Handle when we've got a ``ValuesListQuerySet``...
            if hasattr(value, 'values_list'):
                value = list(value)
            if isinstance(value, basestring):
                # It's not an ``InputType``. Assume ``Clean``.
                value = Clean(value)
            else:
                value = PythonData(value)
        # Prepare the query using the InputType.
        prepared_value = value.prepare(self)
        if not isinstance(prepared_value, (set, list, tuple)):
            # Then convert whatever we get back to what pysolr wants if needed.
            prepared_value = self.backend.conn.from_python(prepared_value)
        # 'content' is a special reserved word, much like 'pk' in
        # Django's ORM layer. It indicates 'no special field'.
        if field == 'content':
            index_fieldname = ''
        else:
            index_fieldname = u'%s:' % connections[self._using].get_unified_index().get_index_fieldname(field)
        # Templates mapping each filter type onto its Lucene syntax.
        filter_types = {
            'contains': u'%s',
            'startswith': u'%s*',
            'exact': u'%s',
            'gt': u'{%s TO *}',
            'gte': u'[%s TO *]',
            'lt': u'{* TO %s}',
            'lte': u'[* TO %s]',
        }
        if value.post_process is False:
            query_frag = prepared_value
        else:
            if filter_type in ['contains', 'startswith']:
                if value.input_type_name == 'exact':
                    query_frag = prepared_value
                else:
                    # Iterate over terms & incorporate the converted form of each into the query.
                    terms = []
                    if isinstance(prepared_value, basestring):
                        for possible_value in prepared_value.split(' '):
                            terms.append(filter_types[filter_type] % self.backend.conn.from_python(possible_value))
                    else:
                        terms.append(filter_types[filter_type] % self.backend.conn.from_python(prepared_value))
                    if len(terms) == 1:
                        query_frag = terms[0]
                    else:
                        query_frag = u"(%s)" % " AND ".join(terms)
            elif filter_type == 'in':
                in_options = []
                for possible_value in prepared_value:
                    in_options.append(u'"%s"' % self.backend.conn.from_python(possible_value))
                query_frag = u"(%s)" % " OR ".join(in_options)
            elif filter_type == 'range':
                start = self.backend.conn.from_python(prepared_value[0])
                end = self.backend.conn.from_python(prepared_value[1])
                query_frag = u'["%s" TO "%s"]' % (start, end)
            elif filter_type == 'exact':
                if value.input_type_name == 'exact':
                    query_frag = prepared_value
                else:
                    prepared_value = Exact(prepared_value).prepare(self)
                    query_frag = filter_types[filter_type] % prepared_value
            else:
                if value.input_type_name != 'exact':
                    prepared_value = Exact(prepared_value).prepare(self)
                query_frag = filter_types[filter_type] % prepared_value
        # Parenthesize bare fragments so they group correctly when joined.
        if len(query_frag) and not query_frag.startswith('(') and not query_frag.endswith(')'):
            query_frag = "(%s)" % query_frag
        return u"%s%s" % (index_fieldname, query_frag)
    def build_alt_parser_query(self, parser_name, query_string='', **kwargs):
        """Render a Solr-style local-params query (``{!parser k=v ...}``)."""
        if query_string:
            kwargs['v'] = query_string
        kwarg_bits = []
        for key in sorted(kwargs.keys()):
            # Quote values that contain spaces so they parse as one token.
            if isinstance(kwargs[key], basestring) and ' ' in kwargs[key]:
                kwarg_bits.append(u"%s='%s'" % (key, kwargs[key]))
            else:
                kwarg_bits.append(u"%s=%s" % (key, kwargs[key]))
        return u"{!%s %s}" % (parser_name, ' '.join(kwarg_bits))
    def build_params(self, spelling_query=None, **kwargs):
        """Collect the backend search() keyword arguments from query state."""
        search_kwargs = {
            'start_offset': self.start_offset,
            'result_class': self.result_class
        }
        order_by_list = None
        if self.order_by:
            if order_by_list is None:
                order_by_list = []
            for field in self.order_by:
                # A leading '-' means descending order on that field.
                direction = 'asc'
                if field.startswith('-'):
                    direction = 'desc'
                    field = field[1:]
                order_by_list.append((field, direction))
            search_kwargs['sort_by'] = order_by_list
        if self.date_facets:
            search_kwargs['date_facets'] = self.date_facets
        if self.distance_point:
            search_kwargs['distance_point'] = self.distance_point
        if self.dwithin:
            search_kwargs['dwithin'] = self.dwithin
        if self.end_offset is not None:
            search_kwargs['end_offset'] = self.end_offset
        if self.facets:
            search_kwargs['facets'] = list(self.facets)
        if self.fields:
            search_kwargs['fields'] = self.fields
        if self.highlight:
            search_kwargs['highlight'] = self.highlight
        if self.models:
            search_kwargs['models'] = self.models
        if self.narrow_queries:
            search_kwargs['narrow_queries'] = self.narrow_queries
        if self.query_facets:
            search_kwargs['query_facets'] = self.query_facets
        if self.within:
            search_kwargs['within'] = self.within
        if spelling_query:
            search_kwargs['spelling_query'] = spelling_query
        return search_kwargs
    def run(self, spelling_query=None, **kwargs):
        """Builds and executes the query. Returns a list of search results."""
        final_query = self.build_query()
        search_kwargs = self.build_params(spelling_query, **kwargs)
        results = self.backend.search(final_query, **search_kwargs)
        self._results = results.get('results', [])
        self._hit_count = results.get('hits', 0)
        self._facet_counts = self.post_process_facets(results)
        self._spelling_suggestion = results.get('spelling_suggestion', None)
    def run_mlt(self, **kwargs):
        """Builds and executes the query. Returns a list of search results."""
        if self._more_like_this is False or self._mlt_instance is None:
            raise MoreLikeThisError("No instance was provided to determine 'More Like This' results.")
        additional_query_string = self.build_query()
        search_kwargs = {
            'start_offset': self.start_offset,
            'result_class': self.result_class,
        }
        if self.end_offset is not None:
            search_kwargs['end_offset'] = self.end_offset - self.start_offset
        results = self.backend.more_like_this(self._mlt_instance, additional_query_string, **search_kwargs)
        self._results = results.get('results', [])
        self._hit_count = results.get('hits', 0)
class ElasticsearchSearchEngine(BaseEngine):
    # Wires the Elasticsearch backend and query classes into haystack's
    # engine machinery; haystack instantiates these via the connection alias.
    backend = ElasticsearchSearchBackend
    query = ElasticsearchSearchQuery
|
unknown
|
codeparrot/codeparrot-clean
| ||
{
"openapi": "3.1.0",
"info": {
"title": "WolframAlpha",
"version": "v1.7"
},
"servers": [
{
"url": "https://www.wolframalpha.com",
"description": "The WolframAlpha server"
}
],
"paths": {
"/api/v1/spoken.jsp": {
"get": {
"operationId": "getSpokenResult",
"externalDocs": "https://products.wolframalpha.com/spoken-results-api/documentation",
"summary": "Data results from the WolframAlpha Spoken Results API",
"responses": {
"200": {
"description": "the answer to the user's data query",
"content": {
"text/plain": {}
}
},
"501": {
"description": "WolframAlpha was unable to form an answer to the query"
},
"400": {
"description": "The request is missing the i parameter whose value is the query"
},
"403": {
"description": "Unauthorized"
}
},
"parameters": [
{
"name": "i",
"in": "query",
"description": "the user's query",
"required": true,
"schema": {
"type": "string"
}
},
{
"name": "geolocation",
"in": "query",
"description": "comma-separated latitude and longitude of the user",
"required": false,
"style": "form",
"explode": false,
"schema": {
"type": "array",
"items": {
"type": "number"
}
}
}
]
}
},
"/api/v1/result.jsp": {
"get": {
"operationId": "getShortAnswer",
"externalDocs": "https://products.wolframalpha.com/short-answers-api/documentation",
"summary": "Math results from the WolframAlpha Short Answers API",
"responses": {
"200": {
"description": "the answer to the user's math query",
"content": {
"text/plain": {}
}
},
"501": {
"description": "WolframAlpha was unable to form an answer to the query"
},
"400": {
"description": "The request is missing the i parameter whose value is the query"
},
"403": {
"description": "Unauthorized"
}
},
"parameters": [
{
"name": "i",
"in": "query",
"description": "the user's query",
"required": true,
"schema": {
"type": "string"
}
},
{
"name": "geolocation",
"in": "query",
"description": "comma-separated latitude and longitude of the user",
"required": false,
"style": "form",
"explode": false,
"schema": {
"type": "array",
"items": {
"type": "number"
}
}
}
]
}
},
"/api/v1/query.jsp": {
"get": {
"operationId": "getFullResults",
"externalDocs": "https://products.wolframalpha.com/api/documentation",
"summary": "Information from the WolframAlpha Full Results API",
"responses": {
"200": {
"description": "The results of the query, or an error code",
"content": {
"text/xml": {},
"application/json": {}
}
}
},
"parameters": [
{
"name": "assumptionsversion",
"in": "query",
"description": "which version to use for structuring assumptions in the output and in requests",
"required": true,
"schema": {
"type": "integer",
"enum": [
2
]
}
},
{
"name": "input",
"in": "query",
"description": "the user's query",
"required": true,
"schema": {
"type": "string"
}
},
{
"name": "latlong",
"in": "query",
"description": "comma-separated latitude and longitude of the user",
"required": false,
"style": "form",
"explode": false,
"schema": {
"type": "array",
"items": {
"type": "number"
}
}
},
{
"name": "output",
"in": "query",
"description": "the response content type",
"required": true,
"schema": {
"type": "string",
"enum": [
"json"
]
}
},
{
"name": "assumption",
"in": "query",
"description": "the assumption to use, passed back from input in the values array of the assumptions object in the output of a previous query with the same input.",
"required": false,
"explode": true,
"style": "form",
"schema": {
"type": "array",
"items": {
"type": "string"
}
}
},
{
"name": "format",
"in": "query",
"description": "comma-separated elements to include in the response when available.",
"required": false,
"explode": false,
"style": "form",
"schema": {
"type": "array",
"items": {
"type": "string",
"enum": [
"csv",
"tsv",
"image",
"imagemap",
"plaintext",
"sound",
"wav",
"minput",
"moutput",
"cell"
]
}
}
}
]
}
}
}
}
|
json
|
github
|
https://github.com/langchain-ai/langchain
|
libs/langchain/tests/unit_tests/examples/test_specs/wolframcloud/apispec.json
|
# -*- encoding: utf-8 -*-
#
# Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Base classes for storage engines
"""
import abc
from oslo_config import cfg
from oslo_db import api as db_api
import six
_BACKEND_MAPPING = {'sqlalchemy': 'ironic.db.sqlalchemy.api'}
IMPL = db_api.DBAPI.from_config(cfg.CONF, backend_mapping=_BACKEND_MAPPING,
lazy=True)
def get_instance():
    """Return the lazily-loaded DB API instance (module-level singleton)."""
    return IMPL
@six.add_metaclass(abc.ABCMeta)
class Connection(object):
    """Base class for storage system connections."""
    @abc.abstractmethod
    def __init__(self):
        """Constructor."""
    @abc.abstractmethod
    def get_nodeinfo_list(self, columns=None, filters=None, limit=None,
                          marker=None, sort_key=None, sort_dir=None):
        """Get specific columns for matching nodes.
        Return a list of the specified columns for all nodes that match the
        specified filters.
        :param columns: List of column names to return.
                        Defaults to 'id' column when columns == None.
        :param filters: Filters to apply. Defaults to None.
                        :associated: True | False
                        :reserved: True | False
                        :reserved_by_any_of: [conductor1, conductor2]
                        :maintenance: True | False
                        :chassis_uuid: uuid of chassis
                        :driver: driver's name
                        :provision_state: provision state of node
                        :provisioned_before:
                            nodes with provision_updated_at field before this
                            interval in seconds
        :param limit: Maximum number of nodes to return.
        :param marker: the last item of the previous page; we return the next
                       result set.
        :param sort_key: Attribute by which results should be sorted.
        :param sort_dir: direction in which results should be sorted.
                         (asc, desc)
        :returns: A list of tuples of the specified columns.
        """
    @abc.abstractmethod
    def get_node_list(self, filters=None, limit=None, marker=None,
                      sort_key=None, sort_dir=None):
        """Return a list of nodes.
        :param filters: Filters to apply. Defaults to None.
                        :associated: True | False
                        :reserved: True | False
                        :maintenance: True | False
                        :chassis_uuid: uuid of chassis
                        :driver: driver's name
                        :provision_state: provision state of node
                        :provisioned_before:
                            nodes with provision_updated_at field before this
                            interval in seconds
        :param limit: Maximum number of nodes to return.
        :param marker: the last item of the previous page; we return the next
                       result set.
        :param sort_key: Attribute by which results should be sorted.
        :param sort_dir: direction in which results should be sorted.
                         (asc, desc)
        """
    @abc.abstractmethod
    def reserve_node(self, tag, node_id):
        """Reserve a node.
        To prevent other ManagerServices from manipulating the given
        Node while a Task is performed, mark it reserved by this host.
        :param tag: A string uniquely identifying the reservation holder.
        :param node_id: A node id or uuid.
        :returns: A Node object.
        :raises: NodeNotFound if the node is not found.
        :raises: NodeLocked if the node is already reserved.
        """
    @abc.abstractmethod
    def release_node(self, tag, node_id):
        """Release the reservation on a node.
        :param tag: A string uniquely identifying the reservation holder.
        :param node_id: A node id or uuid.
        :raises: NodeNotFound if the node is not found.
        :raises: NodeLocked if the node is reserved by another host.
        :raises: NodeNotLocked if the node was found to not have a
                 reservation at all.
        """
    @abc.abstractmethod
    def create_node(self, values):
        """Create a new node.
        :param values: A dict containing several items used to identify
                       and track the node, and several dicts which are passed
                       into the Drivers when managing this node. For example:
                       ::
                        {
                         'uuid': uuidutils.generate_uuid(),
                         'instance_uuid': None,
                         'power_state': states.POWER_OFF,
                         'provision_state': states.AVAILABLE,
                         'driver': 'pxe_ipmitool',
                         'driver_info': { ... },
                         'properties': { ... },
                         'extra': { ... },
                        }
        :returns: A node.
        """
    @abc.abstractmethod
    def get_node_by_id(self, node_id):
        """Return a node.
        :param node_id: The id of a node.
        :returns: A node.
        """
    @abc.abstractmethod
    def get_node_by_uuid(self, node_uuid):
        """Return a node.
        :param node_uuid: The uuid of a node.
        :returns: A node.
        """
    @abc.abstractmethod
    def get_node_by_name(self, node_name):
        """Return a node.
        :param node_name: The logical name of a node.
        :returns: A node.
        """
    @abc.abstractmethod
    def get_node_by_instance(self, instance):
        """Return a node.
        :param instance: The instance name or uuid to search for.
        :returns: A node.
        """
    @abc.abstractmethod
    def destroy_node(self, node_id):
        """Destroy a node and all associated interfaces.
        :param node_id: The id or uuid of a node.
        """
    @abc.abstractmethod
    def update_node(self, node_id, values):
        """Update properties of a node.
        :param node_id: The id or uuid of a node.
        :param values: Dict of values to update.
                       May be a partial list, eg. when setting the
                       properties for a driver. For example:
                       ::
                        {
                         'driver_info':
                             {
                              'my-field-1': val1,
                              'my-field-2': val2,
                             }
                        }
        :returns: A node.
        :raises: NodeAssociated
        :raises: NodeNotFound
        """
    @abc.abstractmethod
    def get_port_by_id(self, port_id):
        """Return a network port representation.
        :param port_id: The id of a port.
        :returns: A port.
        """
    @abc.abstractmethod
    def get_port_by_uuid(self, port_uuid):
        """Return a network port representation.
        :param port_uuid: The uuid of a port.
        :returns: A port.
        """
    @abc.abstractmethod
    def get_port_by_address(self, address):
        """Return a network port representation.
        :param address: The MAC address of a port.
        :returns: A port.
        """
    @abc.abstractmethod
    def get_port_list(self, limit=None, marker=None,
                      sort_key=None, sort_dir=None):
        """Return a list of ports.
        :param limit: Maximum number of ports to return.
        :param marker: the last item of the previous page; we return the next
                       result set.
        :param sort_key: Attribute by which results should be sorted.
        :param sort_dir: direction in which results should be sorted.
                         (asc, desc)
        """
    @abc.abstractmethod
    def get_ports_by_node_id(self, node_id, limit=None, marker=None,
                             sort_key=None, sort_dir=None):
        """List all the ports for a given node.
        :param node_id: The integer node ID.
        :param limit: Maximum number of ports to return.
        :param marker: the last item of the previous page; we return the next
                       result set.
        :param sort_key: Attribute by which results should be sorted
        :param sort_dir: direction in which results should be sorted
                         (asc, desc)
        :returns: A list of ports.
        """
    @abc.abstractmethod
    def create_port(self, values):
        """Create a new port.
        :param values: Dict of values.
        """
    @abc.abstractmethod
    def update_port(self, port_id, values):
        """Update properties of a port.
        :param port_id: The id or MAC of a port.
        :param values: Dict of values to update.
        :returns: A port.
        """
    @abc.abstractmethod
    def destroy_port(self, port_id):
        """Destroy a port.
        :param port_id: The id or MAC of a port.
        """
    @abc.abstractmethod
    def create_chassis(self, values):
        """Create a new chassis.
        :param values: Dict of values.
        """
    @abc.abstractmethod
    def get_chassis_by_id(self, chassis_id):
        """Return a chassis representation.
        :param chassis_id: The id of a chassis.
        :returns: A chassis.
        """
    @abc.abstractmethod
    def get_chassis_by_uuid(self, chassis_uuid):
        """Return a chassis representation.
        :param chassis_uuid: The uuid of a chassis.
        :returns: A chassis.
        """
    @abc.abstractmethod
    def get_chassis_list(self, limit=None, marker=None,
                         sort_key=None, sort_dir=None):
        """Return a list of chassis.
        :param limit: Maximum number of chassis to return.
        :param marker: the last item of the previous page; we return the next
                       result set.
        :param sort_key: Attribute by which results should be sorted.
        :param sort_dir: direction in which results should be sorted.
                         (asc, desc)
        """
    @abc.abstractmethod
    def update_chassis(self, chassis_id, values):
        """Update properties of a chassis.
        :param chassis_id: The id or the uuid of a chassis.
        :param values: Dict of values to update.
        :returns: A chassis.
        """
    @abc.abstractmethod
    def destroy_chassis(self, chassis_id):
        """Destroy a chassis.
        :param chassis_id: The id or the uuid of a chassis.
        """
    @abc.abstractmethod
    def register_conductor(self, values, update_existing=False):
        """Register an active conductor with the cluster.
        :param values: A dict of values which must contain the following:
                       ::
                        {
                         'hostname': the unique hostname which identifies
                                     this Conductor service.
                         'drivers': a list of supported drivers.
                        }
        :param update_existing: When false, registration will raise an
                                exception when a conflicting online record
                                is found. When true, will overwrite the
                                existing record. Default: False.
        :returns: A conductor.
        :raises: ConductorAlreadyRegistered
        """
    @abc.abstractmethod
    def get_conductor(self, hostname):
        """Retrieve a conductor's service record from the database.
        :param hostname: The hostname of the conductor service.
        :returns: A conductor.
        :raises: ConductorNotFound
        """
    @abc.abstractmethod
    def unregister_conductor(self, hostname):
        """Remove this conductor from the service registry immediately.
        :param hostname: The hostname of this conductor service.
        :raises: ConductorNotFound
        """
    @abc.abstractmethod
    def touch_conductor(self, hostname):
        """Mark a conductor as active by updating its 'updated_at' property.
        :param hostname: The hostname of this conductor service.
        :raises: ConductorNotFound
        """
    @abc.abstractmethod
    def get_active_driver_dict(self, interval):
        """Retrieve drivers for the registered and active conductors.
        :param interval: Seconds since last check-in of a conductor.
        :returns: A dict which maps driver names to the set of hosts
                  which support them. For example:
                  ::
                    {driverA: set([host1, host2]),
                     driverB: set([host2, host3])}
        """
    @abc.abstractmethod
    def get_offline_conductors(self):
        """Get a list of conductor hostnames that are offline (dead).
        :returns: A list of conductor hostnames.
        """
    @abc.abstractmethod
    def touch_node_provisioning(self, node_id):
        """Mark the node's provisioning as running.
        Mark the node's provisioning as running by updating its
        'provision_updated_at' property.
        :param node_id: The id of a node.
        :raises: NodeNotFound
        """
|
unknown
|
codeparrot/codeparrot-clean
| ||
// Copyright IBM Corp. 2016, 2025
// SPDX-License-Identifier: MPL-2.0
package api
import (
"context"
"fmt"
"net/http"
)
// SSH is used to return a client to invoke operations on SSH backend.
type SSH struct {
	c          *Client // underlying API client used to issue requests
	MountPoint string  // mount path of the SSH secrets engine
}
// SSH returns an SSH backend client bound to the default mount point.
func (c *Client) SSH() *SSH {
	mount := SSHHelperDefaultMountPoint
	return c.SSHWithMountPoint(mount)
}
// SSHWithMountPoint returns an SSH backend client that targets the given
// mount point instead of the default one.
func (c *Client) SSHWithMountPoint(mountPoint string) *SSH {
	ssh := &SSH{MountPoint: mountPoint}
	ssh.c = c
	return ssh
}
// Credential wraps CredentialWithContext using context.Background.
func (c *SSH) Credential(role string, data map[string]interface{}) (*Secret, error) {
	ctx := context.Background()
	return c.CredentialWithContext(ctx, role, data)
}
// CredentialWithContext invokes the SSH backend API to create a credential to establish an SSH session.
func (c *SSH) CredentialWithContext(ctx context.Context, role string, data map[string]interface{}) (*Secret, error) {
	ctx, cancel := c.c.withConfiguredTimeout(ctx)
	defer cancel()

	// Build and populate the creds request for the given role.
	path := fmt.Sprintf("/v1/%s/creds/%s", c.MountPoint, role)
	req := c.c.NewRequest(http.MethodPut, path)
	if err := req.SetJSONBody(data); err != nil {
		return nil, err
	}

	resp, err := c.c.rawRequestWithContext(ctx, req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	return ParseSecret(resp.Body)
}
// SignKey wraps SignKeyWithContext using context.Background.
func (c *SSH) SignKey(role string, data map[string]interface{}) (*Secret, error) {
	ctx := context.Background()
	return c.SignKeyWithContext(ctx, role, data)
}
// SignKeyWithContext signs the given public key and returns a signed public key to pass
// along with the SSH request.
func (c *SSH) SignKeyWithContext(ctx context.Context, role string, data map[string]interface{}) (*Secret, error) {
	ctx, cancel := c.c.withConfiguredTimeout(ctx)
	defer cancel()

	// Build and populate the sign request for the given role.
	path := fmt.Sprintf("/v1/%s/sign/%s", c.MountPoint, role)
	req := c.c.NewRequest(http.MethodPut, path)
	if err := req.SetJSONBody(data); err != nil {
		return nil, err
	}

	resp, err := c.c.rawRequestWithContext(ctx, req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	return ParseSecret(resp.Body)
}
|
go
|
github
|
https://github.com/hashicorp/vault
|
api/ssh.go
|
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package phases
import (
"k8s.io/kubernetes/cmd/kubeadm/app/cmd/options"
"k8s.io/kubernetes/cmd/kubeadm/app/cmd/phases/workflow"
cmdutil "k8s.io/kubernetes/cmd/kubeadm/app/cmd/util"
markcontrolplanephase "k8s.io/kubernetes/cmd/kubeadm/app/phases/markcontrolplane"
"k8s.io/kubernetes/cmd/kubeadm/app/util/errors"
)
var (
	// markControlPlaneExample holds the CLI help examples shown for the
	// "kubeadm init phase mark-control-plane" sub-command.
	markControlPlaneExample = cmdutil.Examples(`
	# Applies control-plane label and taint to the current node, functionally equivalent to what executed by kubeadm init.
	kubeadm init phase mark-control-plane --config config.yaml
	# Applies control-plane label and taint to a specific node
	kubeadm init phase mark-control-plane --node-name myNode
	`)
)
// NewMarkControlPlanePhase creates a kubeadm workflow phase that implements mark-controlplane checks.
func NewMarkControlPlanePhase() workflow.Phase {
	phase := workflow.Phase{
		Name:    "mark-control-plane",
		Short:   "Mark a node as a control-plane",
		Example: markControlPlaneExample,
		Run:     runMarkControlPlane,
	}
	// Flags this phase inherits from the parent "init" command.
	phase.InheritFlags = []string{options.NodeName, options.CfgPath, options.DryRun}
	return phase
}
// runMarkControlPlane executes mark-control-plane checks logic.
func runMarkControlPlane(c workflow.RunData) error {
	// The workflow runner hands us an opaque RunData; it must be the init data struct.
	initData, ok := c.(InitData)
	if !ok {
		return errors.New("mark-control-plane phase invoked with an invalid data struct")
	}

	client, err := initData.Client()
	if err != nil {
		return err
	}

	reg := initData.Cfg().NodeRegistration
	return markcontrolplanephase.MarkControlPlane(client, reg.Name, reg.Taints)
}
|
go
|
github
|
https://github.com/kubernetes/kubernetes
|
cmd/kubeadm/app/cmd/phases/init/markcontrolplane.go
|
# Tests that require installed backends go into
# sympy/test_external/test_autowrap
import os
import tempfile
import shutil
from sympy.utilities.autowrap import autowrap, binary_function, CythonCodeWrapper, \
ufuncify
from sympy.utilities.codegen import Routine, CCodeGen, CodeGenArgumentListError
from sympy.utilities.pytest import raises
from sympy.core import symbols, Eq
from sympy.core.compatibility import StringIO
def get_string(dump_fn, routines, prefix="file", header=False, empty=False):
    """Wrapper for dump_fn. dump_fn writes its results to a stream object and
    this wrapper returns the contents of that stream as a string. This
    auxiliary function is used by many tests below.

    The header and the empty lines are not generated to facilitate the
    testing of the output.
    """
    output = StringIO()
    try:
        dump_fn(routines, output, prefix, header, empty)
        return output.getvalue()
    finally:
        # Close the stream even if dump_fn (or getvalue) raises.
        output.close()
def test_cython_wrapper_scalar_function():
    """A plain expression generates a .pyx that forwards and returns the C call."""
    x, y, z = symbols('x,y,z')
    routine = Routine("test", (x + y)*z)
    wrapper = CythonCodeWrapper(CCodeGen())
    generated = get_string(wrapper.dump_pyx, [routine])
    expected = (
        'cdef extern from "file.h":\n'
        ' double test(double x, double y, double z)\n'
        'def test_c(double x, double y, double z):\n'
        ' return test(x, y, z)\n'
    )
    assert generated == expected
def test_cython_wrapper_outarg():
    """A pure output argument becomes a local cdef, passed in and returned."""
    from sympy import Equality
    x, y, z = symbols('x,y,z')
    wrapper = CythonCodeWrapper(CCodeGen())
    routine = Routine("test", Equality(z, x + y))
    generated = get_string(wrapper.dump_pyx, [routine])
    expected = (
        'cdef extern from "file.h":\n'
        ' void test(double x, double y, double &z)\n'
        'def test_c(double x, double y):\n'
        ' cdef double z\n'
        ' test(x, y, z)\n'
        ' return z\n'
    )
    assert generated == expected
def test_cython_wrapper_inoutarg():
    """An in/out argument stays a parameter of the Python wrapper and is returned."""
    from sympy import Equality
    x, y, z = symbols('x,y,z')
    wrapper = CythonCodeWrapper(CCodeGen())
    routine = Routine("test", Equality(z, x + y + z))
    generated = get_string(wrapper.dump_pyx, [routine])
    expected = (
        'cdef extern from "file.h":\n'
        ' void test(double x, double y, double &z)\n'
        'def test_c(double x, double y, double z):\n'
        ' test(x, y, z)\n'
        ' return z\n'
    )
    assert generated == expected
def test_autowrap_dummy():
    """The dummy backend records args/returns so codegen can be checked without compiling."""
    x, y, z = symbols('x y z')
    # Uses DummyWrapper to test that codegen works as expected.
    # (expression, expected value, expected args, expected return name)
    cases = [
        (x + y, str(x + y), "x, y", "nameless"),
        (Eq(z, x + y), str(x + y), "x, y", "z"),
        (Eq(z, x + y + z), str(x + y + z), "x, y, z", "z"),
    ]
    for expr, expected_value, expected_args, expected_returns in cases:
        wrapped = autowrap(expr, backend='dummy')
        assert wrapped() == expected_value
        assert wrapped.args == expected_args
        assert wrapped.returns == expected_returns
def test_autowrap_args():
    """args= must cover every input symbol and dictates the argument order."""
    x, y, z = symbols('x y z')

    # An incomplete argument list is rejected.
    raises(CodeGenArgumentListError,
           lambda: autowrap(Eq(z, x + y), backend='dummy', args=[x]))
    wrapped = autowrap(Eq(z, x + y), backend='dummy', args=[y, x])
    assert wrapped() == str(x + y)
    assert wrapped.args == "y, x"
    assert wrapped.returns == "z"

    raises(CodeGenArgumentListError,
           lambda: autowrap(Eq(z, x + y + z), backend='dummy', args=[x, y]))
    wrapped = autowrap(Eq(z, x + y + z), backend='dummy', args=[y, x, z])
    assert wrapped() == str(x + y + z)
    assert wrapped.args == "y, x, z"
    assert wrapped.returns == "z"
def test_autowrap_store_files():
    """tempdir= keeps the generated files in the given directory."""
    x, y = symbols('x y')
    workdir = tempfile.mkdtemp()
    try:
        wrapped = autowrap(x + y, backend='dummy', tempdir=workdir)
        assert wrapped() == str(x + y)
        assert os.access(workdir, os.F_OK)
    finally:
        shutil.rmtree(workdir)
def test_binary_function():
    """binary_function exposes the generated code via the implemented-function hook."""
    x, y = symbols('x y')
    wrapped = binary_function('f', x + y, backend='dummy')
    assert wrapped._imp_() == str(x + y)
def test_ufuncify():
    """ufuncify maps the first argument over an array while broadcasting the rest."""
    x, y = symbols('x y')
    ufunc = ufuncify((x, y), x + y, backend='dummy')
    assert ufunc() == "f(_x[_i], y)"
|
unknown
|
codeparrot/codeparrot-clean
| ||
import {Component, model, ChangeDetectionStrategy} from '@angular/core';
import {CustomCheckbox} from './custom-checkbox';
// Root component demonstrating two-way binding with model() signals:
// [(checked)] on <custom-checkbox> keeps the parent's signals and the
// child's internal state in sync in both directions.
@Component({
  selector: 'app-root',
  imports: [CustomCheckbox],
  template: `
<div class="shopping-app">
<h1>Custom Checkbox Example</h1>
<div class="demo-section">
<!-- Two-way binding with custom components -->
<custom-checkbox [(checked)]="agreedToTerms" label="I agree to the terms" />
<custom-checkbox [(checked)]="enableNotifications" label="Enable notifications" />
<!-- Controls to test two-way binding -->
<div class="controls">
<p>
Terms agreed:
@if (agreedToTerms()) {
Yes
} @else {
No
}
</p>
<p>
Notifications:
@if (enableNotifications()) {
Enabled
} @else {
Disabled
}
</p>
<button (click)="toggleTermsFromParent()">Toggle Terms from Parent</button>
<button (click)="resetAll()">Reset All</button>
</div>
</div>
</div>
`,
  styleUrl: './app.css',
  changeDetection: ChangeDetectionStrategy.OnPush,
})
export class App {
  // Parent signal models; writes from either parent or child propagate both ways.
  agreedToTerms = model(false);
  enableNotifications = model(true);

  // Methods to test two-way binding
  // Flipping the parent signal updates the child checkbox as well.
  toggleTermsFromParent() {
    this.agreedToTerms.set(!this.agreedToTerms());
  }

  // Resets both models to unchecked.
  resetAll() {
    this.agreedToTerms.set(false);
    this.enableNotifications.set(false);
  }
}
|
typescript
|
github
|
https://github.com/angular/angular
|
adev/src/content/tutorials/signals/steps/6-two-way-binding-with-model-signals/answer/src/app/app.ts
|
# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
# This file was automatically generated from src/transformers/models/apertus/modular_apertus.py.
# Do NOT edit this file manually as any edits will be overwritten by the generation of
# the file from the modular. If any change should be done, please apply the change to the
# modular_apertus.py file directly. One of our CI enforces this.
# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
# Copyright 2025 the HuggingFace Inc. team and the Swiss AI Initiative. All rights reserved.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections.abc import Callable
from typing import Optional
import torch
from torch import nn
from ...activations import ACT2CLS, ACT2FN
from ...cache_utils import Cache, DynamicCache
from ...generation import GenerationMixin
from ...integrations import use_kernel_forward_from_hub, use_kernel_func_from_hub, use_kernelized_func
from ...masking_utils import create_causal_mask
from ...modeling_layers import GenericForTokenClassification, GradientCheckpointingLayer
from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
from ...modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...processing_utils import Unpack
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple
from ...utils.generic import maybe_autocast, merge_with_config_defaults
from ...utils.output_capturing import capture_outputs
from .configuration_apertus import ApertusConfig
class ApertusMLP(nn.Module):
    """Feed-forward block: up-projection, activation, down-projection (no gating)."""

    def __init__(self, config):
        super().__init__()
        self.config = config
        self.hidden_size = config.hidden_size
        self.intermediate_size = config.intermediate_size
        self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
        self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
        self.act_fn = ACT2FN[config.hidden_act]
        # xiELU needs the model dtype passed explicitly, so instantiate it from the
        # class registry instead of using the generic ACT2FN entry.
        if config.hidden_act == "xielu":
            self.act_fn = ACT2CLS["xielu"](dtype=config.dtype)

    def forward(self, x):
        projected = self.up_proj(x)
        activated = self.act_fn(projected)
        return self.down_proj(activated)
@use_kernel_forward_from_hub("RMSNorm")
class ApertusRMSNorm(nn.Module):
    """Root-mean-square layer norm with a learned scale (equivalent to T5LayerNorm)."""

    def __init__(self, hidden_size, eps: float = 1e-6) -> None:
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        # Normalize in float32 for numerical stability, then cast back.
        original_dtype = hidden_states.dtype
        states = hidden_states.to(torch.float32)
        mean_square = states.pow(2).mean(-1, keepdim=True)
        normalized = states * torch.rsqrt(mean_square + self.variance_epsilon)
        return self.weight * normalized.to(original_dtype)

    def extra_repr(self):
        return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}"
class ApertusRotaryEmbedding(nn.Module):
    """Produces the cos/sin rotary position tables consumed by apply_rotary_pos_emb."""

    inv_freq: torch.Tensor  # fix linting for `register_buffer`

    def __init__(self, config: ApertusConfig, device=None):
        super().__init__()
        self.max_seq_len_cached = config.max_position_embeddings
        self.original_max_seq_len = config.max_position_embeddings
        self.config = config
        self.rope_type = self.config.rope_parameters["rope_type"]
        # Non-default RoPE variants are looked up in the shared registry;
        # "default" uses the local static implementation below.
        rope_init_fn: Callable = self.compute_default_rope_parameters
        if self.rope_type != "default":
            rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]
        inv_freq, self.attention_scaling = rope_init_fn(self.config, device)
        self.register_buffer("inv_freq", inv_freq, persistent=False)
        # Unmodified copy kept so dynamic RoPE types can restore the original frequencies.
        self.register_buffer("original_inv_freq", inv_freq.clone(), persistent=False)

    @staticmethod
    def compute_default_rope_parameters(
        config: ApertusConfig | None = None,
        device: Optional["torch.device"] = None,
        seq_len: int | None = None,
    ) -> tuple["torch.Tensor", float]:
        """
        Computes the inverse frequencies according to the original RoPE implementation

        Args:
            config ([`~transformers.PreTrainedConfig`]):
                The model configuration.
            device (`torch.device`):
                The device to use for initialization of the inverse frequencies.
            seq_len (`int`, *optional*):
                The current sequence length. Unused for this type of RoPE.
        Returns:
            Tuple of (`torch.Tensor`, `float`), containing the inverse frequencies for the RoPE embeddings and the
            post-processing scaling factor applied to the computed cos/sin (unused in this type of RoPE).
        """
        base = config.rope_parameters["rope_theta"]
        dim = getattr(config, "head_dim", None) or config.hidden_size // config.num_attention_heads
        attention_factor = 1.0  # Unused in this type of RoPE

        # Compute the inverse frequencies
        inv_freq = 1.0 / (
            base ** (torch.arange(0, dim, 2, dtype=torch.int64).to(device=device, dtype=torch.float) / dim)
        )
        return inv_freq, attention_factor

    @torch.no_grad()
    @dynamic_rope_update  # power user: used with advanced RoPE types (e.g. dynamic rope)
    def forward(self, x, position_ids):
        # Outer product of inverse frequencies and positions, computed per batch row.
        inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
        position_ids_expanded = position_ids[:, None, :].float()

        device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
        with maybe_autocast(device_type=device_type, enabled=False):  # Force float32
            freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
            # Duplicate so cos/sin cover the full head dimension (paired with rotate_half).
            emb = torch.cat((freqs, freqs), dim=-1)
            cos = emb.cos() * self.attention_scaling
            sin = emb.sin() * self.attention_scaling

        return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
def rotate_half(x):
    """Rotates half the hidden dims of the input: (x1, x2) -> (-x2, x1)."""
    half = x.shape[-1] // 2
    first, second = x[..., :half], x[..., half:]
    return torch.cat((-second, first), dim=-1)
@use_kernel_func_from_hub("rotary_pos_emb")
def apply_rotary_pos_emb(q, k, cos, sin, unsqueeze_dim=1):
    """Applies Rotary Position Embedding to the query and key tensors.

    Args:
        q (`torch.Tensor`): The query tensor.
        k (`torch.Tensor`): The key tensor.
        cos (`torch.Tensor`): The cosine part of the rotary embedding.
        sin (`torch.Tensor`): The sine part of the rotary embedding.
        unsqueeze_dim (`int`, *optional*, defaults to 1):
            Dimension along which cos/sin are unsqueezed so they broadcast against
            q and k. Use 1 for [batch, heads, seq, head_dim] layouts and 2 for
            [batch, seq, heads, head_dim] layouts.

    Returns:
        `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
    """
    # Insert a broadcast axis at the heads dimension.
    cos, sin = cos.unsqueeze(unsqueeze_dim), sin.unsqueeze(unsqueeze_dim)
    rotated_q = q * cos + rotate_half(q) * sin
    rotated_k = k * cos + rotate_half(k) * sin
    return rotated_q, rotated_k
def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
    """
    Equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep): expands
    (batch, num_key_value_heads, seqlen, head_dim) to
    (batch, num_attention_heads, seqlen, head_dim) for grouped-query attention.
    """
    if n_rep == 1:
        return hidden_states
    batch, kv_heads, seq_len, head_dim = hidden_states.shape
    # expand() is a zero-copy broadcast; reshape() then materializes the repeats.
    expanded = hidden_states[:, :, None, :, :].expand(batch, kv_heads, n_rep, seq_len, head_dim)
    return expanded.reshape(batch, kv_heads * n_rep, seq_len, head_dim)
def eager_attention_forward(
    module: nn.Module,
    query: torch.Tensor,
    key: torch.Tensor,
    value: torch.Tensor,
    attention_mask: torch.Tensor | None,
    scaling: float,
    dropout: float = 0.0,
    **kwargs: Unpack[TransformersKwargs],
):
    """Reference (pure PyTorch) scaled-dot-product attention with GQA key/value repetition."""
    # Expand shared key/value heads so they line up with the query heads.
    keys = repeat_kv(key, module.num_key_value_groups)
    values = repeat_kv(value, module.num_key_value_groups)

    scores = torch.matmul(query, keys.transpose(2, 3)) * scaling
    if attention_mask is not None:
        scores = scores + attention_mask

    # Softmax in float32 for stability, then cast back to the query dtype.
    probs = nn.functional.softmax(scores, dim=-1, dtype=torch.float32).to(query.dtype)
    probs = nn.functional.dropout(probs, p=dropout, training=module.training)

    context = torch.matmul(probs, values)
    context = context.transpose(1, 2).contiguous()
    return context, probs
@use_kernelized_func(apply_rotary_pos_emb)
class ApertusAttention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(self, config: ApertusConfig, layer_idx: int | None = None):
        super().__init__()
        self.config = config
        self.layer_idx = layer_idx
        # Fall back to hidden_size // num_attention_heads when head_dim is absent.
        self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
        # Query heads per key/value head (grouped-query attention).
        self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
        self.scaling = self.head_dim**-0.5
        self.attention_dropout = config.attention_dropout
        self.is_causal = True
        self.q_proj = nn.Linear(
            config.hidden_size, config.num_attention_heads * self.head_dim, bias=config.attention_bias
        )
        self.k_proj = nn.Linear(
            config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
        )
        self.v_proj = nn.Linear(
            config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
        )
        self.o_proj = nn.Linear(
            config.num_attention_heads * self.head_dim, config.hidden_size, bias=config.attention_bias
        )
        # Per-head RMS norms applied to queries and keys (QK-norm) before RoPE.
        self.q_norm = ApertusRMSNorm(self.head_dim, config.rms_norm_eps)
        self.k_norm = ApertusRMSNorm(self.head_dim, config.rms_norm_eps)

    def forward(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: tuple[torch.Tensor, torch.Tensor],
        attention_mask: torch.Tensor | None,
        past_key_values: Cache | None = None,
        cache_position: torch.LongTensor | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple[torch.Tensor, torch.Tensor]:
        input_shape = hidden_states.shape[:-1]
        # Split the projected states into heads: (..., heads, head_dim), heads first.
        hidden_shape = (*input_shape, -1, self.head_dim)

        query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2)
        key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2)
        value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)

        # QK-norm, then rotary embedding; order matters (norm operates on raw projections).
        query_states = self.q_norm(query_states)
        key_states = self.k_norm(key_states)

        cos, sin = position_embeddings
        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)

        if past_key_values is not None:
            # sin and cos are specific to RoPE models; cache_position is needed for the cache update.
            cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
            key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)

        # Dispatch to the configured backend (sdpa/flash/...), defaulting to the eager path.
        attention_interface: Callable = ALL_ATTENTION_FUNCTIONS.get_interface(
            self.config._attn_implementation, eager_attention_forward
        )
        attn_output, attn_weights = attention_interface(
            self,
            query_states,
            key_states,
            value_states,
            attention_mask,
            dropout=0.0 if not self.training else self.attention_dropout,
            scaling=self.scaling,
            **kwargs,
        )

        # Merge heads back into the hidden dimension and project out.
        attn_output = attn_output.reshape(*input_shape, -1).contiguous()
        attn_output = self.o_proj(attn_output)
        return attn_output, attn_weights
class ApertusDecoderLayer(GradientCheckpointingLayer):
    """One transformer block: pre-norm self-attention then a pre-norm MLP, each with a residual."""

    def __init__(self, config: ApertusConfig, layer_idx: int):
        super().__init__()
        self.hidden_size = config.hidden_size
        self.self_attn = ApertusAttention(config=config, layer_idx=layer_idx)
        self.mlp = ApertusMLP(config)
        self.attention_layernorm = ApertusRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.feedforward_layernorm = ApertusRMSNorm(config.hidden_size, eps=config.rms_norm_eps)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: torch.Tensor | None = None,
        position_ids: torch.LongTensor | None = None,
        past_key_values: Cache | None = None,
        use_cache: bool | None = False,
        cache_position: torch.LongTensor | None = None,
        position_embeddings: tuple[torch.Tensor, torch.Tensor] | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple[torch.Tensor]:
        # Self-attention sub-block (pre-norm + residual).
        attn_input = self.attention_layernorm(hidden_states)
        attn_output, _ = self.self_attn(
            hidden_states=attn_input,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            use_cache=use_cache,
            cache_position=cache_position,
            position_embeddings=position_embeddings,
            **kwargs,
        )
        hidden_states = hidden_states + attn_output

        # Feed-forward sub-block (pre-norm + residual).
        mlp_input = self.feedforward_layernorm(hidden_states)
        hidden_states = hidden_states + self.mlp(mlp_input)
        return hidden_states
@auto_docstring
class ApertusPreTrainedModel(PreTrainedModel):
    """Base class carrying the config type and feature flags shared by all Apertus models."""

    config: ApertusConfig
    base_model_prefix = "model"
    supports_gradient_checkpointing = True
    # NOTE(review): presumably keeps each decoder layer on one device when sharding
    # with `device_map` — standard PreTrainedModel semantics, confirm upstream.
    _no_split_modules = ["ApertusDecoderLayer"]
    _skip_keys_device_placement = ["past_key_values"]
    # Supported attention backends and compilation capabilities.
    _supports_flash_attn = True
    _supports_sdpa = True
    _supports_flex_attn = True
    _can_compile_fullgraph = True
    _supports_attention_backend = True
    # Module types whose per-layer outputs can be captured (see capture_outputs).
    _can_record_outputs = {
        "hidden_states": ApertusDecoderLayer,
        "attentions": ApertusAttention,
    }
@auto_docstring
class ApertusModel(ApertusPreTrainedModel):
    """Decoder-only transformer backbone: embeddings, decoder layers, final RMS norm."""

    def __init__(self, config: ApertusConfig):
        super().__init__(config)
        self.padding_idx = config.pad_token_id
        self.vocab_size = config.vocab_size

        self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
        self.layers = nn.ModuleList(
            [ApertusDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
        )
        self.norm = ApertusRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        # Single rotary-embedding module shared by every layer.
        self.rotary_emb = ApertusRotaryEmbedding(config=config)
        self.gradient_checkpointing = False

        # Initialize weights and apply final processing
        self.post_init()

    @merge_with_config_defaults
    @capture_outputs
    @auto_docstring
    def forward(
        self,
        input_ids: torch.LongTensor | None = None,
        attention_mask: torch.Tensor | None = None,
        position_ids: torch.LongTensor | None = None,
        past_key_values: Cache | None = None,
        inputs_embeds: torch.FloatTensor | None = None,
        cache_position: torch.LongTensor | None = None,
        use_cache: bool | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> BaseModelOutputWithPast:
        # Exactly one of input_ids / inputs_embeds must be provided.
        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError("You must specify exactly one of input_ids or inputs_embeds")

        if inputs_embeds is None:
            inputs_embeds: torch.Tensor = self.embed_tokens(input_ids)

        # Lazily create a cache when caching is requested but none was passed.
        if use_cache and past_key_values is None:
            past_key_values = DynamicCache(config=self.config)

        # Positions of the new tokens, offset by whatever is already cached.
        if cache_position is None:
            past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
            cache_position: torch.Tensor = (
                torch.arange(inputs_embeds.shape[1], device=inputs_embeds.device) + past_seen_tokens
            )

        if position_ids is None:
            position_ids = cache_position.unsqueeze(0)

        causal_mask = create_causal_mask(
            config=self.config,
            inputs_embeds=inputs_embeds,
            attention_mask=attention_mask,
            cache_position=cache_position,
            past_key_values=past_key_values,
            position_ids=position_ids,
        )

        hidden_states = inputs_embeds
        # cos/sin tables computed once here and reused by every decoder layer.
        position_embeddings = self.rotary_emb(hidden_states, position_ids=position_ids)

        for decoder_layer in self.layers[: self.config.num_hidden_layers]:
            hidden_states = decoder_layer(
                hidden_states,
                attention_mask=causal_mask,
                position_embeddings=position_embeddings,
                position_ids=position_ids,
                past_key_values=past_key_values,
                use_cache=use_cache,
                cache_position=cache_position,
                **kwargs,
            )

        hidden_states = self.norm(hidden_states)
        return BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=past_key_values,
        )
@auto_docstring
class ApertusForCausalLM(ApertusPreTrainedModel, GenerationMixin):
    """ApertusModel with a tied language-modeling head for next-token prediction."""

    # lm_head shares weights with the input embeddings.
    _tied_weights_keys = {"lm_head.weight": "model.embed_tokens.weight"}
    # Tensor-/pipeline-parallel layout hints.
    _tp_plan = {"lm_head": "colwise_gather_output"}
    _pp_plan = {"lm_head": (["hidden_states"], ["logits"])}

    def __init__(self, config):
        super().__init__(config)
        self.model = ApertusModel(config)
        self.vocab_size = config.vocab_size
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)

        # Initialize weights and apply final processing
        self.post_init()

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: torch.LongTensor | None = None,
        attention_mask: torch.Tensor | None = None,
        position_ids: torch.LongTensor | None = None,
        past_key_values: Cache | None = None,
        inputs_embeds: torch.FloatTensor | None = None,
        labels: torch.LongTensor | None = None,
        use_cache: bool | None = None,
        cache_position: torch.LongTensor | None = None,
        logits_to_keep: int | torch.Tensor = 0,
        **kwargs: Unpack[TransformersKwargs],
    ) -> CausalLMOutputWithPast:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
            config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
            (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.

        Example:

        ```python
        >>> from transformers import AutoTokenizer, ApertusForCausalLM

        >>> model = ApertusForCausalLM.from_pretrained("swiss-ai/Apertus-8B")
        >>> tokenizer = AutoTokenizer.from_pretrained("swiss-ai/Apertus-8B")

        >>> prompt = "Hey, are you conscious? Can you talk to me?"
        >>> inputs = tokenizer(prompt, return_tensors="pt")

        >>> # Generate
        >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
        >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
        "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
        ```"""
        outputs: BaseModelOutputWithPast = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            cache_position=cache_position,
            **kwargs,
        )

        hidden_states = outputs.last_hidden_state
        # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
        slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
        logits = self.lm_head(hidden_states[:, slice_indices, :])

        loss = None
        if labels is not None:
            loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs)

        return CausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
class ApertusForTokenClassification(GenericForTokenClassification, ApertusPreTrainedModel):
    # Token-classification head is provided entirely by the generic mixin.
    pass


# Public API of this module.
__all__ = ["ApertusModel", "ApertusForCausalLM", "ApertusForTokenClassification", "ApertusPreTrainedModel"]
|
python
|
github
|
https://github.com/huggingface/transformers
|
src/transformers/models/apertus/modeling_apertus.py
|
# frozen_string_literal: true

module Jekyll
  class LiquidRenderer
    class Table
      # Columns shown per file, in addition to the filename.
      GAUGES = [:count, :bytes, :time].freeze

      def initialize(stats)
        @stats = stats
      end

      # Render the profiling stats as a formatted table string.
      def to_s(num_of_rows = 50)
        Jekyll::Profiler.tabulate(data_for_table(num_of_rows))
      end

      private

      # Header row plus the top `num_of_rows` files, sorted by time spent (descending).
      def data_for_table(num_of_rows)
        top_files = @stats.sort_by { |_, file_stats| -file_stats[:time] }.slice(0, num_of_rows)
        rows = top_files.map do |filename, file_stats|
          [
            filename,
            file_stats[:count].to_s,
            format_bytes(file_stats[:bytes]),
            format("%.3f", file_stats[:time]),
          ]
        end
        rows.unshift(header_labels)
      end

      def header_labels
        GAUGES.map { |gauge| gauge.to_s.capitalize }.unshift("Filename")
      end

      # Bytes rendered in kibibytes with two decimals, e.g. "12.34K".
      def format_bytes(bytes)
        format("%.2fK", bytes / 1024.0)
      end
    end
  end
end
|
ruby
|
github
|
https://github.com/jekyll/jekyll
|
lib/jekyll/liquid_renderer/table.rb
|
#!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unit tests for git_rebase_update.py"""
import os
import sys
# Make the depot_tools checkout root importable so the scripts under test
# (git_rebase_update, git_common, ...) can be imported directly in setUpClass.
DEPOT_TOOLS_ROOT = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, DEPOT_TOOLS_ROOT)
from testing_support import coverage_utils
from testing_support import git_test_utils
class GitRebaseUpdateTest(git_test_utils.GitRepoReadWriteTestBase):
REPO_SCHEMA = """
A B C D E F G
B H I J K
J L
"""
@classmethod
def getRepoContent(cls, commit):
    """Every commit X gets a single file named X whose data is X."""
    return {commit: {'data': commit}}
@classmethod
def setUpClass(cls):
    """Import the git scripts under test once and enable git_common TEST_MODE."""
    super(GitRebaseUpdateTest, cls).setUpClass()
    import git_rebase_update, git_new_branch, git_reparent_branch, git_common
    import git_rename_branch
    # Short aliases used throughout the tests.
    cls.reup = git_rebase_update
    cls.rp = git_reparent_branch
    cls.nb = git_new_branch
    cls.mv = git_rename_branch
    cls.gc = git_common
    cls.gc.TEST_MODE = True
def setUp(self):
    """Create an 'origin' repo whose history extends self.repo's, wire it up as
    a remote, and set up the upstream chain branch_L -> branch_K -> branch_G
    -> origin/master."""
    super(GitRebaseUpdateTest, self).setUp()
    # Include branch_K, branch_L to make sure that ABCDEFG all get the
    # same commit hashes as self.repo. Otherwise they get committed with the
    # wrong timestamps, due to commit ordering.
    # TODO(iannucci): Make commit timestamps deterministic in left to right, top
    # to bottom order, not in lexi-topographical order.
    origin_schema = git_test_utils.GitRepoSchema("""
A B C D E F G M N O
B H I J K
J L
""", self.getRepoContent)
    self.origin = origin_schema.reify()
    self.origin.git('checkout', 'master')
    # Only the commits matter on origin; drop its extra branch refs.
    self.origin.git('branch', '-d', *['branch_'+l for l in 'KLG'])

    self.repo.git('remote', 'add', 'origin', self.origin.repo_path)
    self.repo.git('config', '--add', 'remote.origin.fetch',
                  '+refs/tags/*:refs/tags/*')
    # Pretend the local view of origin/master is still at E (behind origin).
    self.repo.git('update-ref', 'refs/remotes/origin/master', 'tag_E')
    self.repo.git('branch', '--set-upstream-to', 'branch_G', 'branch_K')
    self.repo.git('branch', '--set-upstream-to', 'branch_K', 'branch_L')
    self.repo.git('branch', '--set-upstream-to', 'origin/master', 'branch_G')

    self.repo.to_schema_refs += ['origin/master']
def tearDown(self):
    # Remove the scratch origin repo created in setUp before normal teardown.
    self.origin.nuke()
    super(GitRebaseUpdateTest, self).tearDown()
def testRebaseUpdate(self):
    """End-to-end git rebase-update scenario: builds a branch tree with
    intermediate/empty/dormant branches, then checks rebasing, branch cleanup,
    reparenting, auto-freeze/thaw, and renaming."""
    self.repo.git('checkout', 'branch_K')

    # New branch 'foobar' starts at origin/master.
    self.repo.run(self.nb.main, ['foobar'])
    self.assertEqual(self.repo.git('rev-parse', 'HEAD').stdout,
                     self.repo.git('rev-parse', 'origin/master').stdout)

    with self.repo.open('foobar', 'w') as f:
        f.write('this is the foobar file')
    self.repo.git('add', 'foobar')
    self.repo.git_commit('foobar1')

    with self.repo.open('foobar', 'w') as f:
        f.write('totes the Foobar file')
    self.repo.git_commit('foobar2')

    # Two empty intermediate branches plus a sub-branch with a real commit.
    self.repo.run(self.nb.main, ['--upstream-current', 'int1_foobar'])
    self.repo.run(self.nb.main, ['--upstream-current', 'int2_foobar'])
    self.repo.run(self.nb.main, ['--upstream-current', 'sub_foobar'])
    with self.repo.open('foobar', 'w') as f:
        f.write('some more foobaring')
    self.repo.git('add', 'foobar')
    self.repo.git_commit('foobar3')

    self.repo.git('checkout', 'branch_K')
    self.repo.run(self.nb.main, ['--upstream-current', 'sub_K'])
    with self.repo.open('K', 'w') as f:
        f.write('This depends on K')
    self.repo.git_commit('sub_K')

    # A dormant branch hanging off commit A; rebase-update must leave it alone.
    self.repo.run(self.nb.main, ['old_branch'])
    self.repo.git('reset', '--hard', self.repo['A'])
    with self.repo.open('old_file', 'w') as f:
        f.write('old_files we want to keep around')
    self.repo.git('add', 'old_file')
    self.repo.git_commit('old_file')
    self.repo.git('config', 'branch.old_branch.dormant', 'true')

    self.repo.git('checkout', 'origin/master')

    self.assertSchema("""
A B H I J K sub_K
J L
B C D E foobar1 foobar2 foobar3
E F G
A old_file
""")
    self.assertEquals(self.repo['A'], self.origin['A'])
    self.assertEquals(self.repo['E'], self.origin['E'])

    # Uncommitted file: rebase-update refuses on a detached HEAD with changes.
    with self.repo.open('bob', 'wb') as f:
        f.write('testing auto-freeze/thaw')

    output, _ = self.repo.capture_stdio(self.reup.main)
    self.assertIn('Cannot rebase-update', output)

    self.repo.run(self.nb.main, ['empty_branch'])
    self.repo.run(self.nb.main, ['--upstream-current', 'empty_branch2'])
    self.repo.git('checkout', 'branch_K')

    # Full rebase-update run: everything rebases, merged/empty branches die.
    output, _ = self.repo.capture_stdio(self.reup.main)

    self.assertIn('Rebasing: branch_G', output)
    self.assertIn('Rebasing: branch_K', output)
    self.assertIn('Rebasing: branch_L', output)
    self.assertIn('Rebasing: foobar', output)
    self.assertIn('Rebasing: sub_K', output)
    self.assertIn('Deleted branch branch_G', output)
    self.assertIn('Deleted branch empty_branch', output)
    self.assertIn('Deleted branch empty_branch2', output)
    self.assertIn('Deleted branch int1_foobar', output)
    self.assertIn('Deleted branch int2_foobar', output)
    self.assertIn('Reparented branch_K to track origin/master', output)
    self.assertIn('Reparented sub_foobar to track foobar', output)

    self.assertSchema("""
A B C D E F G M N O H I J K sub_K
K L
O foobar1 foobar2 foobar3
A old_file
""")

    # Second run is a no-op: all branches already up to date.
    output, _ = self.repo.capture_stdio(self.reup.main)
    self.assertIn('branch_K up-to-date', output)
    self.assertIn('branch_L up-to-date', output)
    self.assertIn('foobar up-to-date', output)
    self.assertIn('sub_K up-to-date', output)

    # The uncommitted 'bob' file survived the freeze/thaw cycle untouched.
    with self.repo.open('bob') as f:
        self.assertEquals('testing auto-freeze/thaw', f.read())

    self.assertEqual(self.repo.git('status', '--porcelain').stdout, '?? bob\n')

    # Reparenting: argument validation...
    self.repo.git('checkout', 'origin/master')
    _, err = self.repo.capture_stdio(self.rp.main, [])
    self.assertIn('Must specify new parent somehow', err)
    _, err = self.repo.capture_stdio(self.rp.main, ['foobar'])
    self.assertIn('Must be on the branch', err)

    # ...then reparent branch_K (and its descendants) onto foobar.
    self.repo.git('checkout', 'branch_K')
    _, err = self.repo.capture_stdio(self.rp.main, ['origin/master'])
    self.assertIn('Cannot reparent a branch to its existing parent', err)
    output, _ = self.repo.capture_stdio(self.rp.main, ['foobar'])
    self.assertIn('Rebasing: branch_K', output)
    self.assertIn('Rebasing: sub_K', output)
    self.assertIn('Rebasing: branch_L', output)

    self.assertSchema("""
A B C D E F G M N O foobar1 foobar2 H I J K L
foobar2 foobar3
K sub_K
A old_file
""")

    # A conflicting reparent attempt fails mid-rebase and must be aborted.
    self.repo.git('checkout', 'sub_K')
    output, _ = self.repo.capture_stdio(self.rp.main, ['foobar'])
    self.assertIn('Squashing failed', output)
    self.assertTrue(self.repo.run(self.gc.in_rebase))
    self.repo.git('rebase', '--abort')
    self.assertIsNone(self.repo.run(self.gc.thaw))

    self.assertSchema("""
A B C D E F G M N O foobar1 foobar2 H I J K L
foobar2 foobar3
A old_file
K sub_K
""")

    self.assertEqual(self.repo.git('status', '--porcelain').stdout, '?? bob\n')

    branches = self.repo.run(set, self.gc.branches())
    self.assertEqual(branches, {'branch_K', 'master', 'sub_K', 'root_A',
                                'branch_L', 'old_branch', 'foobar',
                                'sub_foobar'})

    # Renaming: from the branch itself, then by explicit old/new name.
    self.repo.git('checkout', 'branch_K')
    self.repo.run(self.mv.main, ['special_K'])

    branches = self.repo.run(set, self.gc.branches())
    self.assertEqual(branches, {'special_K', 'master', 'sub_K', 'root_A',
                                'branch_L', 'old_branch', 'foobar',
                                'sub_foobar'})

    self.repo.git('checkout', 'origin/master')
    _, err = self.repo.capture_stdio(self.mv.main, ['special_K', 'cool branch'])
    self.assertIn('fatal: \'cool branch\' is not a valid branch name.', err)

    self.repo.run(self.mv.main, ['special_K', 'cool_branch'])
    branches = self.repo.run(set, self.gc.branches())
    # This check fails with git 2.4 (see crbug.com/487172)
    self.assertEqual(branches, {'cool_branch', 'master', 'sub_K', 'root_A',
                                'branch_L', 'old_branch', 'foobar',
                                'sub_foobar'})

    _, branch_tree = self.repo.run(self.gc.get_branch_tree)
    self.assertEqual(branch_tree['sub_K'], 'foobar')
  def testRebaseConflicts(self):
    """Exercises reup when one branch conflicts and another squash-rebases.

    branch_K gains a conflicting commit (marked dormant after the failed
    rebase); branch_L is rewritten so its commits cancel out and can be
    squashed away once its content has landed on origin/master.
    """
    # Pretend that branch_L landed
    self.origin.git('checkout', 'master')
    with self.origin.open('L', 'w') as f:
      f.write('L')
    self.origin.git('add', 'L')
    self.origin.git_commit('L')
    # Add a commit to branch_K so that things fail
    self.repo.git('checkout', 'branch_K')
    with self.repo.open('M', 'w') as f:
      f.write('NOPE')
    self.repo.git('add', 'M')
    self.repo.git_commit('K NOPE')
    # Add commits to branch_L which will work when squashed
    self.repo.git('checkout', 'branch_L')
    self.repo.git('reset', 'branch_L~')
    with self.repo.open('L', 'w') as f:
      f.write('NOPE')
    self.repo.git('add', 'L')
    self.repo.git_commit('L NOPE')
    with self.repo.open('L', 'w') as f:
      f.write('L')
    self.repo.git('add', 'L')
    self.repo.git_commit('L YUP')
    # start on a branch which will be deleted
    self.repo.git('checkout', 'branch_G')
    output, _ = self.repo.capture_stdio(self.reup.main)
    self.assertIn('branch.branch_K.dormant true', output)
    # A second run must notice the rebase left in progress by the first.
    output, _ = self.repo.capture_stdio(self.reup.main)
    self.assertIn('Rebase in progress', output)
    # Resolve the conflict in favor of upstream and skip the bad commit.
    self.repo.git('checkout', '--theirs', 'M')
    self.repo.git('rebase', '--skip')
    output, _ = self.repo.capture_stdio(self.reup.main)
    self.assertIn('Failed! Attempting to squash', output)
    self.assertIn('Deleted branch branch_G', output)
    self.assertIn('Deleted branch branch_L', output)
    self.assertIn('\'branch_G\' was merged', output)
    self.assertIn('checking out \'origin/master\'', output)
  def testRebaseConflictsKeepGoing(self):
    """Same conflict setup as testRebaseConflicts, but with ``-k``:
    reup should report the conflicting branch and continue past it."""
    # Pretend that branch_L landed
    self.origin.git('checkout', 'master')
    with self.origin.open('L', 'w') as f:
      f.write('L')
    self.origin.git('add', 'L')
    self.origin.git_commit('L')
    # Add a commit to branch_K so that things fail
    self.repo.git('checkout', 'branch_K')
    with self.repo.open('M', 'w') as f:
      f.write('NOPE')
    self.repo.git('add', 'M')
    self.repo.git_commit('K NOPE')
    # Add commits to branch_L which will work when squashed
    self.repo.git('checkout', 'branch_L')
    self.repo.git('reset', 'branch_L~')
    with self.repo.open('L', 'w') as f:
      f.write('NOPE')
    self.repo.git('add', 'L')
    self.repo.git_commit('L NOPE')
    with self.repo.open('L', 'w') as f:
      f.write('L')
    self.repo.git('add', 'L')
    self.repo.git_commit('L YUP')
    # start on a branch which will be deleted
    self.repo.git('checkout', 'branch_G')
    # Un-dormant branch_K so the rebase is attempted (and fails) again.
    self.repo.git('config', 'branch.branch_K.dormant', 'false')
    output, _ = self.repo.capture_stdio(self.reup.main, ['-k'])
    self.assertIn('--keep-going set, continuing with next branch.', output)
    self.assertIn('could not be cleanly rebased:', output)
    self.assertIn('  branch_K', output)
def testTrackTag(self):
self.origin.git('tag', 'lkgr', self.origin['M'])
self.repo.git('tag', 'lkgr', self.repo['D'])
self.repo.git('config', 'branch.branch_G.remote', '.')
self.repo.git('config', 'branch.branch_G.merge', 'refs/tags/lkgr')
self.assertIn(
'fatal: \'foo bar\' is not a valid branch name',
self.repo.capture_stdio(self.nb.main, ['--lkgr', 'foo bar'])[1])
self.repo.run(self.nb.main, ['--lkgr', 'foobar'])
with self.repo.open('foobar', 'w') as f:
f.write('this is the foobar file')
self.repo.git('add', 'foobar')
self.repo.git_commit('foobar1')
with self.repo.open('foobar', 'w') as f:
f.write('totes the Foobar file')
self.repo.git_commit('foobar2')
self.assertSchema("""
A B H I J K
J L
B C D E F G
D foobar1 foobar2
""")
self.assertEquals(self.repo['A'], self.origin['A'])
self.assertEquals(self.repo['G'], self.origin['G'])
output, _ = self.repo.capture_stdio(self.reup.main)
self.assertIn('Fetching', output)
self.assertIn('Rebasing: branch_G', output)
self.assertIn('Rebasing: branch_K', output)
self.assertIn('Rebasing: branch_L', output)
self.assertIn('Rebasing: foobar', output)
self.assertEquals(self.repo.git('rev-parse', 'lkgr').stdout.strip(),
self.origin['M'])
self.assertSchema("""
A B C D E F G M N O
M H I J K L
M foobar1 foobar2
""")
_, err = self.repo.capture_stdio(self.rp.main, ['tag F'])
self.assertIn('fatal: invalid reference', err)
output, _ = self.repo.capture_stdio(self.rp.main, ['tag_F'])
self.assertIn('to track tag_F [tag] (was lkgr [tag])', output)
self.assertSchema("""
A B C D E F G M N O
M H I J K L
F foobar1 foobar2
""")
output, _ = self.repo.capture_stdio(self.rp.main, ['--lkgr'])
self.assertIn('to track lkgr [tag] (was tag_F [tag])', output)
self.assertSchema("""
A B C D E F G M N O
M H I J K L
M foobar1 foobar2
""")
output, _ = self.repo.capture_stdio(self.rp.main, ['--root'])
self.assertIn('to track origin/master (was lkgr [tag])', output)
self.assertSchema("""
A B C D E F G M N O foobar1 foobar2
M H I J K L
""")
  def testReparentBranchWithoutUpstream(self):
    """git-reparent-branch must fail cleanly when the current branch has
    no configured upstream."""
    self.repo.git('branch', 'nerp')
    self.repo.git('checkout', 'nerp')
    _, err = self.repo.capture_stdio(self.rp.main, ['branch_K'])
    self.assertIn('Unable to determine nerp@{upstream}', err)
# Run these tests under the depot_tools coverage harness, measuring the
# four git-* scripts exercised above; the process exit code reflects both
# test results and coverage thresholds.
if __name__ == '__main__':
  sys.exit(coverage_utils.covered_main((
    os.path.join(DEPOT_TOOLS_ROOT, 'git_rebase_update.py'),
    os.path.join(DEPOT_TOOLS_ROOT, 'git_new_branch.py'),
    os.path.join(DEPOT_TOOLS_ROOT, 'git_reparent_branch.py'),
    os.path.join(DEPOT_TOOLS_ROOT, 'git_rename_branch.py')
  )))
|
unknown
|
codeparrot/codeparrot-clean
| ||
from __future__ import absolute_import
import logging
import os
import re
import shutil
import sys
import tempfile
import warnings
import zipfile
from distutils.util import change_root
from distutils import sysconfig
from email.parser import FeedParser
from pip._vendor import pkg_resources, six
from pip._vendor.distlib.markers import interpret as markers_interpret
from pip._vendor.six.moves import configparser
import pip.wheel
from pip.compat import native_str, WINDOWS
from pip.download import is_url, url_to_path, path_to_url, is_archive_file
from pip.exceptions import (
InstallationError, UninstallationError, UnsupportedWheel,
)
from pip.locations import (
bin_py, running_under_virtualenv, PIP_DELETE_MARKER_FILENAME, bin_user,
)
from pip.utils import (
display_path, rmtree, ask_path_exists, backup_dir, is_installable_dir,
dist_in_usersite, dist_in_site_packages, egg_link_path, make_path_relative,
call_subprocess, read_text_file, FakeFile, _make_build_dir, ensure_dir,
get_installed_version
)
from pip.utils.deprecation import RemovedInPip8Warning
from pip.utils.logging import indent_log
from pip.req.req_uninstall import UninstallPathSet
from pip.vcs import vcs
from pip.wheel import move_wheel_files, Wheel
from pip._vendor.packaging.version import Version
logger = logging.getLogger(__name__)
def _strip_extras(path):
m = re.match(r'^(.+)(\[[^\]]+\])$', path)
extras = None
if m:
path_no_extras = m.group(1)
extras = m.group(2)
else:
path_no_extras = path
return path_no_extras, extras
class InstallRequirement(object):
    """A single requirement to install, parsed from a specifier string,
    an editable spec, a local path/archive, or a URL."""

    def __init__(self, req, comes_from, source_dir=None, editable=False,
                 link=None, as_egg=False, update=True, editable_options=None,
                 pycompile=True, markers=None, isolated=False, options=None,
                 wheel_cache=None, constraint=False):
        # ``req`` may arrive as a plain string; normalize it to a
        # pkg_resources.Requirement and pick up any declared extras.
        self.extras = ()
        if isinstance(req, six.string_types):
            req = pkg_resources.Requirement.parse(req)
            self.extras = req.extras
        self.req = req
        self.comes_from = comes_from
        self.constraint = constraint
        self.source_dir = source_dir
        self.editable = editable
        if editable_options is None:
            editable_options = {}
        self.editable_options = editable_options
        # Must be set before ``self.link``: the link property setter
        # consults the wheel cache.
        self._wheel_cache = wheel_cache
        self.link = link
        self.as_egg = as_egg
        self.markers = markers
        self._egg_info_path = None
        # This holds the pkg_resources.Distribution object if this requirement
        # is already available:
        self.satisfied_by = None
        # This hold the pkg_resources.Distribution object if this requirement
        # conflicts with another installed distribution:
        self.conflicts_with = None
        # Temporary build location
        self._temp_build_dir = None
        # Used to store the global directory where the _temp_build_dir should
        # have been created. Cf _correct_build_location method.
        self._ideal_global_dir = None
        # True if the editable should be updated:
        self.update = update
        # Set to True after successful installation
        self.install_succeeded = None
        # UninstallPathSet of uninstalled distribution (for possible rollback)
        self.uninstalled = None
        self.use_user_site = False
        self.target_dir = None
        self.options = options if options else {}
        self.pycompile = pycompile
        # Set to True after successful preparation of this requirement
        self.prepared = False
        self.isolated = isolated
    @classmethod
    def from_editable(cls, editable_req, comes_from=None, default_vcs=None,
                      isolated=False, options=None, wheel_cache=None,
                      constraint=False):
        """Alternate constructor for ``-e`` (editable) requirement strings."""
        from pip.index import Link
        name, url, extras_override, editable_options = parse_editable(
            editable_req, default_vcs)
        # Local file: URLs are checked out in place; everything else gets
        # obtained into a source dir later.
        if url.startswith('file:'):
            source_dir = url_to_path(url)
        else:
            source_dir = None
        res = cls(name, comes_from, source_dir=source_dir,
                  editable=True,
                  link=Link(url),
                  constraint=constraint,
                  editable_options=editable_options,
                  isolated=isolated,
                  options=options if options else {},
                  wheel_cache=wheel_cache)
        if extras_override is not None:
            res.extras = extras_override
        return res
    @classmethod
    def from_line(
            cls, name, comes_from=None, isolated=False, options=None,
            wheel_cache=None, constraint=False):
        """Creates an InstallRequirement from a name, which might be a
        requirement, directory containing 'setup.py', filename, or URL.
        """
        from pip.index import Link
        # Environment markers are separated by ';' (with a space required
        # after URLs so the ';' can't be part of the URL itself).
        if is_url(name):
            marker_sep = '; '
        else:
            marker_sep = ';'
        if marker_sep in name:
            name, markers = name.split(marker_sep, 1)
            markers = markers.strip()
            if not markers:
                markers = None
        else:
            markers = None
        name = name.strip()
        req = None
        path = os.path.normpath(os.path.abspath(name))
        link = None
        extras = None
        if is_url(name):
            link = Link(name)
        else:
            p, extras = _strip_extras(path)
            if (os.path.isdir(p) and
                    (os.path.sep in name or name.startswith('.'))):
                if not is_installable_dir(p):
                    raise InstallationError(
                        "Directory %r is not installable. File 'setup.py' "
                        "not found." % name
                    )
                link = Link(path_to_url(p))
            elif is_archive_file(p):
                if not os.path.isfile(p):
                    logger.warning(
                        'Requirement %r looks like a filename, but the '
                        'file does not exist',
                        name
                    )
                link = Link(path_to_url(p))
        # it's a local file, dir, or url
        if link:
            # Handle relative file URLs
            if link.scheme == 'file' and re.search(r'\.\./', link.url):
                link = Link(
                    path_to_url(os.path.normpath(os.path.abspath(link.path))))
            # wheel file
            if link.is_wheel:
                wheel = Wheel(link.filename)  # can raise InvalidWheelFilename
                if not wheel.supported():
                    raise UnsupportedWheel(
                        "%s is not a supported wheel on this platform." %
                        wheel.filename
                    )
                req = "%s==%s" % (wheel.name, wheel.version)
            else:
                # set the req to the egg fragment. when it's not there, this
                # will become an 'unnamed' requirement
                req = link.egg_fragment
        # a requirement specifier
        else:
            req = name
        options = options if options else {}
        res = cls(req, comes_from, link=link, markers=markers,
                  isolated=isolated, options=options,
                  wheel_cache=wheel_cache, constraint=constraint)
        if extras:
            res.extras = pkg_resources.Requirement.parse('__placeholder__' +
                                                         extras).extras
        return res
    def __str__(self):
        """Human-readable form: spec, link, install location and provenance."""
        if self.req:
            s = str(self.req)
            if self.link:
                s += ' from %s' % self.link.url
        else:
            s = self.link.url if self.link else None
        if self.satisfied_by is not None:
            s += ' in %s' % display_path(self.satisfied_by.location)
        if self.comes_from:
            # comes_from is either a plain string or another
            # InstallRequirement-like object exposing from_path().
            if isinstance(self.comes_from, six.string_types):
                comes_from = self.comes_from
            else:
                comes_from = self.comes_from.from_path()
            if comes_from:
                s += ' (from %s)' % comes_from
        return s
def __repr__(self):
return '<%s object: %s editable=%r>' % (
self.__class__.__name__, str(self), self.editable)
    def populate_link(self, finder, upgrade):
        """Ensure that if a link can be found for this, that it is found.
        Note that self.link may still be None - if Upgrade is False and the
        requirement is already installed.
        """
        if self.link is None:
            self.link = finder.find_requirement(self, upgrade)
    @property
    def link(self):
        # Backed by _link; see the setter for wheel-cache substitution.
        return self._link
    @link.setter
    def link(self, link):
        # Lookup a cached wheel, if possible.
        if self._wheel_cache is None:
            self._link = link
        else:
            self._link = self._wheel_cache.cached_wheel(link, self.name)
            if self._link != link:
                logger.debug('Using cached wheel link: %s', self._link)
    @property
    def specifier(self):
        # Version specifier set of the underlying Requirement.
        return self.req.specifier
    def from_path(self):
        """Return a ``spec->parent->grandparent`` provenance chain string,
        or None for an unnamed requirement."""
        if self.req is None:
            return None
        s = str(self.req)
        if self.comes_from:
            if isinstance(self.comes_from, six.string_types):
                comes_from = self.comes_from
            else:
                # Recurse through the chain of parent requirements.
                comes_from = self.comes_from.from_path()
            if comes_from:
                s += '->' + comes_from
        return s
    def build_location(self, build_dir):
        """Return the directory this requirement should be built in,
        creating a temp dir when the requirement is still unnamed."""
        if self._temp_build_dir is not None:
            return self._temp_build_dir
        if self.req is None:
            # for requirement via a path to a directory: the name of the
            # package is not available yet so we create a temp directory
            # Once run_egg_info will have run, we'll be able
            # to fix it via _correct_build_location
            self._temp_build_dir = tempfile.mkdtemp('-build', 'pip-')
            self._ideal_build_dir = build_dir
            return self._temp_build_dir
        if self.editable:
            name = self.name.lower()
        else:
            name = self.name
        # FIXME: Is there a better place to create the build_dir? (hg and bzr
        # need this)
        if not os.path.exists(build_dir):
            logger.debug('Creating directory %s', build_dir)
            _make_build_dir(build_dir)
        return os.path.join(build_dir, name)
    def _correct_build_location(self):
        """Move self._temp_build_dir to self._ideal_build_dir/self.req.name
        For some requirements (e.g. a path to a directory), the name of the
        package is not available until we run egg_info, so the build_location
        will return a temporary directory and store the _ideal_build_dir.
        This is only called by self.egg_info_path to fix the temporary build
        directory.
        """
        # Already relocated (source_dir set) -> nothing to do.
        if self.source_dir is not None:
            return
        assert self.req is not None
        assert self._temp_build_dir
        assert self._ideal_build_dir
        old_location = self._temp_build_dir
        # Clear first so build_location() computes the named path.
        self._temp_build_dir = None
        new_location = self.build_location(self._ideal_build_dir)
        if os.path.exists(new_location):
            raise InstallationError(
                'A package already exists in %s; please remove it to continue'
                % display_path(new_location))
        logger.debug(
            'Moving package %s from %s to new location %s',
            self, display_path(old_location), display_path(new_location),
        )
        shutil.move(old_location, new_location)
        self._temp_build_dir = new_location
        self._ideal_build_dir = None
        self.source_dir = new_location
        # Cached egg-info path is stale after the move.
        self._egg_info_path = None
    @property
    def name(self):
        """Project name of the requirement, or None while still unnamed."""
        if self.req is None:
            return None
        return native_str(self.req.project_name)
    @property
    def setup_py(self):
        """Absolute path of this requirement's setup.py (honoring any
        editable ``subdirectory`` option). Raises if setuptools is absent."""
        assert self.source_dir, "No source dir for %s" % self
        try:
            import setuptools  # noqa
        except ImportError:
            # Setuptools is not available
            raise InstallationError(
                "setuptools must be installed to install from a source "
                "distribution"
            )
        setup_file = 'setup.py'
        if self.editable_options and 'subdirectory' in self.editable_options:
            setup_py = os.path.join(self.source_dir,
                                    self.editable_options['subdirectory'],
                                    setup_file)
        else:
            setup_py = os.path.join(self.source_dir, setup_file)
        # Python2 __file__ should not be unicode
        if six.PY2 and isinstance(setup_py, six.text_type):
            setup_py = setup_py.encode(sys.getfilesystemencoding())
        return setup_py
    def run_egg_info(self):
        """Generate source metadata by running ``setup.py egg_info`` in a
        subprocess; for unnamed requirements, fill in self.req from the
        produced PKG-INFO and relocate the build dir."""
        assert self.source_dir
        if self.name:
            logger.debug(
                'Running setup.py (path:%s) egg_info for package %s',
                self.setup_py, self.name,
            )
        else:
            logger.debug(
                'Running setup.py (path:%s) egg_info for package from %s',
                self.setup_py, self.link,
            )
        with indent_log():
            # _run_setup_py is a shim script; substitute the placeholders.
            script = self._run_setup_py
            script = script.replace('__SETUP_PY__', repr(self.setup_py))
            script = script.replace('__PKG_NAME__', repr(self.name))
            base_cmd = [sys.executable, '-c', script]
            if self.isolated:
                base_cmd += ["--no-user-cfg"]
            egg_info_cmd = base_cmd + ['egg_info']
            # We can't put the .egg-info files at the root, because then the
            # source code will be mistaken for an installed egg, causing
            # problems
            if self.editable:
                egg_base_option = []
            else:
                egg_info_dir = os.path.join(self.source_dir, 'pip-egg-info')
                ensure_dir(egg_info_dir)
                egg_base_option = ['--egg-base', 'pip-egg-info']
            cwd = self.source_dir
            if self.editable_options and \
                    'subdirectory' in self.editable_options:
                cwd = os.path.join(cwd, self.editable_options['subdirectory'])
            call_subprocess(
                egg_info_cmd + egg_base_option,
                cwd=cwd,
                show_stdout=False,
                command_level=logging.DEBUG,
                command_desc='python setup.py egg_info')
        if not self.req:
            # PEP 440 versions pin with '=='; legacy versions need '===' .
            if isinstance(
                    pkg_resources.parse_version(self.pkg_info()["Version"]),
                    Version):
                op = "=="
            else:
                op = "==="
            self.req = pkg_resources.Requirement.parse(
                "".join([
                    self.pkg_info()["Name"],
                    op,
                    self.pkg_info()["Version"],
                ]))
            self._correct_build_location()
# FIXME: This is a lame hack, entirely for PasteScript which has
# a self-provided entry point that causes this awkwardness
_run_setup_py = """
__file__ = __SETUP_PY__
from setuptools.command import egg_info
import pkg_resources
import os
import tokenize
def replacement_run(self):
self.mkpath(self.egg_info)
installer = self.distribution.fetch_build_egg
for ep in pkg_resources.iter_entry_points('egg_info.writers'):
# require=False is the change we're making:
writer = ep.load(require=False)
if writer:
writer(self, ep.name, os.path.join(self.egg_info,ep.name))
self.find_sources()
egg_info.egg_info.run = replacement_run
exec(compile(
getattr(tokenize, 'open', open)(__file__).read().replace('\\r\\n', '\\n'),
__file__,
'exec'
))
"""
    def egg_info_data(self, filename):
        """Return the text of metadata file *filename*, preferring the
        already-installed distribution, else the generated egg-info dir;
        None when the file does not exist."""
        if self.satisfied_by is not None:
            if not self.satisfied_by.has_metadata(filename):
                return None
            return self.satisfied_by.get_metadata(filename)
        assert self.source_dir
        filename = self.egg_info_path(filename)
        if not os.path.exists(filename):
            return None
        data = read_text_file(filename)
        return data
    def egg_info_path(self, filename):
        """Return the absolute path of *filename* inside this requirement's
        ``.egg-info`` directory, locating (and caching) that directory on
        first use. For editables the whole tree is searched, skipping VCS
        dirs, virtualenvs and test directories."""
        if self._egg_info_path is None:
            if self.editable:
                base = self.source_dir
            else:
                base = os.path.join(self.source_dir, 'pip-egg-info')
            filenames = os.listdir(base)
            if self.editable:
                filenames = []
                for root, dirs, files in os.walk(base):
                    for dir in vcs.dirnames:
                        if dir in dirs:
                            dirs.remove(dir)
                    # Iterate over a copy of ``dirs``, since mutating
                    # a list while iterating over it can cause trouble.
                    # (See https://github.com/pypa/pip/pull/462.)
                    for dir in list(dirs):
                        # Don't search in anything that looks like a virtualenv
                        # environment
                        if (
                                os.path.exists(
                                    os.path.join(root, dir, 'bin', 'python')
                                ) or
                                os.path.exists(
                                    os.path.join(
                                        root, dir, 'Scripts', 'Python.exe'
                                    )
                                )):
                            dirs.remove(dir)
                        # Also don't search through tests
                        elif dir == 'test' or dir == 'tests':
                            dirs.remove(dir)
                    filenames.extend([os.path.join(root, dir)
                                      for dir in dirs])
                filenames = [f for f in filenames if f.endswith('.egg-info')]
            if not filenames:
                raise InstallationError(
                    'No files/directories in %s (from %s)' % (base, filename)
                )
            assert filenames, \
                "No files/directories in %s (from %s)" % (base, filename)
            # if we have more than one match, we pick the toplevel one. This
            # can easily be the case if there is a dist folder which contains
            # an extracted tarball for testing purposes.
            if len(filenames) > 1:
                filenames.sort(
                    key=lambda x: x.count(os.path.sep) +
                    (os.path.altsep and x.count(os.path.altsep) or 0)
                )
            self._egg_info_path = os.path.join(base, filenames[0])
        return os.path.join(self._egg_info_path, filename)
    def pkg_info(self):
        """Parse PKG-INFO into an email.message.Message-like object whose
        header lookup is case-insensitive (e.g. ``pkg_info()['Version']``)."""
        p = FeedParser()
        data = self.egg_info_data('PKG-INFO')
        if not data:
            logger.warning(
                'No PKG-INFO file found in %s',
                display_path(self.egg_info_path('PKG-INFO')),
            )
        p.feed(data or '')
        return p.close()
    # Matches "[section]" headers in requires.txt-style metadata.
    _requirements_section_re = re.compile(r'\[(.*?)\]')

    @property
    def installed_version(self):
        # Version of this project already installed in the environment, if any.
        return get_installed_version(self.name)
def assert_source_matches_version(self):
assert self.source_dir
version = self.pkg_info()['version']
if version not in self.req:
logger.warning(
'Requested %s, but installing version %s',
self,
self.installed_version,
)
else:
logger.debug(
'Source in %s has version %s, which satisfies requirement %s',
display_path(self.source_dir),
version,
self,
)
def update_editable(self, obtain=True):
if not self.link:
logger.debug(
"Cannot update repository at %s; repository location is "
"unknown",
self.source_dir,
)
return
assert self.editable
assert self.source_dir
if self.link.scheme == 'file':
# Static paths don't get updated
return
assert '+' in self.link.url, "bad url: %r" % self.link.url
if not self.update:
return
vc_type, url = self.link.url.split('+', 1)
backend = vcs.get_backend(vc_type)
if backend:
vcs_backend = backend(self.link.url)
if obtain:
vcs_backend.obtain(self.source_dir)
else:
vcs_backend.export(self.source_dir)
else:
assert 0, (
'Unexpected version control type (in %s): %s'
% (self.link, vc_type))
    def uninstall(self, auto_confirm=False):
        """
        Uninstall the distribution currently satisfying this requirement.
        Prompts before removing or modifying files unless
        ``auto_confirm`` is True.
        Refuses to delete or modify files outside of ``sys.prefix`` -
        thus uninstallation within a virtual environment can only
        modify that virtual environment, even if the virtualenv is
        linked to global site-packages.
        """
        if not self.check_if_exists():
            raise UninstallationError(
                "Cannot uninstall requirement %s, not installed" % (self.name,)
            )
        dist = self.satisfied_by or self.conflicts_with
        paths_to_remove = UninstallPathSet(dist)
        develop_egg_link = egg_link_path(dist)
        develop_egg_link_egg_info = '{0}.egg-info'.format(
            pkg_resources.to_filename(dist.project_name))
        egg_info_exists = dist.egg_info and os.path.exists(dist.egg_info)
        # Special case for distutils installed package
        distutils_egg_info = getattr(dist._provider, 'path', None)
        # Uninstall cases order do matter as in the case of 2 installs of the
        # same package, pip needs to uninstall the currently detected version
        if (egg_info_exists and dist.egg_info.endswith('.egg-info') and
                not dist.egg_info.endswith(develop_egg_link_egg_info)):
            # if dist.egg_info.endswith(develop_egg_link_egg_info), we
            # are in fact in the develop_egg_link case
            paths_to_remove.add(dist.egg_info)
            if dist.has_metadata('installed-files.txt'):
                for installed_file in dist.get_metadata(
                        'installed-files.txt').splitlines():
                    path = os.path.normpath(
                        os.path.join(dist.egg_info, installed_file)
                    )
                    paths_to_remove.add(path)
            # FIXME: need a test for this elif block
            # occurs with --single-version-externally-managed/--record outside
            # of pip
            elif dist.has_metadata('top_level.txt'):
                if dist.has_metadata('namespace_packages.txt'):
                    namespaces = dist.get_metadata('namespace_packages.txt')
                else:
                    namespaces = []
                for top_level_pkg in [
                        p for p
                        in dist.get_metadata('top_level.txt').splitlines()
                        if p and p not in namespaces]:
                    path = os.path.join(dist.location, top_level_pkg)
                    paths_to_remove.add(path)
                    paths_to_remove.add(path + '.py')
                    paths_to_remove.add(path + '.pyc')
        elif distutils_egg_info:
            warnings.warn(
                "Uninstalling a distutils installed project ({0}) has been "
                "deprecated and will be removed in a future version. This is "
                "due to the fact that uninstalling a distutils project will "
                "only partially uninstall the project.".format(self.name),
                RemovedInPip8Warning,
            )
            paths_to_remove.add(distutils_egg_info)
        elif dist.location.endswith('.egg'):
            # package installed by easy_install
            # We cannot match on dist.egg_name because it can slightly vary
            # i.e. setuptools-0.6c11-py2.6.egg vs setuptools-0.6rc11-py2.6.egg
            paths_to_remove.add(dist.location)
            easy_install_egg = os.path.split(dist.location)[1]
            easy_install_pth = os.path.join(os.path.dirname(dist.location),
                                            'easy-install.pth')
            paths_to_remove.add_pth(easy_install_pth, './' + easy_install_egg)
        elif develop_egg_link:
            # develop egg
            with open(develop_egg_link, 'r') as fh:
                link_pointer = os.path.normcase(fh.readline().strip())
            assert (link_pointer == dist.location), (
                'Egg-link %s does not match installed location of %s '
                '(at %s)' % (link_pointer, self.name, dist.location)
            )
            paths_to_remove.add(develop_egg_link)
            easy_install_pth = os.path.join(os.path.dirname(develop_egg_link),
                                            'easy-install.pth')
            paths_to_remove.add_pth(easy_install_pth, dist.location)
        elif egg_info_exists and dist.egg_info.endswith('.dist-info'):
            for path in pip.wheel.uninstallation_paths(dist):
                paths_to_remove.add(path)
        else:
            logger.debug(
                'Not sure how to uninstall: %s - Check: %s',
                dist, dist.location)
        # find distutils scripts= scripts
        if dist.has_metadata('scripts') and dist.metadata_isdir('scripts'):
            for script in dist.metadata_listdir('scripts'):
                if dist_in_usersite(dist):
                    bin_dir = bin_user
                else:
                    bin_dir = bin_py
                paths_to_remove.add(os.path.join(bin_dir, script))
                if WINDOWS:
                    paths_to_remove.add(os.path.join(bin_dir, script) + '.bat')
        # find console_scripts
        if dist.has_metadata('entry_points.txt'):
            config = configparser.SafeConfigParser()
            config.readfp(
                FakeFile(dist.get_metadata_lines('entry_points.txt'))
            )
            if config.has_section('console_scripts'):
                for name, value in config.items('console_scripts'):
                    if dist_in_usersite(dist):
                        bin_dir = bin_user
                    else:
                        bin_dir = bin_py
                    paths_to_remove.add(os.path.join(bin_dir, name))
                    if WINDOWS:
                        paths_to_remove.add(
                            os.path.join(bin_dir, name) + '.exe'
                        )
                        paths_to_remove.add(
                            os.path.join(bin_dir, name) + '.exe.manifest'
                        )
                        paths_to_remove.add(
                            os.path.join(bin_dir, name) + '-script.py'
                        )
        # Actually remove everything collected above (prompting unless
        # auto_confirm); kept in self.uninstalled for possible rollback.
        paths_to_remove.remove(auto_confirm)
        self.uninstalled = paths_to_remove
def rollback_uninstall(self):
if self.uninstalled:
self.uninstalled.rollback()
else:
logger.error(
"Can't rollback %s, nothing uninstalled.", self.project_name,
)
def commit_uninstall(self):
if self.uninstalled:
self.uninstalled.commit()
else:
logger.error(
"Can't commit %s, nothing uninstalled.", self.project_name,
)
    def archive(self, build_dir):
        """Zip the source directory into ``<name>-<version>.zip`` under
        *build_dir*, prompting (ignore/wipe/backup) when the archive
        already exists."""
        assert self.source_dir
        create_archive = True
        archive_name = '%s-%s.zip' % (self.name, self.pkg_info()["version"])
        archive_path = os.path.join(build_dir, archive_name)
        if os.path.exists(archive_path):
            response = ask_path_exists(
                'The file %s exists. (i)gnore, (w)ipe, (b)ackup ' %
                display_path(archive_path), ('i', 'w', 'b'))
            if response == 'i':
                create_archive = False
            elif response == 'w':
                logger.warning('Deleting %s', display_path(archive_path))
                os.remove(archive_path)
            elif response == 'b':
                dest_file = backup_dir(archive_path)
                logger.warning(
                    'Backing up %s to %s',
                    display_path(archive_path),
                    display_path(dest_file),
                )
                shutil.move(archive_path, dest_file)
        if create_archive:
            zip = zipfile.ZipFile(
                archive_path, 'w', zipfile.ZIP_DEFLATED,
                allowZip64=True
            )
            dir = os.path.normcase(os.path.abspath(self.source_dir))
            for dirpath, dirnames, filenames in os.walk(dir):
                # Never ship the generated metadata directory.
                if 'pip-egg-info' in dirnames:
                    dirnames.remove('pip-egg-info')
                for dirname in dirnames:
                    dirname = os.path.join(dirpath, dirname)
                    name = self._clean_zip_name(dirname, dir)
                    zipdir = zipfile.ZipInfo(self.name + '/' + name + '/')
                    zipdir.external_attr = 0x1ED << 16  # 0o755
                    zip.writestr(zipdir, '')
                for filename in filenames:
                    if filename == PIP_DELETE_MARKER_FILENAME:
                        continue
                    filename = os.path.join(dirpath, filename)
                    name = self._clean_zip_name(filename, dir)
                    zip.write(filename, self.name + '/' + name)
            zip.close()
            logger.info('Saved %s', display_path(archive_path))
def _clean_zip_name(self, name, prefix):
assert name.startswith(prefix + os.path.sep), (
"name %r doesn't start with prefix %r" % (name, prefix)
)
name = name[len(prefix) + 1:]
name = name.replace(os.path.sep, '/')
return name
    def match_markers(self):
        """Evaluate this requirement's environment markers; True when there
        are none."""
        if self.markers is not None:
            return markers_interpret(self.markers)
        else:
            return True
    def install(self, install_options, global_options=[], root=None):
        """Install this requirement: dispatch to editable/wheel installs,
        otherwise run ``setup.py install`` and record the installed files
        into ``installed-files.txt``."""
        if self.editable:
            self.install_editable(install_options, global_options)
            return
        if self.is_wheel:
            version = pip.wheel.wheel_version(self.source_dir)
            pip.wheel.check_compatibility(version, self.name)
            self.move_wheel_files(self.source_dir, root=root)
            self.install_succeeded = True
            return
        # Extend the list of global and install options passed on to
        # the setup.py call with the ones from the requirements file.
        # Options specified in requirements file override those
        # specified on the command line, since the last option given
        # to setup.py is the one that is used.
        global_options += self.options.get('global_options', [])
        install_options += self.options.get('install_options', [])
        if self.isolated:
            global_options = list(global_options) + ["--no-user-cfg"]
        temp_location = tempfile.mkdtemp('-record', 'pip-')
        record_filename = os.path.join(temp_location, 'install-record.txt')
        try:
            install_args = [sys.executable]
            install_args.append('-c')
            install_args.append(
                "import setuptools, tokenize;__file__=%r;"
                "exec(compile(getattr(tokenize, 'open', open)(__file__).read()"
                ".replace('\\r\\n', '\\n'), __file__, 'exec'))" % self.setup_py
            )
            install_args += list(global_options) + \
                ['install', '--record', record_filename]
            if not self.as_egg:
                install_args += ['--single-version-externally-managed']
            if root is not None:
                install_args += ['--root', root]
            if self.pycompile:
                install_args += ["--compile"]
            else:
                install_args += ["--no-compile"]
            if running_under_virtualenv():
                py_ver_str = 'python' + sysconfig.get_python_version()
                install_args += ['--install-headers',
                                 os.path.join(sys.prefix, 'include', 'site',
                                              py_ver_str, self.name)]
            logger.info('Running setup.py install for %s', self.name)
            with indent_log():
                call_subprocess(
                    install_args + install_options,
                    cwd=self.source_dir,
                    show_stdout=False,
                )
            if not os.path.exists(record_filename):
                logger.debug('Record file %s not found', record_filename)
                return
            self.install_succeeded = True
            if self.as_egg:
                # there's no --always-unzip option we can pass to install
                # command so we unable to save the installed-files.txt
                return

            def prepend_root(path):
                # Re-anchor absolute record paths under --root, if given.
                if root is None or not os.path.isabs(path):
                    return path
                else:
                    return change_root(root, path)

            with open(record_filename) as f:
                for line in f:
                    directory = os.path.dirname(line)
                    if directory.endswith('.egg-info'):
                        egg_info_dir = prepend_root(directory)
                        break
                else:
                    logger.warning(
                        'Could not find .egg-info directory in install record'
                        ' for %s',
                        self,
                    )
                    # FIXME: put the record somewhere
                    # FIXME: should this be an error?
                    return
            new_lines = []
            with open(record_filename) as f:
                for line in f:
                    filename = line.strip()
                    if os.path.isdir(filename):
                        filename += os.path.sep
                    new_lines.append(
                        make_path_relative(
                            prepend_root(filename), egg_info_dir)
                    )
            inst_files_path = os.path.join(egg_info_dir, 'installed-files.txt')
            with open(inst_files_path, 'w') as f:
                f.write('\n'.join(new_lines) + '\n')
        finally:
            if os.path.exists(record_filename):
                os.remove(record_filename)
            rmtree(temp_location)
    def ensure_has_source_dir(self, parent_dir):
        """Ensure that a source_dir is set.
        This will create a temporary build dir if the name of the requirement
        isn't known yet.
        :param parent_dir: The ideal pip parent_dir for the source_dir.
            Generally src_dir for editables and build_dir for sdists.
        :return: self.source_dir
        """
        if self.source_dir is None:
            self.source_dir = self.build_location(parent_dir)
        return self.source_dir
    def remove_temporary_source(self):
        """Remove the source files from this requirement, if they are marked
        for deletion"""
        # Only delete source trees that carry pip's delete-marker file.
        if self.source_dir and os.path.exists(
                os.path.join(self.source_dir, PIP_DELETE_MARKER_FILENAME)):
            logger.debug('Removing source in %s', self.source_dir)
            rmtree(self.source_dir)
        self.source_dir = None
        if self._temp_build_dir and os.path.exists(self._temp_build_dir):
            rmtree(self._temp_build_dir)
        self._temp_build_dir = None
    def install_editable(self, install_options, global_options=()):
        """Install this requirement in development mode via
        ``setup.py develop --no-deps``."""
        logger.info('Running setup.py develop for %s', self.name)
        if self.isolated:
            global_options = list(global_options) + ["--no-user-cfg"]
        with indent_log():
            # FIXME: should we do --install-headers here too?
            cwd = self.source_dir
            if self.editable_options and \
                    'subdirectory' in self.editable_options:
                cwd = os.path.join(cwd, self.editable_options['subdirectory'])
            call_subprocess(
                [
                    sys.executable,
                    '-c',
                    "import setuptools, tokenize; __file__=%r; exec(compile("
                    "getattr(tokenize, 'open', open)(__file__).read().replace"
                    "('\\r\\n', '\\n'), __file__, 'exec'))" % self.setup_py
                ] +
                list(global_options) +
                ['develop', '--no-deps'] +
                list(install_options),
                cwd=cwd,
                show_stdout=False)
            self.install_succeeded = True
def check_if_exists(self):
"""Find an installed distribution that satisfies or conflicts
with this requirement, and set self.satisfied_by or
self.conflicts_with appropriately.
"""
if self.req is None:
return False
try:
self.satisfied_by = pkg_resources.get_distribution(self.req)
except pkg_resources.DistributionNotFound:
return False
except pkg_resources.VersionConflict:
existing_dist = pkg_resources.get_distribution(
self.req.project_name
)
if self.use_user_site:
if dist_in_usersite(existing_dist):
self.conflicts_with = existing_dist
elif (running_under_virtualenv() and
dist_in_site_packages(existing_dist)):
raise InstallationError(
"Will not install to the user site because it will "
"lack sys.path precedence to %s in %s" %
(existing_dist.project_name, existing_dist.location)
)
else:
self.conflicts_with = existing_dist
return True
@property
def is_wheel(self):
return self.link and self.link.is_wheel
def move_wheel_files(self, wheeldir, root=None):
move_wheel_files(
self.name, self.req, wheeldir,
user=self.use_user_site,
home=self.target_dir,
root=root,
pycompile=self.pycompile,
isolated=self.isolated,
)
def get_dist(self):
"""Return a pkg_resources.Distribution built from self.egg_info_path"""
egg_info = self.egg_info_path('').rstrip('/')
base_dir = os.path.dirname(egg_info)
metadata = pkg_resources.PathMetadata(base_dir, egg_info)
dist_name = os.path.splitext(os.path.basename(egg_info))[0]
return pkg_resources.Distribution(
os.path.dirname(egg_info),
project_name=dist_name,
metadata=metadata)
def _strip_postfix(req):
"""
Strip req postfix ( -dev, 0.2, etc )
"""
# FIXME: use package_to_requirement?
match = re.search(r'^(.*?)(?:-dev|-\d.*)$', req)
if match:
# Strip off -dev, -0.2, etc.
req = match.group(1)
return req
def _build_req_from_url(url):
parts = [p for p in url.split('#', 1)[0].split('/') if p]
req = None
if parts[-2] in ('tags', 'branches', 'tag', 'branch'):
req = parts[-3]
elif parts[-1] == 'trunk':
req = parts[-2]
return req
def _build_editable_options(req):
"""
This method generates a dictionary of the query string
parameters contained in a given editable URL.
"""
regexp = re.compile(r"[\?#&](?P<name>[^&=]+)=(?P<value>[^&=]+)")
matched = regexp.findall(req)
if matched:
ret = dict()
for option in matched:
(name, value) = option
if name in ret:
raise Exception("%s option already defined" % name)
ret[name] = value
return ret
return None
def parse_editable(editable_req, default_vcs=None):
    """Parses an editable requirement into:
        - a requirement name
        - an URL
        - extras
        - editable options
    Accepted requirements:
        svn+http://blahblah@rev#egg=Foobar[baz]&subdirectory=version_subdir
        .[some_extra]
    """
    url = editable_req
    extras = None

    # If a file path is specified with extras, strip off the extras.
    m = re.match(r'^(.+)(\[[^\]]+\])$', url)
    if m:
        url_no_extras = m.group(1)
        extras = m.group(2)
    else:
        url_no_extras = url

    if os.path.isdir(url_no_extras):
        if not os.path.exists(os.path.join(url_no_extras, 'setup.py')):
            raise InstallationError(
                "Directory %r is not installable. File 'setup.py' not found." %
                url_no_extras
            )
        # Treating it as code that has already been checked out
        url_no_extras = path_to_url(url_no_extras)

    if url_no_extras.lower().startswith('file:'):
        if extras:
            # Only the bracketed extras are known here; parse them through a
            # placeholder requirement to get the extras tuple.
            return (
                None,
                url_no_extras,
                pkg_resources.Requirement.parse(
                    '__placeholder__' + extras
                ).extras,
                {},
            )
        else:
            return None, url_no_extras, None, {}

    for version_control in vcs:
        # Bare scheme like 'git:...' becomes 'git+git:...'.
        if url.lower().startswith('%s:' % version_control):
            url = '%s+%s' % (version_control, url)
            break

    if '+' not in url:
        if default_vcs:
            url = default_vcs + '+' + url
        else:
            raise InstallationError(
                '%s should either be a path to a local project or a VCS url '
                'beginning with svn+, git+, hg+, or bzr+' %
                editable_req
            )

    vc_type = url.split('+', 1)[0].lower()

    if not vcs.get_backend(vc_type):
        error_message = 'For --editable=%s only ' % editable_req + \
            ', '.join([backend.name + '+URL' for backend in vcs.backends]) + \
            ' is currently supported'
        raise InstallationError(error_message)

    try:
        options = _build_editable_options(editable_req)
    except Exception as exc:
        raise InstallationError(
            '--editable=%s error in editable options:%s' % (editable_req, exc)
        )
    # The project name comes from the #egg= option, or is guessed from the
    # URL path when absent.
    if not options or 'egg' not in options:
        req = _build_req_from_url(editable_req)
        if not req:
            raise InstallationError(
                '--editable=%s is not the right format; it must have '
                '#egg=Package' % editable_req
            )
    else:
        req = options['egg']

    package = _strip_postfix(req)
    return package, url, None, options
|
unknown
|
codeparrot/codeparrot-clean
| ||
name: Close stale pull requests
on:
workflow_dispatch:
inputs:
endDate:
description: stop processing PRs after this date
required: false
type: string
# yamllint disable rule:empty-lines
env:
CLOSE_MESSAGE: >
This pull request was opened more than a year ago and there has
been no activity in the last 6 months. We value your contribution
but since it has not progressed in the last 6 months it is being
closed. If you feel closing this pull request is not the right thing
to do, please leave a comment.
WARN_MESSAGE: >
This pull request was opened more than a year ago and there has
been no activity in the last 5 months. We value your contribution
but since it has not progressed in the last 5 months it is being
marked stale and will be closed if there is no progress in the
next month. If you feel that is not the right thing to do please
comment on the pull request.
# yamllint enable
permissions:
contents: read
jobs:
stale:
permissions:
pull-requests: write # for actions/stale to close stale PRs
if: github.repository == 'nodejs/node'
runs-on: ubuntu-slim
steps:
- name: Set default end date which is 1 year ago
run: echo "END_DATE=$(date --date='525600 minutes ago' --rfc-2822)" >> "$GITHUB_ENV"
- name: if date set in event override the default end date
env:
END_DATE_INPUT_VALUE: ${{ github.event.inputs.endDate }}
if: ${{ github.event.inputs.endDate != '' }}
run: echo "END_DATE=$END_DATE_INPUT_VALUE" >> "$GITHUB_ENV"
- uses: mhdawson/stale@453d6581568dc43dbe345757f24408d7b451c651 # PR to add support for endDate
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
end-date: ${{ env.END_DATE }}
days-before-issue-stale: -1
days-before-issue-close: -1
days-before-stale: 150
days-before-close: 30
stale-issue-label: stale
close-issue-message: ${{ env.CLOSE_MESSAGE }}
stale-issue-message: ${{ env.WARN_MESSAGE }}
exempt-pr-labels: never-stale
# max requests it will send per run to the GitHub API before it deliberately exits to avoid hitting API rate limits
operations-per-run: 500
remove-stale-when-updated: true
|
unknown
|
github
|
https://github.com/nodejs/node
|
.github/workflows/close-stale-pull-requests.yml
|
# -*- coding: utf-8 -*-
import gc
import unittest
from markupsafe import Markup, escape, escape_silent
from markupsafe._compat import text_type
class MarkupTestCase(unittest.TestCase):
def test_adding(self):
# adding two strings should escape the unsafe one
unsafe = '<script type="application/x-some-script">alert("foo");</script>'
safe = Markup('<em>username</em>')
assert unsafe + safe == text_type(escape(unsafe)) + text_type(safe)
def test_string_interpolation(self):
# string interpolations are safe to use too
assert Markup('<em>%s</em>') % '<bad user>' == \
'<em><bad user></em>'
assert Markup('<em>%(username)s</em>') % {
'username': '<bad user>'
} == '<em><bad user></em>'
assert Markup('%i') % 3.14 == '3'
assert Markup('%.2f') % 3.14 == '3.14'
def test_type_behavior(self):
# an escaped object is markup too
assert type(Markup('foo') + 'bar') is Markup
# and it implements __html__ by returning itself
x = Markup("foo")
assert x.__html__() is x
def test_html_interop(self):
# it also knows how to treat __html__ objects
class Foo(object):
def __html__(self):
return '<em>awesome</em>'
def __unicode__(self):
return 'awesome'
__str__ = __unicode__
assert Markup(Foo()) == '<em>awesome</em>'
assert Markup('<strong>%s</strong>') % Foo() == \
'<strong><em>awesome</em></strong>'
def test_tuple_interpol(self):
self.assertEqual(Markup('<em>%s:%s</em>') % (
'<foo>',
'<bar>',
), Markup(u'<em><foo>:<bar></em>'))
def test_dict_interpol(self):
self.assertEqual(Markup('<em>%(foo)s</em>') % {
'foo': '<foo>',
}, Markup(u'<em><foo></em>'))
self.assertEqual(Markup('<em>%(foo)s:%(bar)s</em>') % {
'foo': '<foo>',
'bar': '<bar>',
}, Markup(u'<em><foo>:<bar></em>'))
def test_escaping(self):
# escaping and unescaping
assert escape('"<>&\'') == '"<>&''
assert Markup("<em>Foo & Bar</em>").striptags() == "Foo & Bar"
assert Markup("<test>").unescape() == "<test>"
def test_all_set(self):
import markupsafe as markup
for item in markup.__all__:
getattr(markup, item)
def test_escape_silent(self):
assert escape_silent(None) == Markup()
assert escape(None) == Markup(None)
assert escape_silent('<foo>') == Markup(u'<foo>')
def test_splitting(self):
self.assertEqual(Markup('a b').split(), [
Markup('a'),
Markup('b')
])
self.assertEqual(Markup('a b').rsplit(), [
Markup('a'),
Markup('b')
])
self.assertEqual(Markup('a\nb').splitlines(), [
Markup('a'),
Markup('b')
])
def test_mul(self):
self.assertEqual(Markup('a') * 3, Markup('aaa'))
class MarkupLeakTestCase(unittest.TestCase):
    """Regression test: repeated escaping must not leak objects.

    Only added to the suite when the C speedups are active (see suite()).
    """

    def test_markup_leaks(self):
        counts = set()
        # Several rounds of heavy escaping; if the process object count
        # stabilises, the set collapses to a single value.
        for count in range(20):
            for item in range(1000):
                escape("foo")
                escape("<foo>")
                escape(u"foo")
                escape(u"<foo>")
            counts.add(len(gc.get_objects()))
        assert len(counts) == 1, 'ouch, c extension seems to leak objects'
def suite():
    """Build the test suite; the leak test runs only with the C extension."""
    tests = unittest.TestSuite()
    tests.addTest(unittest.makeSuite(MarkupTestCase))

    # this test only tests the c extension
    if not hasattr(escape, 'func_code'):
        tests.addTest(unittest.makeSuite(MarkupLeakTestCase))

    return tests
# Script entry point: run the custom suite() defined above.
if __name__ == '__main__':
    unittest.main(defaultTest='suite')

# vim:sts=4:sw=4:et:
|
unknown
|
codeparrot/codeparrot-clean
| ||
"""Test Z-Wave lights."""
from unittest.mock import patch, MagicMock
from homeassistant.components import zwave
from homeassistant.components.zwave import const, light
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_COLOR_TEMP,
ATTR_HS_COLOR,
ATTR_TRANSITION,
SUPPORT_BRIGHTNESS,
SUPPORT_TRANSITION,
SUPPORT_COLOR,
ATTR_WHITE_VALUE,
SUPPORT_COLOR_TEMP,
SUPPORT_WHITE_VALUE,
)
from tests.mock.zwave import MockNode, MockValue, MockEntityValues, value_changed
class MockLightValues(MockEntityValues):
    """Mock Z-Wave light values."""

    def __init__(self, **kwargs):
        """Initialize the mock zwave values.

        dimming_duration / color / color_channels default to None so each
        test supplies only the value objects its scenario needs.
        """
        self.dimming_duration = None
        self.color = None
        self.color_channels = None
        super().__init__(**kwargs)
def test_get_device_detects_dimmer(mock_openzwave):
    """Test get_device returns a normal dimmer."""
    # No color command class on the node -> plain brightness-only dimmer.
    node = MockNode()
    value = MockValue(data=0, node=node)
    values = MockLightValues(primary=value)

    device = light.get_device(node=node, values=values, node_config={})
    assert isinstance(device, light.ZwaveDimmer)
    assert device.supported_features == SUPPORT_BRIGHTNESS
def test_get_device_detects_colorlight(mock_openzwave):
    """Test get_device returns a color light."""
    # COMMAND_CLASS_SWITCH_COLOR upgrades the dimmer to a color light.
    node = MockNode(command_classes=[const.COMMAND_CLASS_SWITCH_COLOR])
    value = MockValue(data=0, node=node)
    values = MockLightValues(primary=value)

    device = light.get_device(node=node, values=values, node_config={})
    assert isinstance(device, light.ZwaveColorLight)
    assert device.supported_features == SUPPORT_BRIGHTNESS | SUPPORT_COLOR
def test_get_device_detects_zw098(mock_openzwave):
    """Test get_device returns a zw098 color light."""
    # The ZW098 bulb is identified by manufacturer/product id and gains
    # color-temperature support on top of brightness and color.
    node = MockNode(
        manufacturer_id="0086",
        product_id="0062",
        command_classes=[const.COMMAND_CLASS_SWITCH_COLOR],
    )
    value = MockValue(data=0, node=node)
    values = MockLightValues(primary=value)
    device = light.get_device(node=node, values=values, node_config={})
    assert isinstance(device, light.ZwaveColorLight)
    assert device.supported_features == (
        SUPPORT_BRIGHTNESS | SUPPORT_COLOR | SUPPORT_COLOR_TEMP
    )
def test_get_device_detects_rgbw_light(mock_openzwave):
    """Test get_device returns an rgbw color light."""
    node = MockNode(command_classes=[const.COMMAND_CLASS_SWITCH_COLOR])
    value = MockValue(data=0, node=node)
    color = MockValue(data="#0000000000", node=node)
    # Channel mask 0x1D: RGB plus a white channel -> white-value support.
    color_channels = MockValue(data=0x1D, node=node)
    values = MockLightValues(primary=value, color=color, color_channels=color_channels)
    device = light.get_device(node=node, values=values, node_config={})
    device.value_added()
    assert isinstance(device, light.ZwaveColorLight)
    assert device.supported_features == (
        SUPPORT_BRIGHTNESS | SUPPORT_COLOR | SUPPORT_WHITE_VALUE
    )
def test_dimmer_turn_on(mock_openzwave):
    """Test turning on a dimmable Z-Wave light."""
    node = MockNode()
    value = MockValue(data=0, node=node)
    values = MockLightValues(primary=value)
    device = light.get_device(node=node, values=values, node_config={})

    # Default turn_on requests full brightness (255).
    device.turn_on()

    assert node.set_dimmer.called
    value_id, brightness = node.set_dimmer.mock_calls[0][1]
    assert value_id == value.value_id
    assert brightness == 255

    node.reset_mock()

    # HA brightness (0-255) is scaled down to the Z-Wave 0-99 range.
    device.turn_on(**{ATTR_BRIGHTNESS: 120})

    assert node.set_dimmer.called
    value_id, brightness = node.set_dimmer.mock_calls[0][1]

    assert value_id == value.value_id
    assert brightness == 46  # int(120 / 255 * 99)

    # Without a dimming_duration value, a transition only logs a debug
    # message; the dimmer is still set.
    with patch.object(light, "_LOGGER", MagicMock()) as mock_logger:
        device.turn_on(**{ATTR_TRANSITION: 35})
        assert mock_logger.debug.called
        assert node.set_dimmer.called
        msg, entity_id = mock_logger.debug.mock_calls[0][1]
        assert entity_id == device.entity_id
def test_dimmer_min_brightness(mock_openzwave):
    """Test turning on a dimmable Z-Wave light to its minimum brightness."""
    node = MockNode()
    value = MockValue(data=0, node=node)
    values = MockLightValues(primary=value)
    device = light.get_device(node=node, values=values, node_config={})

    assert not device.is_on

    device.turn_on(**{ATTR_BRIGHTNESS: 1})

    assert device.is_on
    assert device.brightness == 1

    # An explicit turn_on with brightness 0 still counts as "on".
    device.turn_on(**{ATTR_BRIGHTNESS: 0})

    assert device.is_on
    assert device.brightness == 0
def test_dimmer_transitions(mock_openzwave):
    """Test dimming transition on a dimmable Z-Wave light."""
    node = MockNode()
    value = MockValue(data=0, node=node)
    duration = MockValue(data=0, node=node)
    values = MockLightValues(primary=value, dimming_duration=duration)
    device = light.get_device(node=node, values=values, node_config={})
    assert device.supported_features == SUPPORT_BRIGHTNESS | SUPPORT_TRANSITION

    # duration.data receives the encoded dimming duration written by
    # turn_on/turn_off (encoding defined by the light platform).

    # Test turn_on
    # Factory Default
    device.turn_on()
    assert duration.data == 0xFF

    # Seconds transition
    device.turn_on(**{ATTR_TRANSITION: 45})
    assert duration.data == 45

    # Minutes transition
    device.turn_on(**{ATTR_TRANSITION: 245})
    assert duration.data == 0x83

    # Clipped transition
    device.turn_on(**{ATTR_TRANSITION: 10000})
    assert duration.data == 0xFE

    # Test turn_off (mirrors the turn_on encodings)
    # Factory Default
    device.turn_off()
    assert duration.data == 0xFF

    # Seconds transition
    device.turn_off(**{ATTR_TRANSITION: 45})
    assert duration.data == 45

    # Minutes transition
    device.turn_off(**{ATTR_TRANSITION: 245})
    assert duration.data == 0x83

    # Clipped transition
    device.turn_off(**{ATTR_TRANSITION: 10000})
    assert duration.data == 0xFE
def test_dimmer_turn_off(mock_openzwave):
    """Test turning off a dimmable Z-Wave light."""
    node = MockNode()
    value = MockValue(data=46, node=node)
    values = MockLightValues(primary=value)
    device = light.get_device(node=node, values=values, node_config={})

    device.turn_off()

    # Turning off writes dimmer level 0 on the same value id.
    assert node.set_dimmer.called
    value_id, brightness = node.set_dimmer.mock_calls[0][1]
    assert value_id == value.value_id
    assert brightness == 0
def test_dimmer_value_changed(mock_openzwave):
    """Test value changed for dimmer lights."""
    node = MockNode()
    value = MockValue(data=0, node=node)
    values = MockLightValues(primary=value)
    device = light.get_device(node=node, values=values, node_config={})

    assert not device.is_on

    # Z-Wave level 46 scales back to HA brightness 118 (46 / 99 * 255).
    value.data = 46
    value_changed(value)

    assert device.is_on
    assert device.brightness == 118
def test_dimmer_refresh_value(mock_openzwave):
    """Test refresh-delayed value changed for dimmer lights."""
    node = MockNode()
    value = MockValue(data=0, node=node)
    values = MockLightValues(primary=value)
    device = light.get_device(
        node=node,
        values=values,
        node_config={zwave.CONF_REFRESH_VALUE: True, zwave.CONF_REFRESH_DELAY: 5},
    )

    assert not device.is_on

    with patch.object(light, "Timer", MagicMock()) as mock_timer:
        value.data = 46
        value_changed(value)

        # With refresh enabled the state update is deferred: a 5 s timer is
        # started instead of applying the change immediately.
        assert not device.is_on
        assert mock_timer.called
        assert len(mock_timer.mock_calls) == 2
        timeout, callback = mock_timer.mock_calls[0][1][:2]
        assert timeout == 5
        assert mock_timer().start.called
        assert len(mock_timer().start.mock_calls) == 1

        with patch.object(light, "Timer", MagicMock()) as mock_timer_2:
            # A second change before the timer fires cancels the first
            # timer and schedules a fresh one.
            value_changed(value)
            assert not device.is_on
            assert mock_timer().cancel.called
            assert len(mock_timer_2.mock_calls) == 2
            timeout, callback = mock_timer_2.mock_calls[0][1][:2]
            assert timeout == 5
            assert mock_timer_2().start.called
            assert len(mock_timer_2().start.mock_calls) == 1

            # Firing the timer callback finally applies the new state.
            callback()
            assert device.is_on
            assert device.brightness == 118
def test_set_hs_color(mock_openzwave):
    """Test setting zwave light color."""
    node = MockNode(command_classes=[const.COMMAND_CLASS_SWITCH_COLOR])
    value = MockValue(data=0, node=node)
    color = MockValue(data="#0000000000", node=node)
    # Supports RGB only
    color_channels = MockValue(data=0x1C, node=node)
    values = MockLightValues(primary=value, color=color, color_channels=color_channels)
    device = light.get_device(node=node, values=values, node_config={})

    assert color.data == "#0000000000"

    # hue 30 / sat 50 -> RGB ff/bf/7f in the first three color-string bytes.
    device.turn_on(**{ATTR_HS_COLOR: (30, 50)})

    assert color.data == "#ffbf7f0000"
def test_set_white_value(mock_openzwave):
    """Test setting zwave light white value."""
    node = MockNode(command_classes=[const.COMMAND_CLASS_SWITCH_COLOR])
    value = MockValue(data=0, node=node)
    color = MockValue(data="#0000000000", node=node)
    # Supports RGBW
    color_channels = MockValue(data=0x1D, node=node)
    values = MockLightValues(primary=value, color=color, color_channels=color_channels)
    device = light.get_device(node=node, values=values, node_config={})

    assert color.data == "#0000000000"

    # White value 200 (0xc8) is written into the fourth color-string byte;
    # RGB bytes are driven to full.
    device.turn_on(**{ATTR_WHITE_VALUE: 200})

    assert color.data == "#ffffffc800"
def test_disable_white_if_set_color(mock_openzwave):
    """
    Test that _white is set to 0 if turn_on with ATTR_HS_COLOR.

    See Issue #13930 - many RGBW ZWave bulbs will only activate the RGB LED to
    produce color if _white is set to zero.
    """
    node = MockNode(command_classes=[const.COMMAND_CLASS_SWITCH_COLOR])
    value = MockValue(data=0, node=node)
    color = MockValue(data="#0000000000", node=node)
    # Supports RGB only
    color_channels = MockValue(data=0x1C, node=node)
    values = MockLightValues(primary=value, color=color, color_channels=color_channels)
    device = light.get_device(node=node, values=values, node_config={})
    # Simulate a previously stored white level.
    device._white = 234

    assert color.data == "#0000000000"
    assert device.white_value == 234

    device.turn_on(**{ATTR_HS_COLOR: (30, 50)})

    # Setting a color must zero the white channel.
    assert device.white_value == 0
    assert color.data == "#ffbf7f0000"
def test_zw098_set_color_temp(mock_openzwave):
    """Test setting zwave light color temperature on a zw098."""
    node = MockNode(
        manufacturer_id="0086",
        product_id="0062",
        command_classes=[const.COMMAND_CLASS_SWITCH_COLOR],
    )
    value = MockValue(data=0, node=node)
    color = MockValue(data="#0000000000", node=node)
    # Supports RGB, warm white, cold white
    color_channels = MockValue(data=0x1F, node=node)
    values = MockLightValues(primary=value, color=color, color_channels=color_channels)
    device = light.get_device(node=node, values=values, node_config={})

    assert color.data == "#0000000000"

    # Low mireds (cold) drive the last (cold-white) byte to full...
    device.turn_on(**{ATTR_COLOR_TEMP: 200})
    assert color.data == "#00000000ff"

    # ...high mireds (warm) drive the fourth (warm-white) byte to full.
    device.turn_on(**{ATTR_COLOR_TEMP: 400})
    assert color.data == "#000000ff00"
def test_rgb_not_supported(mock_openzwave):
    """Test hs_color is None when the light has no RGB channels."""
    node = MockNode(command_classes=[const.COMMAND_CLASS_SWITCH_COLOR])
    value = MockValue(data=0, node=node)
    color = MockValue(data="#0000000000", node=node)
    # Supports color temperature only
    color_channels = MockValue(data=0x01, node=node)
    values = MockLightValues(primary=value, color=color, color_channels=color_channels)
    device = light.get_device(node=node, values=values, node_config={})
    assert device.hs_color is None
def test_no_color_value(mock_openzwave):
    """Test hs_color is None when no color value is present."""
    node = MockNode(command_classes=[const.COMMAND_CLASS_SWITCH_COLOR])
    value = MockValue(data=0, node=node)
    values = MockLightValues(primary=value)
    device = light.get_device(node=node, values=values, node_config={})
    assert device.hs_color is None
def test_no_color_channels_value(mock_openzwave):
    """Test hs_color is None when no color-channels value is present."""
    node = MockNode(command_classes=[const.COMMAND_CLASS_SWITCH_COLOR])
    value = MockValue(data=0, node=node)
    color = MockValue(data="#0000000000", node=node)
    values = MockLightValues(primary=value, color=color)
    device = light.get_device(node=node, values=values, node_config={})
    assert device.hs_color is None
def test_rgb_value_changed(mock_openzwave):
    """Test value changed for rgb lights."""
    node = MockNode(command_classes=[const.COMMAND_CLASS_SWITCH_COLOR])
    value = MockValue(data=0, node=node)
    color = MockValue(data="#0000000000", node=node)
    # Supports RGB only
    color_channels = MockValue(data=0x1C, node=node)
    values = MockLightValues(primary=value, color=color, color_channels=color_channels)
    device = light.get_device(node=node, values=values, node_config={})

    assert device.hs_color == (0, 0)

    # RGB ff/bf/80 converts to roughly hue 29.764, saturation 49.804.
    color.data = "#ffbf800000"
    value_changed(color)

    assert device.hs_color == (29.764, 49.804)
def test_rgbww_value_changed(mock_openzwave):
    """Test value changed for rgb + warm-white lights."""
    node = MockNode(command_classes=[const.COMMAND_CLASS_SWITCH_COLOR])
    value = MockValue(data=0, node=node)
    color = MockValue(data="#0000000000", node=node)
    # Supports RGB, Warm White
    color_channels = MockValue(data=0x1D, node=node)
    values = MockLightValues(primary=value, color=color, color_channels=color_channels)
    device = light.get_device(node=node, values=values, node_config={})

    assert device.hs_color == (0, 0)
    assert device.white_value == 0

    # RGB c8/64/00 plus white byte c8 (200).
    color.data = "#c86400c800"
    value_changed(color)

    assert device.hs_color == (30, 100)
    assert device.white_value == 200
def test_rgbcw_value_changed(mock_openzwave):
    """Test value changed for rgb + cold-white lights."""
    node = MockNode(command_classes=[const.COMMAND_CLASS_SWITCH_COLOR])
    value = MockValue(data=0, node=node)
    color = MockValue(data="#0000000000", node=node)
    # Supports RGB, Cold White
    color_channels = MockValue(data=0x1E, node=node)
    values = MockLightValues(primary=value, color=color, color_channels=color_channels)
    device = light.get_device(node=node, values=values, node_config={})

    assert device.hs_color == (0, 0)
    assert device.white_value == 0

    # With a single white channel, the white byte follows RGB in the string.
    color.data = "#c86400c800"
    value_changed(color)

    assert device.hs_color == (30, 100)
    assert device.white_value == 200
def test_ct_value_changed(mock_openzwave):
    """Test color-temperature value changed for zw098 lights."""
    node = MockNode(
        manufacturer_id="0086",
        product_id="0062",
        command_classes=[const.COMMAND_CLASS_SWITCH_COLOR],
    )
    value = MockValue(data=0, node=node)
    color = MockValue(data="#0000000000", node=node)
    # Supports RGB, warm white, cold white (0x1F; previous comment said
    # "Cold White" only, which did not match the mask used in
    # test_zw098_set_color_temp)
    color_channels = MockValue(data=0x1F, node=node)
    values = MockLightValues(primary=value, color=color, color_channels=color_channels)
    device = light.get_device(node=node, values=values, node_config={})

    # No white byte lit -> mid color temperature reported.
    assert device.color_temp == light.TEMP_MID_HASS

    # Warm-white byte set -> warm temperature.
    color.data = "#000000ff00"
    value_changed(color)
    assert device.color_temp == light.TEMP_WARM_HASS

    # Cold-white byte set -> cold temperature.
    color.data = "#00000000ff"
    value_changed(color)
    assert device.color_temp == light.TEMP_COLD_HASS
|
unknown
|
codeparrot/codeparrot-clean
| ||
/* Copyright 2016 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_CORE_KERNELS_FAKE_QUANT_OPS_FUNCTOR_H_
#define TENSORFLOW_CORE_KERNELS_FAKE_QUANT_OPS_FUNCTOR_H_
#include <tuple>
#define EIGEN_STACK_ALLOCATION_LIMIT 0
#define EIGEN_USE_THREADS
#include "unsupported/Eigen/CXX11/Tensor" // from @eigen_archive
#include "tensorflow/core/framework/tensor_types.h"
#include "tensorflow/core/platform/types.h"
// Round to the nearest integer with std::round semantics; usable in both
// host and device code.
EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE float StdRound(float input) {
  // On Android, std::round() isn't present, just round().
#if defined(__ANDROID__)
  return round(input);
#else
  return std::round(input);
#endif
}
namespace tensorflow {
// Gymnastics with nudged zero point is to ensure that real zero maps to
// an integer, which is required for e.g. zero-padding in convolutional layers.
// Outputs nudged_min, nudged_max, nudged_scale.
EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE void Nudge(
    const float min, const float max, const int quant_min, const int quant_max,
    float* nudged_min, float* nudged_max, float* scale, float* inv_scale) {
  const float quant_min_float = static_cast<float>(quant_min);
  const float quant_max_float = static_cast<float>(quant_max);
  *scale = (max - min) / (quant_max_float - quant_min_float);
  // Re-calculate the inverse to avoid loss of precision which would result
  // from simply taking the reciprocal of *scale
  *inv_scale = (quant_max_float - quant_min_float) / (max - min);
  // Grid position that would map real 0.0 onto an integer, before clamping.
  const float zero_point_from_min = quant_min_float - min / *scale;
  // Clamp the zero point into [quant_min, quant_max] and round it to an
  // integer so real zero is exactly representable.
  const uint16_t nudged_zero_point = [zero_point_from_min, quant_min,
                                      quant_min_float, quant_max,
                                      quant_max_float] {
    if (zero_point_from_min < quant_min_float) {
      return static_cast<uint16_t>(quant_min);
    }
    if (zero_point_from_min > quant_max_float) {
      return static_cast<uint16_t>(quant_max);
    }
    return static_cast<uint16_t>(StdRound(zero_point_from_min));
  }();
  // Shift the representable range so the nudged zero point maps to 0.0.
  *nudged_min = (quant_min_float - nudged_zero_point) * (*scale);
  *nudged_max = (quant_max_float - nudged_zero_point) * (*scale);
}
// Convenience aliases for the Eigen tensor-map types used by the functors
// in this header.
template <typename T>
using ConstScalar = typename tensorflow::TTypes<T>::ConstScalar;
template <typename T>
using Scalar = typename tensorflow::TTypes<T>::Scalar;
template <typename T>
using ConstVec = typename tensorflow::TTypes<T>::ConstVec;
template <typename T>
using Vec = typename tensorflow::TTypes<T>::Vec;
template <typename T>
using ConstFlat = typename tensorflow::TTypes<T>::ConstFlat;
template <typename T>
using Flat = typename tensorflow::TTypes<T>::Flat;
// Functor called by FakeQuantWithMinMaxArgsOp to do the work. Compiles both
// for CPU and GPU.
template <typename Device>
struct FakeQuantWithMinMaxArgsFunctor {
  // Quantize-dequantize `inputs` into `outputs` using the compile-time
  // constant range [min, max], nudged onto the [quant_min, quant_max] grid.
  void operator()(const Device& d, ConstFlat<float> inputs, const float min,
                  const float max, const int quant_min, const int quant_max,
                  Flat<float> outputs) {
    eigen_assert(min <= 0.0f && "min should be <= 0.0");
    eigen_assert(max >= 0.0f && "max should be >= 0.0");
    eigen_assert(min < max && "min should be < max");
    float nudged_min, nudged_max, nudged_scale, inv_nudged_scale;
    Nudge(min, max, quant_min, quant_max, &nudged_min, &nudged_max,
          &nudged_scale, &inv_nudged_scale);
    // Integral grid position of real zero; quant_zero == -nudged_min / scale.
    const float quant_zero = floor(-nudged_min * inv_nudged_scale + 0.5f);
    auto clamped = inputs.cwiseMin(nudged_max).cwiseMax(nudged_min);
    auto clamped_shifted = clamped - nudged_min;
    // quant_zero is an integer, so subtracting it inside floor() is
    // equivalent to adding nudged_min back after scaling.
    outputs.device(d) =
        (clamped_shifted * inv_nudged_scale - quant_zero + 0.5f).floor() *
        nudged_scale;
  }
};
// Functor called by FakeQuantWithMinMaxArgsGradientOp to do the work. Compiles
// both for CPU and GPU.
template <typename Device>
struct FakeQuantWithMinMaxArgsGradientFunctor {
  // Straight-through estimator: gradients pass through only where the input
  // fell inside the nudged [min, max] range; the forward op clamps outside
  // it, so the derivative there is zero.
  void operator()(const Device& d, ConstFlat<float> gradients,
                  ConstFlat<float> inputs, const float min, const float max,
                  const int quant_min, const int quant_max,
                  Flat<float> backprops) {
    eigen_assert(min <= 0.0f && "min should be <= 0.0");
    eigen_assert(max >= 0.0f && "max should be >= 0.0");
    eigen_assert(min < max && "min should be < max");
    float nudged_min, nudged_max, nudged_scale, inv_nudged_scale;
    Nudge(min, max, quant_min, quant_max, &nudged_min, &nudged_max,
          &nudged_scale, &inv_nudged_scale);
    // 1.0 where nudged_min <= x <= nudged_max, else 0.0.
    auto between_nudged_min_max =
        (inputs >= nudged_min && inputs <= nudged_max)
            .select(inputs.constant(1.0f), inputs.constant(0.0f));
    backprops.device(d) = gradients * between_nudged_min_max;
  }
};
// Functor called by FakeQuantWithMinMaxVarsOp to do the work. Compiles both
// for CPU and GPU.
template <typename Device>
struct FakeQuantWithMinMaxVarsFunctor {
  // Quantize-dequantize `inputs` using a runtime (variable) [min, max] range
  // read from scalar tensors.
  void operator()(const Device& d, ConstFlat<float> inputs,
                  ConstScalar<float> min, ConstScalar<float> max,
                  const int quant_min, const int quant_max,
                  Flat<float> outputs) {
    const float min_val = min();
    const float max_val = max();
    // If min and max are both zero, we should just return zero.
    if (min_val == 0.0f && max_val == 0.0f) {
      outputs.device(d) = outputs.constant(0.0f);
      return;
    }
    float nudged_min, nudged_max, nudged_scale, inv_nudged_scale;
    Nudge(min_val, max_val, quant_min, quant_max, &nudged_min, &nudged_max,
          &nudged_scale, &inv_nudged_scale);
    // Integral grid position of real zero on the quantized grid.
    const float quant_zero = floor(-nudged_min * inv_nudged_scale + 0.5f);
    const auto nudged_scale_repl = inputs.constant(nudged_scale);
    // const auto inv_nudged_scale_repl = inputs.constant(inv_nudged_scale);

    const auto clamped = inputs.cwiseMin(nudged_max).cwiseMax(nudged_min);
    const auto clamped_shifted = clamped - nudged_min;
    // NOTE(review): this path divides by nudged_scale rather than
    // multiplying by inv_nudged_scale (cf. the Args functor above) —
    // presumably deliberate for precision; confirm before changing.
    // quant_zero is integral, so subtracting it inside floor() equals
    // adding nudged_min back after scaling.
    outputs.device(d) =
        (clamped_shifted / nudged_scale_repl - quant_zero + 0.5f).floor() *
        nudged_scale_repl;
  }
};
// Functor called by FakeQuantWithMinMaxVarsGradientOp to do the work. Compiles
// both for CPU and GPU.
template <typename Device>
struct FakeQuantWithMinMaxVarsGradientFunctor {
  // Straight-through-estimator gradients for FakeQuantWithMinMaxVars:
  // in-range inputs pass the gradient through to the input; gradient mass
  // from clamped inputs is accumulated into the min/max backprops.
  void operator()(const Device& d, ConstFlat<float> gradients,
                  ConstFlat<float> inputs, ConstScalar<float> min,
                  ConstScalar<float> max, const int quant_min,
                  const int quant_max, Flat<float> backprops_wrt_input,
                  Scalar<float> backprop_wrt_min,
                  Scalar<float> backprop_wrt_max) {
    const float min_val = min();
    const float max_val = max();
    // If min and max are both zero, we propagate everything to inputs.
    if (min_val == 0.0f && max_val == 0.0f) {
      backprops_wrt_input.device(d) = gradients;
      backprop_wrt_min.device(d) = backprop_wrt_min.constant(0.0f);
      backprop_wrt_max.device(d) = backprop_wrt_max.constant(0.0f);
      return;
    }
    float nudged_min, nudged_max, nudged_scale, inv_nudged_scale;
    Nudge(min_val, max_val, quant_min, quant_max, &nudged_min, &nudged_max,
          &nudged_scale, &inv_nudged_scale);

    // 1.0 where nudged_min <= x <= nudged_max, else 0.0.
    const auto between_min_max =
        (inputs >= nudged_min && inputs <= nudged_max)
            .select(inputs.constant(1.0f), inputs.constant(0.0f));
    backprops_wrt_input.device(d) = gradients * between_min_max;

    // Gradient w.r.t. min: sum over inputs clamped at the low end.
    const auto below_min =
        (inputs < nudged_min)
            .select(inputs.constant(1.0f), inputs.constant(0.0f));
    backprop_wrt_min.device(d) = (gradients * below_min).sum();

    // Gradient w.r.t. max: sum over inputs clamped at the high end.
    const auto above_max =
        (inputs > nudged_max)
            .select(inputs.constant(1.0f), inputs.constant(0.0f));
    backprop_wrt_max.device(d) = (gradients * above_max).sum();
  }
};
// Index type used to iterate the per-channel min/max vectors below.
using Index = typename tensorflow::TTypes<float>::ConstTensor::Index;
// Functor called by FakeQuantWithMinMaxVarsPerChannelOp to do the work.
// Compiles both for CPU and GPU.
//
// Already verified: inputs, outputs are of shape [b, d], min, max are of shape
// [d].
template <typename Device>
struct FakeQuantWithMinMaxVarsPerChannelFunctor {
  // Per-channel variant: column i of `inputs` is quantized with its own
  // min(i)/max(i) range.
  void operator()(const Device& d, TTypes<float>::ConstMatrix inputs,
                  ConstVec<float> min, ConstVec<float> max, const int quant_min,
                  const int quant_max, TTypes<float>::Matrix outputs) {
    for (Index i = 0; i < min.size(); ++i) {
      const float min_val = min(i);
      const float max_val = max(i);
      // If min and max are both zero, we should just return zero.
      if (min_val == 0.0f && max_val == 0.0f) {
        auto chip = outputs.chip<1>(i);
        chip.device(d) = chip.constant(0.0f);
        continue;
      }
      float nudged_min, nudged_max, nudged_scale, inv_nudged_scale;
      Nudge(min_val, max_val, quant_min, quant_max, &nudged_min, &nudged_max,
            &nudged_scale, &inv_nudged_scale);
      // Integral grid position of real zero on this channel's grid;
      // subtracting it inside floor() equals adding nudged_min after scaling.
      const float quant_zero = floor(-nudged_min * inv_nudged_scale + 0.5f);
      const auto clamped =
          inputs.chip<1>(i).cwiseMin(nudged_max).cwiseMax(nudged_min);
      const auto clamped_shifted = clamped - nudged_min;
      outputs.chip<1>(i).device(d) =
          (clamped_shifted * inv_nudged_scale - quant_zero + 0.5f).floor() *
          nudged_scale;
    }
  }
};
// Functor called by FakeQuantWithMinMaxVarsPerChannelGradientOp to do the work.
// Compiles both for CPU and GPU.
//
// Already verified: gradients, inputs, backprops_wrt_input are of shape [b, d],
// min, max, backprop_wrt_min, backprop_wrt_max are of shape [d].
template <typename Device>
struct FakeQuantWithMinMaxVarsPerChannelGradientFunctor {
  void operator()(const Device& d, TTypes<float>::ConstMatrix gradients,
                  TTypes<float>::ConstMatrix inputs, ConstVec<float> min,
                  ConstVec<float> max, const int quant_min, const int quant_max,
                  TTypes<float>::Matrix backprops_wrt_input,
                  Vec<float> backprop_wrt_min, Vec<float> backprop_wrt_max) {
    // Per-channel gradients: channel i uses its own [min(i), max(i)].
    for (Index i = 0; i < min.size(); ++i) {
      const float min_val = min(i);
      const float max_val = max(i);
      const auto gradients_chip = gradients.chip<1>(i);
      const auto inputs_chip = inputs.chip<1>(i);
      // If min and max are both zero, we propagate everything to inputs.
      if (min_val == 0.0f && max_val == 0.0f) {
        backprops_wrt_input.chip<1>(i).device(d) = gradients_chip;
        auto min_chip = backprop_wrt_min.chip<0>(i);
        auto max_chip = backprop_wrt_max.chip<0>(i);
        min_chip.device(d) = min_chip.constant(0.0f);
        max_chip.device(d) = max_chip.constant(0.0f);
        continue;
      }
      float nudged_min, nudged_max, nudged_scale, inv_nudged_scale;
      Nudge(min_val, max_val, quant_min, quant_max, &nudged_min, &nudged_max,
            &nudged_scale, &inv_nudged_scale);
      // Input gradient: pass the incoming gradient through only where the
      // input fell inside the nudged [min, max] range (straight-through).
      const auto between_min_max =
          (inputs_chip >= nudged_min && inputs_chip <= nudged_max)
              .select(inputs_chip.constant(1.0f), inputs_chip.constant(0.0f));
      backprops_wrt_input.chip<1>(i).device(d) =
          gradients_chip * between_min_max;
      // min gradient: sum of gradients over the rows where the input was
      // clamped below nudged_min.
      const auto below_min =
          (inputs_chip < nudged_min)
              .select(inputs_chip.constant(1.0f), inputs_chip.constant(0.0f));
      Eigen::DSizes<Index, 1> reduce(0);  // reduce over the batch dimension
      backprop_wrt_min.chip<0>(i).device(d) =
          (gradients_chip * below_min).sum(reduce);
      // max gradient: likewise for inputs clamped above nudged_max.
      const auto above_max =
          (inputs_chip > nudged_max)
              .select(inputs_chip.constant(1.0f), inputs_chip.constant(0.0f));
      backprop_wrt_max.chip<0>(i).device(d) =
          (gradients_chip * above_max).sum(reduce);
    }
  }
};
} // namespace tensorflow
#endif // TENSORFLOW_CORE_KERNELS_FAKE_QUANT_OPS_FUNCTOR_H_
|
c
|
github
|
https://github.com/tensorflow/tensorflow
|
tensorflow/core/kernels/fake_quant_ops_functor.h
|
import sys
from types import MethodType, ModuleType, FunctionType, InstanceType, ClassType
class Dummy: pass  # Empty placeholder used by multi_setattr to build missing attribute chains.
class Patcher:
    """Classify an entity (class / instance / module) for patching.

    NOTE(review): Python 2 only -- relies on ``types.ClassType`` and
    ``types.InstanceType``, which were removed in Python 3.
    """
    def __init__(self, entity):
        # ent_type is one of 'C' (old-style class), 'I' (instance),
        # 'M' (module); set by __determine_type below.
        self.ent_type = None
        self.entity = entity
        self.__determine_type()
    def __determine_type(self):
        # Classify self.entity; any other type is rejected outright.
        if isinstance(self.entity, ClassType):
            self.ent_type = 'C'
        elif isinstance(self.entity, InstanceType):
            self.ent_type = 'I'
        elif isinstance(self.entity, ModuleType):
            self.ent_type = 'M'
        else:
            raise Exception("Un-supported entity type %s" % type(self.entity))
    def patch_class(self, old_class, new_class):
        # NOTE(review): this method only validates the entity type and never
        # performs the patch itself -- it looks unfinished (presumably meant
        # to delegate to ModulePatcher.patch_class). TODO: confirm intent.
        if self.ent_type != 'M':
            raise Exception("Entity should be a module for patching a class")
class ModulePatcher:
    """Swap functions/classes on a module, keeping each original as NAME_.

    Every patch is recorded so rollback() can re-swap the pair later
    (calling rollback() twice toggles the patches back on).
    """
    def __init__(self, module):
        self.module = module
        self.patched = []
    def __patch_class(self, old_class, new_class, name):
        # Preserve the original under "<name>_", install the replacement.
        setattr(self.module, name + '_', old_class)
        setattr(self.module, name, new_class)
    def patch_class(self, old_class, new_class):
        cls_name = old_class.__name__
        self.__patch_class(old_class, new_class, cls_name)
        self.patched.append(('C', cls_name))
    def __patch_function(self, old_func, new_func, old_name):
        # Functions go through the module dict directly.
        module_vars = self.module.__dict__
        module_vars[old_name + '_'] = old_func
        module_vars[old_name] = new_func
    def patch_function(self, old_func, new_func):
        fn_name = old_func.__name__
        self.__patch_function(old_func, new_func, fn_name)
        self.patched.append(('F', fn_name))
    def rollback(self):
        # Re-swap every recorded patch in order.
        for kind, name in self.patched:
            if kind == 'F':
                self.rollback_function(name)
            elif kind == 'C':
                self.rollback_class(name)
    def rollback_function(self, name):
        current = getattr(self.module, name)
        saved = getattr(self.module, name + '_')
        self.__patch_function(current, saved, name)
    def rollback_class(self, name):
        current = getattr(self.module, name)
        saved = getattr(self.module, name + '_')
        self.__patch_class(current, saved, name)
class ClassPatcher:
    """Patch methods on a class object, keeping originals under NAME_.

    NOTE(review): Python 2 only -- the three-argument form
    ``MethodType(func, None, cls)`` creates an *unbound* method, which was
    removed in Python 3.
    """
    def __init__(self, cls):
        self.cls = cls
    def patch_method(self, old_func, new_func):
        # Save the original as "<name>_" and install new_func as an unbound
        # method on the class.
        func_name = old_func.__name__
        setattr(self.cls, func_name + '_', old_func)
        setattr(self.cls, func_name, MethodType(new_func, None, self.cls))
    def patch_classmethod(self, old_func, new_func):
        # Bind new_func to the class object itself (classmethod-like).
        func_name = old_func.__name__
        setattr(self.cls, func_name + '_', old_func)
        setattr(self.cls, func_name, MethodType(new_func, self.cls))
    def patch_ctor(self, new_func):
        # Replace __init__ with new_func.
        self.patch_method(self.cls.__init__, new_func)
    def patch_ctor_empty(self):
        # Replace __init__ with a no-op that accepts any arguments.
        def empty(self, *args, **kargs): pass
        self.patch_ctor(empty)
    def patch_method_empty(self, old_func):
        # NOTE(review): wraps 'empty' in MethodType here AND again inside
        # patch_classmethod -- the double binding looks suspicious; confirm
        # the intended semantics before relying on this.
        def empty(self, *args, **kargs): pass
        self.patch_classmethod(old_func, MethodType(empty, self.cls))
    def add_method_empty(self, func_name):
        # Add a no-op unbound method under func_name.
        def empty(self, *args, **kargs): pass
        setattr(self.cls, func_name, MethodType(empty, None, self.cls))
    def add_function(self, func_name, func):
        # Add func as an unbound method under func_name.
        setattr(self.cls, func_name, MethodType(func, None, self.cls))
class ObjectPatcher:
    """Patch methods on a single instance, keeping originals under NAME_."""
    def __init__(self, obj):
        self.obj = obj
    def patch_method(self, old_func, new_func):
        # Stash the original as "<name>_", then install the replacement
        # bound to this instance.
        target = self.obj
        method_name = old_func.__name__
        setattr(target, method_name + '_', old_func)
        setattr(target, method_name, MethodType(new_func, target))
    def patch_method_empty(self, old_func):
        # Replace the method with a do-nothing callable of any arity.
        def empty(self, *args, **kargs): pass
        self.patch_method(old_func, empty)
    def add_method_empty(self, func_name):
        # Attach an unbound no-op function directly as an attribute.
        def empty(self, *args, **kargs): pass
        setattr(self.obj, func_name, empty)
def multi_setattr(obj, attr_str, value):
    """Set a (possibly dotted) attribute path on obj, creating intermediates.

    For ``attr_str == "a.b.c"``, walks obj.a.b and sets ``.c = value``;
    any missing intermediate attribute is filled in with a fresh Dummy
    instance so the chain always exists.

    Fix: the original crashed with AttributeError on a dot-free attr_str
    (e.g. "name"), because the walk variable stayed None and it called
    setattr(None, ...). A plain name is now simply set on obj itself.
    """
    parts = attr_str.split('.')
    target = obj
    for name in parts[:-1]:
        if not hasattr(target, name):
            # Create the intermediate node only when it is actually missing
            # (the original allocated a Dummy on every iteration).
            setattr(target, name, Dummy())
        target = getattr(target, name)
    setattr(target, parts[-1], value)
def multi_setattr_empty_function(obj, attr_str):
    """Install a do-nothing method, bound to obj, at the dotted path attr_str."""
    def empty(self, *args, **kargs): pass
    bound = MethodType(empty, obj)
    multi_setattr(obj, attr_str, bound)
def create_function(rpc, attr_str, value):
    """Attach `value` as a method bound to `rpc` at the dotted path attr_str."""
    method = MethodType(value, rpc)
    multi_setattr(rpc, attr_str, method)
#def patch_module_function(module, old_func, new_func):
# func_name = old_func.__name__
# module.__dict__[func_name + '_'] = old_func
# module.__dict__[func_name ] = new_func
#
#
#def patch_instance_method(obj, old_func, new_func):
# func_name = old_func.__name__
# setattr(obj, func_name + '_', old_func)
# setattr(obj, func_name, MethodType(new_func, obj))
#
#
#def patch_class_method(cls, old_func, new_func):
# func_name = old_func.__name__
# setattr(cls, func_name + '_', old_func)
# setattr(cls, func_name, new_func)
#
#
#def patch_module_class(module, old_class, new_class):
# class_name = old_class.__name__
# setattr(module, class_name + '_', old_class)
# setattr(module, class_name , new_class)
if __name__ == "__main__":
    # Ad-hoc manual test driver. NOTE(review): Python 2 only (print
    # statements throughout).
    def my_add(a,b):
        return a*b
    class MyPerson:
        def __init__(self):
            print "orig ctor"
            self.name = "B"
        def __getattr__(self, name):
            # Fallback: every missing attribute reads as 1.
            return 1
        def greet(self):
            print type(self)
            print "Hello", self.name
        def g(self, name):
            return 2
    pt = ClassPatcher(MyPerson)
    #pt.add_function('__getattr__', g)
    p = MyPerson()
    print p.abc
    p.greet()
    # NOTE(review): everything below exit(0) is dead code, and 'mod' is
    # never defined/imported -- test_mod would raise NameError if reached.
    exit(0)
    def test_mod():
        print mod.add(10,20)
        p = mod.Person()
        p.greet()
        print "-" * 10
    mp = ModulePatcher(mod)
    mp.patch_function(mod.add, my_add)
    mp.patch_class(mod.Person, MyPerson)
    test_mod()
    mp.rollback()
    test_mod()
    mp.rollback()
    test_mod()
|
unknown
|
codeparrot/codeparrot-clean
| ||
# Two Gold Stars
# Question 2: Combatting Link Spam
# One of the problems with our page ranking system is pages can
# collude with each other to improve their page ranks. We consider
# A->B a reciprocal link if there is a link path from B to A of length
# equal to or below the collusion level, k. The length of a link path
# is the number of links which are taken to travel from one page to the
# other.
# If k = 0, then a link from A to A is a reciprocal link for node A,
# since no links need to be taken to get from A to A.
# If k=1, B->A would count as a reciprocal link if there is a link
# A->B, which includes one link and so is of length 1. (it requires
# two parties, A and B, to collude to increase each others page rank).
# If k=2, B->A would count as a reciprocal link for node A if there is
# a path A->C->B, for some page C, (link path of length 2),
# or a direct link A-> B (link path of length 1).
# Modify the compute_ranks code to
# - take an extra input k, which is a non-negative integer, and
# - exclude reciprocal links of length up to and including k from
# helping the page rank.
def is_reciprocal(graph, source, destination, k):
    """Return True if there is a link path from `destination` back to
    `source` of length <= k (i.e. a reciprocal/colluding link).

    k == 0 only counts the trivial zero-length path (source is destination);
    otherwise a direct back-link counts, and longer paths are checked by
    recursing through destination's out-links with the budget reduced by one.
    """
    if k == 0:
        return destination == source
    if source in graph[destination]:
        return True
    return any(is_reciprocal(graph, source, nxt, k - 1)
               for nxt in graph[destination])
def compute_ranks(graph, k):
    """Compute page ranks, excluding reciprocal links up to collusion level k.

    `graph` maps each page to the list of pages it links to. A link
    node -> page contributes to page's rank only when it is NOT reciprocal,
    i.e. there is no link path of length <= k from page back to node
    (see is_reciprocal).
    """
    damping = 0.8  # damping factor
    iterations = 10
    npages = len(graph)
    # Start every page with an equal share of rank.
    ranks = {page: 1.0 / npages for page in graph}
    for _ in range(iterations):
        newranks = {}
        for page in graph:
            rank = (1 - damping) / npages
            for node, outlinks in graph.items():
                # Only non-reciprocal in-links pass rank along.
                if page in outlinks and not is_reciprocal(graph, node, page, k):
                    rank += damping * (ranks[node] / len(outlinks))
            newranks[page] = rank
        ranks = newranks
    return ranks
# For example
# NOTE(review): this demo uses the Python 2 print statement.
g = {'a': ['a', 'b', 'c'], 'b':['a'], 'c':['d'], 'd':['a']}
# print compute_ranks(g, 0) # the a->a link is reciprocal
# >>> {'a': 0.26676872354238684, 'c': 0.1216391112164609,
#      'b': 0.1216391112164609, 'd': 0.1476647842238683}
print compute_ranks(g, 1) # a->a, a->b, b->a links are reciprocal
#>>> {'a': 0.14761759762962962, 'c': 0.08936469270123457,
#     'b': 0.04999999999999999, 'd': 0.12202199703703702}
#print compute_ranks(g, 2)
# a->a, a->b, b->a, a->c, c->d, d->a links are reciprocal
# (so all pages end up with the same rank)
#>>> {'a': 0.04999999999999999, 'c': 0.04999999999999999,
#     'b': 0.04999999999999999, 'd': 0.04999999999999999}
|
unknown
|
codeparrot/codeparrot-clean
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.