content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
import numpy as np
import matplotlib.pyplot as plt
#LOAD IMAGE
name='luna-1.jpeg'
x=plt.imread(name)
#ROTATE BY SWITCHING X AND Y DIMENSIONS
if(name=="luna-2.jpeg"):
plt.imshow(x); plt.show()
x=np.transpose(x,axes=[1,0,2])
#SHOW IMAGE
plt.imshow(x); plt.show()
#QUICK INFO ON IMAGE
get_info(x)
#CROP
plt.imshow(x[0:int(0.45*x.shape[0]),:]); plt.show()
plt.imshow(x[:,0:int(0.45*x.shape[0])]); plt.show()
#SURFACE PLOT
#REDUCE RESOLUTION-1
from skimage.transform import rescale, resize, downscale_local_mean
factor=10
if(name=="luna-2.jpeg"): factor=50
x = resize(x, (x.shape[0] // factor, x.shape[1] // factor), anti_aliasing=True)
get_info(x)
plt.imshow(x); plt.show()
#SURFACE PLOT
from skimage.color import rgb2gray
tmp=rgb2gray(x)
surface_plot(tmp)
| [
198,
11748,
299,
32152,
355,
45941,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
198,
2,
35613,
8959,
11879,
198,
3672,
11639,
75,
9613,
12,
16,
13,
73,
22071,
6,
198,
87,
28,
489,
83,
13,
320,
961,
7,
3672,
... | 2.09434 | 371 |
import dataclasses
import enum
import os
import typing
import dacite
# import jsonschema
import yaml
import default
import util
@dataclasses.dataclass
@dataclasses.dataclass
@dataclasses.dataclass
@dataclasses.dataclass
@dataclasses.dataclass
@dataclasses.dataclass
class EnumValueYamlDumper(yaml.SafeDumper):
'''
a yaml.SafeDumper that will dump enum objects using their values.
'''
def get_buildfile_path(path: str, image_name: str) -> str:
'''
Returns the path of the buildfile.
:param path: The path of the image directory.
'''
image_dir = util.get_image_dir(path, image_name)
buildfile = os.path.join(
image_dir,
default.Config.BUILDFILE_NAME.value)
if not os.path.isfile(buildfile):
raise ValueError(f'buildfile does not exist: {buildfile}')
return buildfile
def get_build_config(
path: str,
image_name: str) -> typing.Optional[ImageBuildConfig]:
'''
Returns an ImageBuildConfig object from the default buildfile
located in the image directory.
:param path: The path of the images directory.
:param name: Name of the image, must exist as a directory.
'''
buildfile_path = get_buildfile_path(path, image_name)
return ImageBuildConfig.from_dict(
util.load_yaml(
buildfile_path
)
)
| [
11748,
4818,
330,
28958,
198,
11748,
33829,
198,
11748,
28686,
198,
11748,
19720,
198,
198,
11748,
288,
330,
578,
198,
2,
1330,
44804,
684,
2395,
2611,
198,
11748,
331,
43695,
198,
198,
11748,
4277,
198,
11748,
7736,
628,
628,
198,
31,
... | 2.599617 | 522 |
from django.contrib.auth import get_user_model
from django.contrib.postgres.fields import ArrayField
from django.core.validators import FileExtensionValidator
from django.db import models
from utils.model_utils import PathAndRename, default_1d_array
from utils.slug import slugify
| [
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
1330,
651,
62,
7220,
62,
19849,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
7353,
34239,
13,
25747,
1330,
15690,
15878,
198,
6738,
42625,
14208,
13,
7295,
13,
12102,
2024,
1330,
9220,
11627,... | 3.357143 | 84 |
from maya import cmds
from maya import mel
commandDict = {}
commandDict['uvTextureEditor'] = "textureEditor.png"
commandDict['uVSetEditor'] = "sphere.png"
commandDict['uvProjection_automatic'] = "polyAutoProj.png"
commandDict['uvProjection_automatic_options'] = "polyAutoProj.png"
commandDict['bestPlaneTexturingTool'] = "bestPlaneTxt.png"
commandDict['uvProjection_cameraBased'] = "polyCameraUVs.png"
commandDict['uvProjection_cameraBased_options'] = "polyCameraUVs.png"
| [
6738,
743,
64,
1330,
23991,
82,
198,
6738,
743,
64,
1330,
7758,
628,
198,
21812,
35,
713,
796,
23884,
628,
628,
628,
628,
198,
198,
21812,
35,
713,
17816,
14795,
32742,
17171,
20520,
796,
366,
41293,
17171,
13,
11134,
1,
198,
21812,
... | 2.909639 | 166 |
msg = ('Hello World')
print(msg) | [
19662,
796,
19203,
15496,
2159,
11537,
198,
198,
4798,
7,
19662,
8
] | 2.75 | 12 |
import streamlit as st
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import requests, os
from gwpy.timeseries import TimeSeries
from gwosc.locate import get_urls
from gwosc import datasets
from gwosc.api import fetch_event_json
from copy import deepcopy
import base64
# Use the non-interactive Agg backend, which is recommended as a
# thread-safe backend.
# See https://matplotlib.org/3.3.2/faq/howto_faq.html#working-with-threads.
import matplotlib as mpl
mpl.use("agg")
##############################################################################
# Workaround for the limited multi-threading support in matplotlib.
# Per the docs, we will avoid using `matplotlib.pyplot` for figures:
# https://matplotlib.org/3.3.2/faq/howto_faq.html#how-to-use-matplotlib-in-a-web-application-server.
# Moreover, we will guard all operations on the figure instances by the
# class-level lock in the Agg backend.
##############################################################################
from matplotlib.backends.backend_agg import RendererAgg
_lock = RendererAgg.lock
# -- Set page config
apptitle = 'GW Quickview'
st.set_page_config(page_title=apptitle, page_icon=":eyeglasses:")
# -- Default detector list
detectorlist = ['H1','L1', 'V1']
# Title the app
st.title('Gravitational Wave Quickview')
st.markdown("""
* Use the menu at left to select data and set plot parameters
* Your plots will appear below
""")
@st.cache #-- Magic command to cache data
st.sidebar.markdown("## Select Data Time and Detector")
# -- Get list of events
# find_datasets(catalog='GWTC-1-confident',type='events')
eventlist = datasets.find_datasets(type='events')
eventlist = [name.split('-')[0] for name in eventlist if name[0:2] == 'GW']
eventset = set([name for name in eventlist])
eventlist = list(eventset)
eventlist.sort()
#-- Set time by GPS or event
select_event = st.sidebar.selectbox('How do you want to find data?',
['By event name', 'By GPS'])
if select_event == 'By GPS':
# -- Set a GPS time:
str_t0 = st.sidebar.text_input('GPS Time', '1126259462.4') # -- GW150914
t0 = float(str_t0)
st.sidebar.markdown("""
Example times in the H1 detector:
* 1126259462.4 (GW150914)
* 1187008882.4 (GW170817)
* 933200215 (hardware injection)
* 1132401286.33 (Koi Fish Glitch)
""")
else:
chosen_event = st.sidebar.selectbox('Select Event', eventlist)
t0 = datasets.event_gps(chosen_event)
detectorlist = list(datasets.event_detectors(chosen_event))
detectorlist.sort()
st.subheader(chosen_event)
st.write('GPS:', t0)
# -- Experiment to display masses
try:
jsoninfo = fetch_event_json(chosen_event)
for name, nameinfo in jsoninfo['events'].items():
st.write('Mass 1:', nameinfo['mass_1_source'], 'M$_{\odot}$')
st.write('Mass 2:', nameinfo['mass_2_source'], 'M$_{\odot}$')
#st.write('Distance:', int(nameinfo['luminosity_distance']), 'Mpc')
st.write('Network SNR:', int(nameinfo['network_matched_filter_snr']))
eventurl = 'https://gw-osc.org/eventapi/html/event/{}'.format(chosen_event)
st.markdown('Event page: {}'.format(eventurl))
st.write('\n')
except:
pass
#-- Choose detector as H1, L1, or V1
detector = st.sidebar.selectbox('Detector', detectorlist)
# -- Create sidebar for plot controls
st.sidebar.markdown('## Set Plot Parameters')
dtboth = st.sidebar.slider('Time Range (seconds)', 0.1, 8.0, 1.0) # min, max, default
dt = dtboth / 2.0
st.sidebar.markdown('#### Whitened and band-passed data')
whiten = st.sidebar.checkbox('Whiten?', value=True)
freqrange = st.sidebar.slider('Band-pass frequency range (Hz)', min_value=10, max_value=2000, value=(30,400))
# -- Create sidebar for Q-transform controls
st.sidebar.markdown('#### Q-tranform plot')
vmax = st.sidebar.slider('Colorbar Max Energy', 10, 500, 25) # min, max, default
qcenter = st.sidebar.slider('Q-value', 5, 120, 5) # min, max, default
qrange = (int(qcenter*0.8), int(qcenter*1.2))
#-- Create a text element and let the reader know the data is loading.
strain_load_state = st.text('Loading data...this may take a minute')
try:
strain_data = load_gw(t0, detector)
except:
st.text('Data load failed. Try a different time and detector pair.')
st.text('Problems can be reported to gwosc@igwn.org')
raise st.ScriptRunner.StopException
strain_load_state.text('Loading data...done!')
#-- Make a time series plot
cropstart = t0-0.2
cropend = t0+0.1
cropstart = t0 - dt
cropend = t0 + dt
st.subheader('Raw data')
center = int(t0)
strain = deepcopy(strain_data)
with _lock:
fig1 = strain.crop(cropstart, cropend).plot()
#fig1 = cropped.plot()
st.pyplot(fig1, clear_figure=True)
# -- Try whitened and band-passed plot
# -- Whiten and bandpass data
st.subheader('Whitened and Band-passed Data')
if whiten:
white_data = strain.whiten()
bp_data = white_data.bandpass(freqrange[0], freqrange[1])
else:
bp_data = strain.bandpass(freqrange[0], freqrange[1])
bp_cropped = bp_data.crop(cropstart, cropend)
with _lock:
fig3 = bp_cropped.plot()
st.pyplot(fig3, clear_figure=True)
# -- Allow data download
download = {'Time':bp_cropped.times, 'Strain':bp_cropped.value}
df = pd.DataFrame(download)
csv = df.to_csv(index=False)
b64 = base64.b64encode(csv.encode()).decode() # some strings <-> bytes conversions necessary here
href = f'<a href="data:file/csv;base64,{b64}">Download Data as CSV File</a>'
st.markdown(href, unsafe_allow_html=True)
# -- Notes on whitening
with st.beta_expander("See notes"):
st.markdown("""
* Whitening is a process that re-weights a signal, so that all frequency bins have a nearly equal amount of noise.
* A band-pass filter uses both a low frequency cutoff and a high frequency cutoff, and only passes signals in the frequency band between these values.
See also:
* [Signal Processing Tutorial](https://share.streamlit.io/jkanner/streamlit-audio/main/app.py)
""")
st.subheader('Q-transform')
hq = strain.q_transform(outseg=(t0-dt, t0+dt), qrange=qrange)
with _lock:
fig4 = hq.plot()
ax = fig4.gca()
fig4.colorbar(label="Normalised energy", vmax=vmax, vmin=0)
ax.grid(False)
ax.set_yscale('log')
ax.set_ylim(bottom=15)
st.pyplot(fig4, clear_figure=True)
with st.beta_expander("See notes"):
st.markdown("""
A Q-transform plot shows how a signal’s frequency changes with time.
* The x-axis shows time
* The y-axis shows frequency
The color scale shows the amount of “energy” or “signal power” in each time-frequency pixel.
A parameter called “Q” refers to the quality factor. A higher quality factor corresponds to a larger number of cycles in each time-frequency pixel.
For gravitational-wave signals, binary black holes are most clear with lower Q values (Q = 5-20), where binary neutron star mergers work better with higher Q values (Q = 80 - 120).
See also:
* [GWpy q-transform](https://gwpy.github.io/docs/stable/examples/timeseries/qscan.html)
* [Reading Time-frequency plots](https://labcit.ligo.caltech.edu/~jkanner/aapt/web/math.html#tfplot)
* [Shourov Chatterji PhD Thesis](https://dspace.mit.edu/handle/1721.1/34388)
""")
st.subheader("About this app")
st.markdown("""
This app displays data from LIGO, Virgo, and GEO downloaded from
the Gravitational Wave Open Science Center at https://gw-openscience.org .
You can see how this works in the [Quickview Jupyter Notebook](https://github.com/losc-tutorial/quickview) or
[see the code](https://github.com/jkanner/streamlit-dataview).
""")
| [
11748,
4269,
18250,
355,
336,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
198,
11748,
7007,
11,
28686,
198,
6738,
308,
86,
9078,
13,
2235... | 2.699965 | 2,853 |
import pandas as pa
import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import Imputer,LabelEncoder,OneHotEncoder
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import cross_val_score
#%%
datset=pa.read_csv('district_wise_crop_success.csv')
datset_state=datset.drop_duplicates('state')['state']
#%%
ll=[]
for i in datset_state:
distinct_state_crop=datset.loc[(datset['state'] == i)].drop_duplicates('crop')['crop']
for j in distinct_state_crop:
l=[]
crop_vals=datset.loc[(datset['state'] == i) & (datset['crop'] == j)]['success_rate']
mean_success=crop_vals.mean()
l=[i,j,mean_success]
ll.append(l)
#%%
p=pa.DataFrame(ll)
p.to_csv('state_wise_crop_success.csv') | [
11748,
19798,
292,
355,
14187,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
220,
220,
198,
6738,
1341,
35720,
13,
3866,
36948,
1330,
1846,
10549,
11,
33986,
27195,
12342,
11,
3198,
21... | 2.455357 | 336 |
import build
import numpy as np
from collections import defaultdict
import pickle
import random
random.seed = 1
from mm.path_inference_private.proj_templates import get_em_evaluation_fnames, get_evaluation_em_data_file
from mm.path_inference_private.evaluation import LEARNING_METHOD_IDX, METRIC_NAME_IDX, STRAT_NAME_IDX
from mm.path_inference_private.plot_utils import *
import pylab as pl
import matplotlib.pyplot as plt
__author__ = 'tjhunter'
res = 60
all_em_evals = get_all_eval_em(res=res)
all_evals = {}
all_evals[res] = get_all_eval(res=res)
all_evals[res].update(all_em_evals)
num_boot_samples = 1000
strategy = 'VITERBI'
learning_display = {'most_likely_simple' : 'MaxLL - simple', \
'em_simple' : 'EM - simple', \
'most_likely_fancy' : 'MaxLL - complex', \
'em_large_simple_1' : 'EM - simple (large)', \
'em_large_fancy_1' : 'EM - complex (large)', \
}
learning_methods = list(learning_display.keys())
learning_methods.sort()
#learning_methods = ['most_likely_simple', 'most_likely_fancy', 'em_simple', 'em_large_simple_1', 'em_large_simple_2', 'em_large_simple_3', 'em_large_simple_4', 'em_large_simple_5', 'em_large_fancy_1', 'em_large_fancy_2', 'em_large_fancy_3', 'em_large_fancy_4', 'em_large_fancy_5']
num_methods = len(learning_methods)
strategy = 'VITERBI'
data_by_method = []
median_vals = []
mean_vals = []
lower_percentile = []
upper_percentile = []
for learning in learning_methods:
data = get_true_point_by_strat(all_eval=all_evals[res],
learning_method=learning)[strategy]
data_by_method.append(data)
stat_dis = bootstrap_percent_wrong(data, num_boot_samples)
median_vals.append(stat_dis[int(num_boot_samples*0.5)])
mean_vals.append(np.mean(stat_dis))
lower_percentile.append(stat_dis[int(num_boot_samples*0.20)])
upper_percentile.append(stat_dis[int(num_boot_samples*0.80)])
fig = pl.figure(1, figsize=(10,8))
fig.clf()
ax = fig.add_subplot(111)
plt.subplots_adjust(left=0.075, right=0.95, top=0.9, bottom=0.30)
xs = np.arange(num_methods)+1
plot_error_bar(ax, xs, mean_vals, lower_percentile, upper_percentile, fmt='ko')
ax.set_xlim(0.5, num_methods+0.5)
ax.set_ylim(0.05, 0.07)
ax.set_title("Comparison of point assignment errors for 1-minute sampling intervals \n (Viterbi reconstruction)")
ax.set_xticks(xs)
xtickNames = ax.set_xticklabels([learning_display[method_name] for method_name in learning_methods])
plt.setp(xtickNames, rotation=60, fontsize=12)
ax.set_xlabel("Learning method")
ax.set_ylabel("Proportion of false point assignments")
build.save_figure(fig,"figures-pif/em_true_points_percentage")
#fig.savefig("%s/em_true_points_percentage.pdf"%saving_dir())
''' TRUE PATHS
'''
strategy = 'VITERBI'
median_vals = []
mean_vals = []
lower_percentile = []
upper_percentile = []
for learning in learning_methods:
data = get_true_path_by_strat(all_eval=all_evals[res], learning_method=learning)[strategy]
stat_dis = bootstrap_percent_wrong(data, num_boot_samples)
median_vals.append(stat_dis[int(num_boot_samples*0.5)])
mean_vals.append(np.mean(stat_dis))
lower_percentile.append(stat_dis[int(num_boot_samples*0.20)])
upper_percentile.append(stat_dis[int(num_boot_samples*0.80)])
fig = pl.figure(2, figsize=(10,8))
fig.clf()
ax = fig.add_subplot(111)
plt.subplots_adjust(left=0.1, right=0.98, top=0.9, bottom=0.30)
xs = np.arange(num_methods)+1
plot_error_bar(ax, xs, mean_vals, lower_percentile, upper_percentile, fmt='ko')
ax.set_xlim(0.5, num_methods+0.5)
ax.set_title("Comparison of path assignment errors for 1-minute sampling intervals \n (Viterbi reconstruction)")
ax.set_xticks(xs)
xtickNames = ax.set_xticklabels([learning_display[method_name] for method_name in learning_methods])
plt.setp(xtickNames, rotation=60, fontsize=12)
ax.set_xlabel("Learning method")
ax.set_ylabel("Proportion of false path assignments")
build.save_figure(fig,'figures-pif/em_true_paths_percentage')
#fig.savefig("%s/em_true_paths_percentage.pdf"%saving_dir())
''' LL Paths
'''
strategy = 'LAGGED2'
median_vals = []
mean_vals = []
lower_percentile = []
upper_percentile = []
outliers = []
for learning in learning_methods:
data = get_paths_ll_by_strat(all_eval=all_evals[res], learning_method=learning)[strategy]
data.sort()
median_vals.append(data[int(len(data)*0.5)])
mean_vals.append(np.mean(data))
lower_percentile.append(data[int(len(data)*0.1)])
up_idx = int(len(data)*0.9)
upper_percentile.append(data[up_idx])
outliers.append(data[up_idx:][::3])
fig = pl.figure(3, figsize=(10,8))
fig.clf()
ax = fig.add_subplot(111)
plt.subplots_adjust(left=0.1, right=0.98, top=0.9, bottom=0.30)
xs = np.arange(num_methods)+1
plot_error_bar(ax, xs, mean_vals, lower_percentile, upper_percentile, fmt='ko')
ax.set_xlim(0.5, num_methods+0.5)
ax.set_ylim(0.0, 25)
ax.set_title("Comparison of the log-likelihoods of the true paths for 1-minute sampling intervals \n (2-lagged smoothing reconstruction)")
ax.set_xticks(xs)
xtickNames = ax.set_xticklabels([learning_display[method_name] for method_name in learning_methods])
plt.setp(xtickNames, rotation=60, fontsize=12)
ax.set_xlabel("Learning method")
ax.set_ylabel("Log-likelihood of true path")
build.save_figure(fig, "figures-pif/em_ll_paths")
#fig.savefig("%s/em_ll_paths.pdf"%saving_dir())
""" NOT USED IN PAPER BEYOND THIS LINE
"""
print "early system exit"
import sys
sys.exit(0)
""" Entropy over paths.
"""
strategy = 'OFFLINE'
median_vals = []
mean_vals = []
lower_percentile = []
upper_percentile = []
for learning in learning_methods:
data = get_paths_entropy_by_strat(all_eval=all_evals[res], learning_method=learning)[strategy]
data.sort()
median_vals.append(data[int(len(data)*0.5)])
mean_vals.append(np.mean(data))
lower_percentile.append(data[int(len(data)*0.05)])
upper_percentile.append(data[int(len(data)*0.95)])
fig = pl.figure(3)
fig.clf()
ax = fig.gca()
ax.hold(True)
xs = np.arange(num_methods)+1
plot_error_bar(ax, xs, mean_vals, lower_percentile, upper_percentile, fmt='o')
ax.set_xlim(0.5, num_methods+0.5)
ax.set_xticks(xs)
xtickNames = ax.set_xticklabels(learning_methods)
plt.setp(xtickNames, rotation=80, fontsize=12)
''' PATH RELATIVE COVERAGE.
'''
strategy = 'ONLINE'
median_vals = []
mean_vals = []
lower_percentile = []
upper_percentile = []
for learning in learning_methods:
data = get_paths_relative_coverage_by_strat(all_eval=all_evals[res], learning_method=learning)[strategy]
data.sort()
median_vals.append(data[int(len(data)*0.5)])
mean_vals.append(np.mean(data))
lower_percentile.append(data[int(len(data)*0.10)])
upper_percentile.append(data[int(len(data)*0.88)])
fig = pl.figure(5)
fig.clf()
ax = fig.gca()
ax.hold(True)
xs = np.arange(num_methods)+1
plot_error_bar(ax, xs, median_vals, lower_percentile, upper_percentile, fmt='o')
ax.set_xlim(0.5, num_methods+0.5)
ax.set_xticks(xs)
xtickNames = ax.set_xticklabels(learning_methods)
plt.setp(xtickNames, rotation=80, fontsize=12)
| [
11748,
1382,
198,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
17268,
1330,
4277,
11600,
198,
11748,
2298,
293,
198,
11748,
4738,
198,
25120,
13,
28826,
796,
352,
198,
6738,
8085,
13,
6978,
62,
259,
4288,
62,
19734,
13,
1676,
73,
62... | 2.419333 | 2,907 |
import glob
import os
import sys
import time
from random import randint
import cv2
import numpy as np
import torch
from PIL import Image
from models import *
from utils.datasets import *
from utils.utils import *
# from utils.utils import xyxy2xywh
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
trackerTypes = [
'BOOSTING', 'MIL', 'KCF', 'TLD', 'MEDIANFLOW', 'GOTURN', 'MOSSE', 'CSRT'
]
if __name__ == "__main__":
#################################################
cfg = './yolov3-cbam.cfg'
img_size = 416
weight_path = './best.pt'
img_file = "./test.jpg" #"./images/train2014/0137-2112.jpg"
data_cfg = "./dataset1.data"
conf_thres = 0.5
nms_thres = 0.5
device = torch_utils.select_device()
trackerType = "BOOSTING"
videoPath = "./demo.mp4"
display_width = 800
display_height = 600
#################################################
yolo = InferYOLOv3(cfg,
img_size,
weight_path,
data_cfg,
device,
conf_thres=conf_thres,
nms_thres=nms_thres)
cap = cv2.VideoCapture(videoPath)
_, frame = cap.read()
bbox_xyxy, cls_conf, cls_ids = yolo.predict(frame)
print("Shape of Frame:", frame.shape)
print("Using %s algorithm." % trackerType)
bboxes = []
colors = []
if bbox_xyxy is not None:
for i in range(len(bbox_xyxy)):
# we need left, top, w, h
bbox_cxcywh = coordTrans(bbox_xyxy)
bboxes.append(
tuple(int(bbox_cxcywh[i][j].tolist()) for j in range(4)))
colors.append((randint(64, 255), randint(64,
255), randint(64, 255)))
print('Selected bounding boxes {}[x1,y1,w,h]'.format(bboxes))
del yolo
# '''
# test for the first image
# '''
# for i, bbox in enumerate(bboxes):
# p1 = (int(bbox[0]), int(bbox[1]))
# p2 = (int(bbox[0]+bbox[2]), int(bbox[1]+bbox[3]))
# cv2.rectangle(frame, p1, p2, colors[i], 2, 1)
# cv2.imwrite("./test_output.jpg", frame)
fourcc = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter("out.avi", fourcc, 24,
(frame.shape[1], frame.shape[0]))
multiTracker = cv2.MultiTracker_create()
# Initialize MultiTracker
for bbox in bboxes:
multiTracker.add(createTrackerByName(trackerType), frame, bbox)
# cv2.namedWindow("test", cv2.WINDOW_NORMAL)
# cv2.resizeWindow("test", display_width, display_height)
cnt = 0
# Process video and track objects
while cap.isOpened():
success, frame = cap.read()
cnt += 1
print(cnt, end='\r')
sys.stdout.flush()
if cnt > 1000:
break
if not success:
break
# get updated location of objects in subsequent frames
success, boxes = multiTracker.update(frame)
# draw tracked objects
for i, newbox in enumerate(boxes):
# x1,y1,w,h =
p1 = (int(newbox[0]), int(newbox[1]))
p2 = (int(newbox[2]+newbox[0]), int(newbox[1]+newbox[3]))
cv2.rectangle(frame, p1, p2, colors[i], 2, 1)
out.write(frame)
# show frame
# cv2.imshow('MultiTracker', frame)
# quit on ESC button
# if cv2.waitKey(1) & 0xFF == 27: # Esc pressed
# break
os.system("mv out.avi %s.avi"%(trackerType))
os.system("ffmpeg -y -i out.avi -r 10 -b:a 32k output.mp4")
| [
11748,
15095,
198,
11748,
28686,
198,
11748,
25064,
198,
11748,
640,
198,
6738,
4738,
1330,
43720,
600,
198,
198,
11748,
269,
85,
17,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
28034,
198,
6738,
350,
4146,
1330,
7412,
198,
198,
673... | 2.023203 | 1,767 |
import os
import numpy as np
import pandas as pd
from scipy.misc import imread
import tensorflow as tf
from six.moves import urllib
import keras
from keras.models import Sequential
from keras.layers import Dense, Flatten, Reshape, InputLayer
from keras.regularizers import L1L2
from scipy.misc import imsave
import gzip
import os
import sys
import time
import csv
IMAGE_SIZE = 28
NUM_CHANNELS = 1
PIXEL_DEPTH = 255
NUM_LABELS = 10
SOURCE_URL = 'http://yann.lecun.com/exdb/mnist/'
print('Hi')
def extract_data(filename, num_images):
"""Extract the images into a 4D tensor [image index, y, x, channels].
Values are rescaled from [0, 255] down to [-0.5, 0.5].
"""
print('Extracting', filename)
with gzip.open(filename) as bytestream:
bytestream.read(16)
buf = bytestream.read(IMAGE_SIZE * IMAGE_SIZE * num_images)
data = np.frombuffer(buf, dtype=np.uint8).astype(np.float32)
#data = (data - (PIXEL_DEPTH / 2.0)) / PIXEL_DEPTH
data = data.reshape(num_images, IMAGE_SIZE, IMAGE_SIZE, 1)
return data
def extract_labels(filename, num_images):
"""Extract the labels into a vector of int64 label IDs."""
print('Extracting', filename)
with gzip.open(filename) as bytestream:
bytestream.read(8)
buf = bytestream.read(1 * num_images)
labels = np.frombuffer(buf, dtype=np.uint8).astype(np.int64)
return labels
def maybe_download(filename):
"""Download the data from Yann's website, unless it's already here."""
root_dir = os.path.abspath('.')
data_dir = os.path.join(root_dir, 'Data')
WORK_DIRECTORY = data_dir
print('a')
if not tf.gfile.Exists(WORK_DIRECTORY):
print('b')
tf.gfile.MakeDirs(WORK_DIRECTORY)
filepath = os.path.join(WORK_DIRECTORY, filename)
if not tf.gfile.Exists(filepath):
print('c')
filepath, _ = urllib.request.urlretrieve(SOURCE_URL + filename, filepath)
with tf.gfile.GFile(filepath) as f:
size = f.size()
print('Successfully downloaded', filename, size, 'bytes.')
print('d')
return filepath
# to stop potential randomness
seed = 128
rng = np.random.RandomState(seed)
# set path
root_dir = os.path.abspath('.')
data_dir = os.path.join(root_dir, 'Data')
print('data dir')
print(data_dir)
train_data_filename = maybe_download('train-images-idx3-ubyte.gz')
train_labels_filename = maybe_download('train-labels-idx1-ubyte.gz')
test_data_filename = maybe_download('t10k-images-idx3-ubyte.gz')
test_labels_filename = maybe_download('t10k-labels-idx1-ubyte.gz')
print('e')
print(train_data_filename)
train_data = extract_data(train_data_filename, 60000)
train_labels = extract_labels(train_labels_filename, 60000)
test_data = extract_data(test_data_filename, 10000)
test_labels = extract_labels(test_labels_filename, 10000)
if not os.path.isdir("mnist/train-images"):
os.makedirs("mnist/train-images")
if not os.path.isdir("mnist/test-images"):
os.makedirs("mnist/test-images")
# process train data
with open("mnist/train-labels.csv", 'w') as csvFile:
writer = csv.writer(csvFile, delimiter=',', quotechar='"')
for i in range(len(train_data)):
imsave("mnist/train-images/" + str(i) + ".jpg", train_data[i][:,:,0])
writer.writerow(["train-images/" + str(i) + ".jpg", train_labels[i]])
# repeat for test data
with open("mnist/test-labels.csv", 'w') as csvFile:
writer = csv.writer(csvFile, delimiter=',', quotechar='"')
for i in range(len(test_data)):
imsave("mnist/test-images/" + str(i) + ".jpg", test_data[i][:,:,0])
writer.writerow(["test-images/" + str(i) + ".jpg", test_labels[i]])
# load data
train = pd.read_csv(os.path.join('D:\\gan\\mnist','train-labels.csv'))
test = pd.read_csv(os.path.join('D:\\gan\\mnist', 'test-labels.csv'))
print('ds')
print(train)
temp = []
for index,row in train.iterrows():
print("heres")
print(row)
print("dg")
print(row[0])
print("ddg")
print(row[1])
image_path = os.path.join(data_dir, 'train-images', img_name)
img = imread(image_path, flatten=True)
img = img.astype('float32')
temp.append(img)
train_x = np.stack(temp)
train_x = train_x / 255.
# print image
img_name = rng.choice(train.filename)
image_path = os.path.join(data_dir, 'train-images', img_name)
img = imread(filepath, flatten=True)
pylab.imshow(img, cmap='gray')
pylab.axis('off')
pylab.show()
g_input_shape = 100
d_input_shape = (28, 28)
hidden_1_num_units = 500
hidden_2_num_units = 500
g_output_num_units = 784
d_output_num_units = 1
epochs = 25
batch_size = 128
# generator
model_1 = Sequential([
Dense(units=hidden_1_num_units, input_dim=g_input_shape, activation='relu', kernel_regularizer=L1L2(1e-5, 1e-5)),
Dense(units=hidden_2_num_units, activation='relu', kernel_regularizer=L1L2(1e-5, 1e-5)),
Dense(units=g_output_num_units, activation='sigmoid', kernel_regularizer=L1L2(1e-5, 1e-5)),
Reshape(d_input_shape),
])
# discriminator
model_2 = Sequential([
InputLayer(input_shape=d_input_shape),
Flatten(),
Dense(units=hidden_1_num_units, activation='relu', kernel_regularizer=L1L2(1e-5, 1e-5)),
Dense(units=hidden_2_num_units, activation='relu', kernel_regularizer=L1L2(1e-5, 1e-5)),
Dense(units=d_output_num_units, activation='sigmoid', kernel_regularizer=L1L2(1e-5, 1e-5)),
])
from keras_adversarial import AdversarialModel, simple_gan, gan_targets
from keras_adversarial import AdversarialOptimizerSimultaneous, normal_latent_sampling
gan = simple_gan(model_1, model_2, normal_latent_sampling((100,)))
model = AdversarialModel(base_model=gan,player_params=[model_1.trainable_weights, model_2.trainable_weights])
model.adversarial_compile(adversarial_optimizer=AdversarialOptimizerSimultaneous(), player_optimizers=['adam', 'adam'], loss='binary_crossentropy')
history = model.fit(x=train_x, y=gan_targets(train_x.shape[0]), epochs=10, batch_size=batch_size)
plt.plot(history.history['player_0_loss'])
plt.plot(history.history['player_1_loss'])
plt.plot(history.history['loss'])
zsamples = np.random.normal(size=(10, 100))
pred = model_1.predict(zsamples)
for i in range(pred.shape[0]):
plt.imshow(pred[i, :], cmap='gray')
plt.show()
| [
11748,
28686,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67,
198,
6738,
629,
541,
88,
13,
44374,
1330,
545,
961,
198,
11748,
11192,
273,
11125,
355,
48700,
198,
198,
6738,
2237,
13,
76,
5241,
1330,
2956,
297... | 2.505125 | 2,439 |
from behave import given, when, then
@given(u'this step exists')
@when(u'I run "python manage.py behave"')
@then(u'I should see the behave tests run')
@then(u'django_ready should be called')
| [
6738,
17438,
1330,
1813,
11,
618,
11,
788,
628,
198,
31,
35569,
7,
84,
470,
14363,
2239,
7160,
11537,
628,
198,
31,
12518,
7,
84,
6,
40,
1057,
366,
29412,
6687,
13,
9078,
17438,
1,
11537,
628,
198,
31,
8524,
7,
84,
6,
40,
815,
... | 2.970149 | 67 |
from flask import Flask, jsonify, request, redirect, session, render_template, url_for
from flask_mail import Mail, Message
import uuid
from app import db, serializer, mail
from itsdangerous import URLSafeTimedSerializer, SignatureExpired
| [
6738,
42903,
1330,
46947,
11,
33918,
1958,
11,
2581,
11,
18941,
11,
6246,
11,
8543,
62,
28243,
11,
19016,
62,
1640,
201,
198,
6738,
42903,
62,
4529,
1330,
11099,
11,
16000,
201,
198,
11748,
334,
27112,
201,
198,
6738,
598,
1330,
20613... | 3.424658 | 73 |
"""A CSV annotation writer that reads the bbox in x, y, w, h format."""
from discolight.annotations import BoundingBox
from .types import CSVRow, CSVAnnotationLoader
class WidthHeightCSV(CSVAnnotationLoader):
"""Loads annotations from a CSV file in the following format.
image_name, x_min, y_min, width, height, label
"""
def get_csv_row(self, row):
"""Return the image and annotation from a CSV row."""
x_min = float(row["x_min"])
y_min = float(row["y_min"])
width = float(row["width"])
height = float(row["height"])
x_max = x_min + width
y_max = y_min + height
image_name = row["image_name"]
class_idx = row["label"]
return CSVRow(image_name=image_name,
bbox=BoundingBox(x_min, y_min, x_max, y_max, class_idx))
| [
37811,
32,
44189,
23025,
6260,
326,
9743,
262,
275,
3524,
287,
2124,
11,
331,
11,
266,
11,
289,
5794,
526,
15931,
198,
6738,
1221,
349,
432,
13,
34574,
602,
1330,
347,
9969,
14253,
198,
6738,
764,
19199,
1330,
9429,
13024,
322,
11,
... | 2.366197 | 355 |
import asyncio
import random
import discord
import drop.errors
from discord.ext import commands
from data.extdata import get_language_str, wait_for_user, get_file_type
from drop.tempban import *
from drop.errors import *
with open("data/embed_colors.json") as f:
colors = json.load(f)
color_list = [c for c in colors.values()]
class Moderation(commands.Cog):
"""
Commands that (hopefully) may help you moderate the server
"""
@commands.command(
name='purge',
description='Deletes a certain amount of messages.',
usage='5',
brief='Deletes a set amount of messages'
)
@commands.has_guild_permissions(manage_messages=True)
@commands.command(
name='kick',
description='Kicks a specified user. Not sure why you\'d want to use the bot for this, but okay.',
usage='<@offender> reason (optional)',
brief='Kicks a user'
)
@commands.has_guild_permissions(manage_messages=True)
@kick_command.error
@commands.command(
name='ban',
description='Bans a specified user. Not sure why you wouldn\'t want to do it yourself, but okay.',
usage='<@offender> reason (optional)',
brief='Bans a user'
)
@commands.has_guild_permissions(manage_messages=True)
@ban_command.error
@commands.command(
name='unban',
description='Unbans a specified user. Again, I don\'t know why you wouldn\' want to do it yourself.',
usage='<@offender>',
brief='Unbans a user'
)
@commands.has_guild_permissions(manage_messages=True)
@unban_command.error
@commands.command(
name='storepins',
description='Stores all of the pinned messages in a certain channel.',
usage='storepins <#channel to store pins in>',
brief='Store all of the pins in a channel',
aliases=['savepins', 'pincenter']
)
@commands.has_permissions(manage_messages=True)
@storepins_command.error
@commands.command(
name='tempban',
description='This will ban someone, then unban them after a specified time.',
usage='<@offender 1> <@offender 2> 1h30',
brief='Temporarily bans a user'
)
@commands.has_guild_permissions(ban_members=True)
@tempban_command.error
@commands.command(
name='ban_status',
description='Checks if the user is temp-banned, and for how long/by who they have been temp-banned.',
usage='Offender#0123 (can also just be Offender, or their user ID)',
brief='Checks a user\'s ban status',
aliases=["checktempban", "check_tempban", "tempbanstatus", "banstatus"]
)
@commands.has_guild_permissions(manage_roles=True)
@temp_ban_status_command.error
@commands.Cog.listener()
| [
11748,
30351,
952,
198,
11748,
4738,
198,
198,
11748,
36446,
198,
11748,
4268,
13,
48277,
198,
6738,
36446,
13,
2302,
1330,
9729,
198,
6738,
1366,
13,
2302,
7890,
1330,
651,
62,
16129,
62,
2536,
11,
4043,
62,
1640,
62,
7220,
11,
651,
... | 2.533998 | 1,103 |
from models.Sizers.Sizer import Sizer as Sizer
import backtrader as bt
from dataclasses import dataclass
from dataclasses import field
@dataclass
class DefaultSizer(Sizer):
    """Engine's default position sizer.

    Wraps backtrader's ``PercentSizer`` configured to commit 10% of the
    available cash per trade.
    """

    # backtrader sizer class used when the caller supplies none.
    sizer: bt.Sizer = bt.sizers.PercentSizer
    # Keyword arguments forwarded to the sizer; a default_factory avoids
    # sharing one mutable dict between instances.
    parameters: dict = field(default_factory=lambda: dict(percents=10))
| [
6738,
4981,
13,
50,
11341,
13,
50,
7509,
1330,
311,
7509,
355,
311,
7509,
198,
11748,
736,
2213,
5067,
355,
275,
83,
198,
6738,
4818,
330,
28958,
1330,
4818,
330,
31172,
198,
6738,
4818,
330,
28958,
1330,
2214,
628,
198,
31,
19608,
... | 2.836879 | 141 |
# -*- coding: utf-8 -*-
import os
import subprocess
import sys
import warnings
from pathlib import Path
from pprint import pprint
from typing import Dict, List
import yaml
from envparse import env
from duplicity_backup_s3.defaults import (
FULL_IF_OLDER_THAN,
DUPLICITY_BACKUP_ARGS,
DUPLICITY_VERBOSITY,
NEED_SUBPROCESS_SHELL,
DUPLICITY_MORE_VERBOSITY,
DUPLICITY_BASIC_ARGS,
DUPLICITY_DEBUG_VERBOSITY,
)
from duplicity_backup_s3.utils import echo_info, echo_failure
# /bin/duplicity
# -v3
# --dry-run
# --full-if-older-than 7D
# --s3-use-new-style
# --s3-european-buckets
# --no-encryption
# --exclude-device-files
# --include=/opt/ke-chain/*-media
# --include=/opt/ke-chain/var/archives
# --exclude=**
# /opt/ke-chain/ # src
# s3+http://kew-prod-backup-target/kec-prod23/ # target
class DuplicityS3(object):
    """
    Main object for Duplicity S3 Commands.

    :ivar options: arguments provided to the class
    :ivar verbose: verbosity level
    :ivar dry_run: do dry_run only
    :ivar env: environment object from parse environment
    """

    def __init__(self, **options):
        """Initiate of the DuplicityS3 object with options.

        :param options: dictionary with options.
        :type options: Dict[Any:Any]
        """
        self._config_file = Path(Path.cwd() / options.get("config"))  # type: Path
        self._config = {}  # place holder where the configuration is read in
        self.options = options  # type: Dict
        self.read_config(path=self._config_file)
        self.verbose = options.get("verbose", False)  # type: bool

        # in case of verbosity be more than 3 verbose
        duplicity_verbosity = (
            DUPLICITY_MORE_VERBOSITY if options.get("verbose") else DUPLICITY_VERBOSITY
        )
        if options.get("debug"):
            duplicity_verbosity = DUPLICITY_DEBUG_VERBOSITY

        self._args = [
            "-v{}".format(duplicity_verbosity)
        ] + DUPLICITY_BASIC_ARGS  # type: List
        self.dry_run = options.get("dry_run", False)  # type: bool

        # setting environment
        self.env = env
        with warnings.catch_warnings():  # catch the warnings that env puts out.
            warnings.simplefilter("ignore", UserWarning)
            self.env.read_envfile()

    def _base_args(self) -> List:
        """Return a fresh copy of the common duplicity arguments.

        BUGFIX: the action methods used to alias ``self._args`` and append
        flags such as ``--dry-run`` or ``--force`` to it in place, so every
        call permanently polluted the shared argument list for all later
        calls on the same instance. Handing out a copy keeps ``self._args``
        pristine.
        """
        return list(self._args)

    def _remote_uri(self) -> str:
        """Return the S3 target URI built from the `remote` config section."""
        return "s3+http://{bucket}/{path}".format(
            **self._config.get("remote")
        )  # type: ignore

    def read_config(self, path: Path = None) -> None:
        """Read the config file.

        Stores the configuration in the protected variable `self._config`.

        :param path: path of the configuration file; defaults to the path
            provided at construction time.
        :raises ValueError: when the configuration file does not exist.
        """
        if path is None:
            path = self._config_file
        if not path.exists():
            raise ValueError(
                "Could not find the configuration file in path '{}'".format(path)
            )

        self._config = {}  # type: ignore
        # BUGFIX: open the validated `path` instead of unconditionally
        # re-opening `self._config_file` — the parameter used to be ignored.
        with path.open() as fd:
            self._config = yaml.safe_load(fd)

    def get_aws_secrets(self) -> Dict:
        """AWS secrets either from the environment or from the configuration file."""
        if (
            "aws" in self._config
            and "AWS_SECRET_ACCESS_KEY" in self._config.get("aws")  # type: ignore
            and "AWS_ACCESS_KEY_ID" in self._config.get("aws")  # type: ignore
        ):
            return self._config.get("aws")  # type: ignore
        else:
            # NOTE(review): `env(...) or self._config.get("aws")` can place a
            # whole dict under the AWS_ACCESS_KEY_ID key when the environment
            # variable is empty — looks unintended, confirm before changing.
            return dict(
                AWS_ACCESS_KEY_ID=self.env("AWS_ACCESS_KEY_ID", default="")
                or self._config.get("aws"),
                AWS_SECRET_ACCESS_KEY=self.env("AWS_SECRET_ACCESS_KEY", default=""),
            )

    def _execute(self, *cmd_args, runtime_env: Dict = None) -> int:
        """Execute the duplicity command.

        :param cmd_args: positional arguments appended after the duplicity
            executable (action first, then flags, then paths/URIs).
        :param runtime_env: environment passed to the subprocess (AWS keys).
        :return: the duplicity process return code.
        """
        command = [self.duplicity_cmd(), *cmd_args]
        if self.verbose:
            print("command used:")
            print([*cmd_args])
            print("environment:")
            # Never print anything containing "SECRET".
            pprint(
                [
                    "{} = {}".format(k, v)
                    for k, v in os.environ.items()
                    if ("SECRET" not in k) and (("AWS" in k) or ("DUPLICITY" in k))
                ]
            )
        self.last_results = subprocess.run(
            command, shell=NEED_SUBPROCESS_SHELL, env=runtime_env
        )
        try:
            self.last_results.check_returncode()
        except subprocess.CalledProcessError as e:
            echo_failure(
                "The duplicity command exited with an error. "
                "Command may not have succeeded."
            )
            if self.verbose:
                echo_info("More information on the error:\n{}".format(e.output))

        return self.last_results.returncode

    @classmethod
    def duplicity_cmd(cls, search_path=None) -> str:
        """
        Check if duplicity is installed and return version.

        :param search_path: path to search for duplicity if not in PATH. defaults None.
        :return: path to duplicity
        :raises OSError: When the duplicity command is not found in PATH.
        """
        from shutil import which

        duplicity_cmd = which("duplicity", path=search_path)

        if not duplicity_cmd:
            raise OSError("Could not find `duplicity` in path, is it installed?")

        return duplicity_cmd

    @staticmethod
    def get_cludes(includes: List[str] = None, excludes: List[str] = None) -> List[str]:
        """
        Get includes or excludes command arguments.

        :param includes: list of file includes (absolute paths, not relative from root)
        :param excludes: list of file excludes (absolute paths, not relative from root)
        :return: list of `--include=...` / `--exclude=...` arguments
        """
        arg_list = []
        if includes:
            arg_list.extend(["--include={}".format(path) for path in includes])
        if excludes:
            arg_list.extend(["--exclude={}".format(path) for path in excludes])
        return arg_list

    def do_incremental(self) -> int:
        """
        Incremental duplicity Backup.

        :return: error code
        """
        source = self._config.get("backuproot")
        target = self._remote_uri()

        args = (
            self._base_args()
            + DUPLICITY_BACKUP_ARGS
            + [
                "--full-if-older-than",
                self._config.get("full_if_older_than", FULL_IF_OLDER_THAN),
            ]
            + self.get_cludes(
                includes=self._config.get("includes"),
                excludes=self._config.get("excludes"),
            )
        )
        runtime_env = self.get_aws_secrets()
        action = "incr"
        if self.dry_run:
            args.append("--dry-run")

        return self._execute(action, *args, source, target, runtime_env=runtime_env)

    def do_restore(self) -> int:
        """Restore the backup.

        From the duplicity man page:

        restore [--file-to-restore <relpath>] [--time <time>] <url> <target_folder>
            You can restore the full monty or selected folders/files from
            a specific time. Use the relative path as it is printed by
            list-current-files. Usually not needed as duplicity enters
            restore mode when it detects that the URL comes before the
            local folder.

        :return: return_code of duplicity
        """
        args = self._base_args()
        action = "restore"
        restore_from_url = self._remote_uri()
        target = self.options.get("target")
        runtime_env = self.get_aws_secrets()

        if self.dry_run:
            args.append("--dry-run")
        if self.options.get("file") is not None:
            args.extend(["--file-to-restore", self.options.get("file")])
        if self.options.get("time") is not None:
            args.extend(["--time", self.options.get("time")])

        if self.verbose:
            echo_info("restoring backup in directory: {}".format(target))

        return self._execute(action, *args, restore_from_url, target, runtime_env=runtime_env)

    def do_verify(self) -> int:
        """Verify the backup.

        From the duplicity man page:

        Verify [--compare-data] [--time <time>] [--file-to-restore <rel_path>]
        <url> <local_path>
            Restore backup contents temporarily file by file and compare against
            the local path's contents. Duplicity will exit with a non-zero error
            level if any files are different. On verbosity level info (4) or
            higher, a message for each file that has changed will be logged.

            The --file-to-restore option restricts verify to that file or folder.
            The --time option allows to select a backup to verify against.
            The --compare-data option enables data comparison.

        :return: return_code of duplicity
        """
        from duplicity_backup_s3.utils import temp_chdir

        # Verify against a throwaway working directory that is cleaned up
        # automatically when the context exits.
        with temp_chdir() as target:
            source = self._remote_uri()
            args = self._base_args()
            runtime_env = self.get_aws_secrets()
            action = "verify"

            if self.dry_run:
                args.append("--dry-run")
            if self.options.get("file") is not None:
                args.extend(["--file-to-restore", self.options.get("file")])
            if self.options.get("time") is not None:
                args.extend(["--time", self.options.get("time")])

            if self.verbose:
                echo_info("verifying backup in directory: {}".format(target))

            return self._execute(action, *args, source, target, runtime_env=runtime_env)

    def do_cleanup(self) -> int:
        """
        Cleanup of dirty remote.

        From the duplicity manpage:

        cleanup [--force] [--extra-clean] <url>
            Delete the extraneous duplicity files on the given backend.
            Non-duplicity files, or files in complete data sets will not
            be deleted. This should only be necessary after a duplicity session
            fails or is aborted prematurely. Note that --force will be
            needed to delete the files instead of just listing them.

        :return: returncode
        """
        target = self._remote_uri()
        args = self._base_args()
        runtime_env = self.get_aws_secrets()
        action = "cleanup"

        if self.dry_run:
            args.append("--dry-run")
        if self.options.get("force"):
            args.append("--force")

        if self.verbose:
            echo_info("Cleanup the backup in target: '{}'".format(target))

        return self._execute(action, *args, target, runtime_env=runtime_env)

    def do_collection_status(self) -> int:
        """
        Check the status of the collections in backup.

        From the docs:

        collection-status <url>
            Summarize the status of the backup repository by printing the chains
            and sets found, and the number of volumes in each.

        :return: returncode
        """
        target = self._remote_uri()
        action = "collection-status"

        if self.verbose:
            echo_info("Collection status of the backup in target: '{}'".format(target))

        return self._execute(
            action, *self._base_args(), target, runtime_env=self.get_aws_secrets()
        )

    def do_list_current_files(self) -> int:
        """
        List current files included in the backup.

        from the docs:

        list-current-files [--time <time>] <url>
            Lists the files contained in the most current backup or backup at
            time. The information will be extracted from the signature files,
            not the archive data itself. Thus the whole archive does not have
            to be downloaded, but on the other hand if the archive has been
            deleted or corrupted, this command will not detect it.

        :return: returncode
        """
        target = self._remote_uri()
        args = self._base_args()
        action = "list-current-files"

        if self.options.get("time") is not None:
            args.extend(["--time", self.options.get("time")])

        if self.verbose:
            echo_info("Collection status of the backup in target: '{}'".format(target))

        return self._execute(action, *args, target, runtime_env=self.get_aws_secrets())

    def do_remove_older(self) -> int:
        """Remove older backup sets.

        From the docs:

        remove-older-than <time> [--force] <url>
            Delete all backup sets older than the given time. Old backup
            sets will not be deleted if backup sets newer than time depend
            on them. See the TIME FORMATS section for more information.
            Note, this action cannot be combined with backup or other
            actions, such as cleanup. Note also that --force will be needed
            to delete the files instead of just listing them.

        remove-all-but-n-full <count> [--force] <url>
            Delete all backups sets that are older than the count:th last
            full backup (in other words, keep the last count full backups
            and associated incremental sets). count must be larger than zero.
            A value of 1 means that only the single most recent backup chain
            will be kept. Note that --force will be needed to delete the f
            iles instead of just listing them.

        remove-all-inc-of-but-n-full <count> [--force] <url>
            Delete incremental sets of all backups sets that are older than
            the count:th last full backup (in other words, keep only old full
            backups and not their increments). count must be larger than zero.
            A value of 1 means that only the single most recent backup chain
            will be kept intact. Note that --force will be needed to delete
            the files instead of just listing them.
        """
        target = self._remote_uri()
        args = self._base_args()

        # Exactly one removal action is selected from the CLI options;
        # the last matching option wins (same precedence as before).
        action = None
        if self.options.get("time") is not None:
            action = ["remove-older-than", self.options.get("time")]
        if self.options.get("all_but_n_full") is not None:
            action = ["remove-all-but-n-full", str(self.options.get("all_but_n_full"))]
        if self.options.get("all_incremental_but_n_full") is not None:
            action = [
                "remove-all-inc-but-n-full",
                str(self.options.get("all_incremental_but_n_full")),
            ]
        if action is None:
            echo_failure("Please provide a remove action")
            if self.verbose:
                print(self.options)
            sys.exit(1)

        if self.options.get("force"):
            args.append("--force")

        if self.verbose:
            echo_info("Collection status of the backup in target: '{}'".format(target))

        return self._execute(*action, *args, target, runtime_env=self.get_aws_secrets())
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
28686,
198,
11748,
850,
14681,
198,
11748,
25064,
198,
11748,
14601,
198,
6738,
3108,
8019,
1330,
10644,
198,
6738,
279,
4798,
1330,
279,
4798,
198,
6738,
19720,
13... | 2.272026 | 6,742 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# vim: set fileencoding=utf-8
#
# ctm2tg: a script to convert CTM files from Kaldi aligner
# to Praat's TextGrid format
#
# Grupo FalaBrasil (2021)
# Universidade Federal do Pará
#
# author: apr 2019
# cassio batista - https://cassota.gitlab.io
# updated on apr 2021
import sys
import os
import shutil
# Tier names written to the TextGrid, one per item index (phonemes,
# syllable-phonemes, word-graphemes, phoneme phrase, grapheme phrase).
TG_NAMES = [
    'fonemeas', 'silabas-fonemas', 'palavras-grafemas',
    'frase-fonemas', 'frase-grafemas',
]

# Phone id used for silence in the Kaldi CTM output.
CTM_SIL_ID = '1' # TODO: keep an eye on sil id occurrences -- CB
if __name__=='__main__':
    # Convert a pair of Kaldi CTM alignment files (graphemes + phone ids)
    # into one Praat TextGrid per utterance. The two CTMs are walked in
    # lock-step, utterance by utterance; a TextGrid is flushed every time
    # the utterance name changes.
    # NOTE(review): TextGrid, check_ctm and get_file_numlines are defined
    # elsewhere in this file/project (not visible here) — confirm.
    if len(sys.argv) != 6:
        print('usage: %s <ctm-graph-file> <ctm-phoneid-file> '
              '<lex-dict> <syll-dict> <out-dir>' % sys.argv[0])
        print(' <ctm-graph-file> is the CTM file with graphemes')
        print(' <ctm-phoneid-file> is te CTM file with phonetic ids')
        print(' <lex-dict> is the lexicon (phonetic dictionary)')
        print(' <syll-dict> is the syllabic dictionary')
        print(' <out-dir> is the output dir to store the textgrid file')
        sys.exit(1)

    tg = TextGrid()
    ctm_graph_filename = sys.argv[1]
    ctm_phone_filename = sys.argv[2]
    lex_filename = sys.argv[3]
    syll_filename = sys.argv[4]
    tg_output_dirname = sys.argv[5]

    # sanity check
    check_ctm('phoneids', ctm_phone_filename)
    check_ctm('graphemes', ctm_graph_filename)
    tg.check_outputdir(tg_output_dirname)

    # Open both CTM streams and count their lines up front so the loops
    # below know when the inputs are exhausted.
    ctm = {
        'graph':open(ctm_graph_filename, 'r'),
        'phnid':open(ctm_phone_filename, 'r')
    }
    ctm_lines = {
        'graph':get_file_numlines(ctm['graph']),
        'phnid':get_file_numlines(ctm['phnid'])
    }

    # Lexicon: grapheme -> space-separated phoneme string. Lines without a
    # tab are kept verbatim as their own entry (best effort).
    lex = {}
    with open(lex_filename) as f:
        for line in f:
            try:
                grapheme, phonemes = line.split('\t')
                lex[grapheme.strip()] = phonemes.strip()
            except ValueError:
                print('**lex problem: %s' % line, '\t' in line)
                lex[line.strip()] = line.strip()

    # Syllabic dictionary: grapheme -> dash-separated syllable string.
    syll = {}
    with open(syll_filename) as f:
        for line in f:
            try:
                grapheme, syllables = line.split('\t')
                syll[grapheme.strip()] = syllables.strip()
            except ValueError:
                print('**syll problem: %s' % line)
                syll[line.strip()] = line.strip()

    # Per-stream cursors and interval boundaries: `start`/`finish` hold the
    # begin/end times of each token, `bt`/`dur` the last parsed CTM fields.
    fp_index = { 'graph': 0, 'phnid': 0 }
    start = { 'graph': [], 'phnid': [], 'sylph': [] }
    finish = { 'graph': [], 'phnid': [], 'sylph': [] }
    bt = { 'graph': 0, 'phnid': 0 }
    dur = { 'graph': 0, 'phnid': 0 }
    tokenlist = {
        'phnid':[], # 0 (1) phoneme ids as they appear in the CTM file
        'sylph':[], # 1 (2) phonemes separated by syllabification of graphemes
        'graph':[], # 2 (3) graphemes (words)
        'phrph':[], # 4 (5) phrase of phonemes separated by the space between graphemes
        'phrgr':[], # 3 (4) phrase of graphemes (words)
        'phone':[], # phonemes as they occur in the list of words
    }

    # treat .grapheme file
    # Prime the loop with the first grapheme entry; utterance names are the
    # part of the filepath after the first underscore.
    filepath, chn, bt['graph'], dur['graph'], grapheme = ctm['graph'].readline().split()
    old_name = curr_name = filepath.split(sep='_', maxsplit=1).pop()
    start['graph'].append(float(bt['graph']))
    finish['graph'].append(float(bt['graph']) + float(dur['graph']))
    tokenlist['graph'].append(grapheme)
    fp_index['graph'] += 1
    while fp_index['phnid'] < ctm_lines['phnid']:
        # Consume grapheme lines until the utterance name changes.
        while curr_name == old_name:
            if fp_index['graph'] >= ctm_lines['graph']:
                break
            filepath, chn, bt['graph'], dur['graph'], grapheme = ctm['graph'].readline().split()
            curr_name = filepath.split(sep='_', maxsplit=1).pop()
            start['graph'].append(float(bt['graph']))
            finish['graph'].append(float(bt['graph']) + float(dur['graph']))
            tokenlist['graph'].append(grapheme)
            fp_index['graph'] += 1

        # FIXME: dumb way to avoid the first word of the next sentence to be
        # appended to the end of the current one
        if fp_index['graph'] < ctm_lines['graph']:
            start['graph'].pop()
            finish['graph'].pop()
            tokenlist['graph'].pop()

        # treat .phoneids file
        # Same walk as above, now over the phone-id CTM for this utterance.
        filepath, chn, bt['phnid'], dur['phnid'], phoneme = ctm['phnid'].readline().split()
        curr_name = filepath.split(sep='_', maxsplit=1).pop()
        start['phnid'].append(float(bt['phnid']))
        finish['phnid'].append(float(bt['phnid']) + float(dur['phnid']))
        tokenlist['phnid'].append(phoneme)
        fp_index['phnid'] += 1
        while curr_name == old_name:
            if fp_index['phnid'] >= ctm_lines['phnid']:
                break
            filepath, chn, bt['phnid'], dur['phnid'], phoneme = ctm['phnid'].readline().split()
            curr_name = filepath.split(sep='_', maxsplit=1).pop()
            start['phnid'].append(float(bt['phnid']))
            finish['phnid'].append(float(bt['phnid']) + float(dur['phnid']))
            tokenlist['phnid'].append(phoneme)
            fp_index['phnid'] += 1

        # FIXME: dumb way to avoid the first phoneme of the next sentence to be
        # appended to the end of the current one
        if fp_index['phnid'] < ctm_lines['phnid']:
            start['phnid'].pop()
            finish['phnid'].pop()
            tokenlist['phnid'].pop()

        # prepare tg item's basic data structures
        # Derive syllable-phoneme, per-word-phoneme and phrase tiers from the
        # word list; a few irregular words are hard-coded exceptions.
        tokenlist['phone'] = []
        for word in tokenlist['graph']:
            if word == '<UNK>':
                tokenlist['sylph'].append(word)
                tokenlist['phone'].append(word)
                tokenlist['phrph'].append(word)
                tokenlist['phrgr'].append(word)
                continue
            elif word == 'cinquenta':
                tokenlist['sylph'].append('si~')
                tokenlist['sylph'].append('kwe~')
                tokenlist['sylph'].append('ta')
            elif word == 'veloz':
                tokenlist['sylph'].append('ve')
                tokenlist['sylph'].append('lOjs')
            elif word == 'dez':
                tokenlist['sylph'].append('dEjs')
            else:
                for sylph in syll[word].split('-'):
                    tokenlist['sylph'].append(sylph.replace('\'',''))
            phonemes = lex[word]
            for phone in phonemes.split():
                tokenlist['phone'].append(phone)
            tokenlist['phrph'].append(phonemes.replace(' ', ''))
            tokenlist['phrgr'].append(word)

        # write things to textgrid file
        with open('%s/%s.TextGrid' % (tg_output_dirname, old_name), 'w') as f:
            sys.stdout.write('\r%s' % old_name)
            sys.stdout.flush()
            f.write(tg.get_mainheader(finish['graph'][-1]))
            for item in range(5):
                f.write(tg.get_itemcontent(item, tokenlist, start, finish))

        # flush vars
        # Re-seed the accumulators with the token that triggered the
        # utterance change (it belongs to the next utterance).
        start['graph'] = [float(bt['graph'])]
        finish['graph'] = [float(bt['graph']) + float(dur['graph'])]
        tokenlist['graph'] = [grapheme]
        old_name = curr_name
        start['phnid'] = [float(bt['phnid'])]
        finish['phnid'] = [float(bt['phnid']) + float(dur['phnid'])]
        tokenlist['phnid'] = [phoneme]
        tokenlist['sylph'] = []
        tokenlist['phrph'] = []
        tokenlist['phrgr'] = []

    print('\tdone!')
    ctm['graph'].close()
    ctm['phnid'].close()
### EOF ###
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
43907,
25,
900,
2393,
12685,
7656,
28,
40477,
12,
23,
198,
2,
198,
2,
269,
17209,
17,
25297,
25,
257,
4226,
... | 1.984542 | 3,752 |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Fine-tuning the library models for named entity recognition on CoNLL-2003 (Bert or Roberta). """
from __future__ import absolute_import, division, print_function
import argparse
import glob
import logging
import os
import random
import numpy as np
import torch
from tensorboardX import SummaryWriter
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm, trange
from transformers import AdamW, get_linear_schedule_with_warmup
from transformers import WEIGHTS_NAME, BertConfig, BertForTokenClassification, BertTokenizer
# Support running this file both as a standalone script (absolute imports)
# and as a package module (relative imports).
if __name__ == "__main__":
    from metrics import precision, recall, f1
    from load_data import convert_examples_to_features, get_labels, read_examples_from_file
    from model import BertForBinaryTokenClassification
else:
    from .metrics import precision, recall, f1
    from .load_data import convert_examples_to_features, get_labels, read_examples_from_file
    from .model import BertForBinaryTokenClassification
# Module-level logger for training progress messages.
logger = logging.getLogger(__name__)

# All pretrained checkpoint identifiers known to the installed transformers.
ALL_MODELS = tuple(BertConfig.pretrained_config_archive_map.keys())

# Maps a model-type key to its (config class, model class, tokenizer class).
MODEL_CLASSES = {
    "bert": (BertConfig, BertForBinaryTokenClassification, BertTokenizer)
}
def train(args, train_dataset, model, tokenizer, labels, pad_token_label_id):
    """Train the model.

    Runs the full training loop: optimizer/scheduler setup, optional fp16
    (apex) and multi-GPU/distributed wrapping, gradient accumulation,
    periodic evaluation/logging to TensorBoard and checkpointing.

    :param args: argparse namespace with all hyper-parameters.
    :param train_dataset: TensorDataset of pre-featurized training examples.
    :param model: the (token-classification) model to train.
    :param tokenizer: tokenizer (unused here beyond evaluation calls).
    :param labels: label lists; labels[0] holds the span label vocabulary.
    :param pad_token_label_id: label id used for padding positions.
    :return: (global_step, average training loss per step)
    """
    if args.local_rank in [-1, 0]:
        tb_writer = SummaryWriter()

    args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
    train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)
    train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size)

    # Either a fixed number of optimizer steps (max_steps) or a number of
    # epochs determines the schedule length t_total.
    if args.max_steps > 0:
        t_total = args.max_steps
        args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
    else:
        t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs

    # Prepare optimizer and schedule (linear warmup and decay)
    no_decay = ["bias", "LayerNorm.weight"]
    optimizer_grouped_parameters = [
        {"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
         "weight_decay": args.weight_decay},
        {"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0}
    ]
    optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
    scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=args.warmup_steps,
                                                num_training_steps=t_total)
    if args.fp16:
        try:
            from apex import amp
        except ImportError:
            raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
        model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)

    # multi-gpu training (should be after apex fp16 initialization)
    if args.n_gpu > 1:
        model = torch.nn.DataParallel(model)

    # Distributed training (should be after apex fp16 initialization)
    if args.local_rank != -1:
        model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank],
                                                          output_device=args.local_rank,
                                                          find_unused_parameters=True)

    # Train!
    logger.info("***** Running training *****")
    logger.info("  Num examples = %d", len(train_dataset))
    logger.info("  Num Epochs = %d", args.num_train_epochs)
    logger.info("  Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size)
    logger.info("  Total train batch size (w. parallel, distributed & accumulation) = %d",
                args.train_batch_size * args.gradient_accumulation_steps * (
                    torch.distributed.get_world_size() if args.local_rank != -1 else 1))
    logger.info("  Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
    logger.info("  Total optimization steps = %d", t_total)

    global_step = 0
    tr_loss, logging_loss = 0.0, 0.0
    model.zero_grad()
    train_iterator = trange(int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0])
    set_seed(args)  # Added here for reproductibility (even between python 2 and 3)
    for _ in train_iterator:
        epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])
        for step, batch in enumerate(epoch_iterator):
            model.train()
            # Only these batch positions are tensors that must live on the
            # GPU; the rest (lists/metadata) stay on the host.
            cuda_indices = [0, 1, 2, 3, 6, 7]
            batch = tuple(t.to(args.device) if i in cuda_indices else t for i, t in enumerate(batch))
            inputs = {"input_ids": batch[0],
                      "attention_mask": batch[1],
                      "token_type_ids": batch[2],
                      "span_labels": batch[3],
                      "span_size": batch[4],
                      "span_list": batch[5],
                      "slot_labels": batch[6],
                      "slot_mask": batch[7],
                      "rel_size": batch[8],
                      "rel_list": batch[9],
                      "question_length": batch[10],
                      "span_null_label_id": labels[0].index('O'),
                      "global_step": global_step,
                      "args": args}

            outputs = model(**inputs)
            loss = outputs[0]  # model outputs are always tuple in pytorch-transformers (see doc)
            # span_logits = outputs[1][0]
            # span_pred = [torch.max(sl, 2)[1] for sl in span_logits].detach().cpu().numpy()
            # print(span_pred.shape)
            # exit()
            if args.gradient_accumulation_steps > 1:
                loss = loss / args.gradient_accumulation_steps

            if args.fp16:
                with amp.scale_loss(loss, optimizer) as scaled_loss:
                    scaled_loss.backward()
            else:
                loss.backward()

            tr_loss += loss.item()
            # Step the optimizer only every gradient_accumulation_steps
            # mini-batches.
            if (step + 1) % args.gradient_accumulation_steps == 0:
                if args.fp16:
                    torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
                else:
                    torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)

                scheduler.step()  # Update learning rate schedule
                optimizer.step()
                model.zero_grad()
                global_step += 1

                if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:
                    # Log metrics
                    if args.local_rank == -1 and args.evaluate_during_training:  # Only evaluate when single GPU otherwise metrics may not average well
                        results, _ = evaluate(args, model, tokenizer, labels, pad_token_label_id, mode="test",filename= os.path.join(args.data_dir, "{}.jsonl".format("test")))
                        for key, value in results.items():
                            tb_writer.add_scalar("eval_{}".format(key), value, global_step)
                    tb_writer.add_scalar("lr", scheduler.get_lr()[0], global_step)
                    tb_writer.add_scalar("loss", (tr_loss - logging_loss) / args.logging_steps, global_step)
                    logging_loss = tr_loss

                if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0:
                    # Save model checkpoint
                    output_dir = os.path.join(args.output_dir, "checkpoint-{}".format(global_step))
                    if not os.path.exists(output_dir):
                        os.makedirs(output_dir)
                    model_to_save = model.module if hasattr(model,
                                                            "module") else model  # Take care of distributed/parallel training
                    model_to_save.save_pretrained(output_dir)
                    torch.save(args, os.path.join(output_dir, "training_args.bin"))
                    logger.info("Saving model checkpoint to %s", output_dir)

            if args.max_steps > 0 and global_step > args.max_steps:
                epoch_iterator.close()
                break
        if args.max_steps > 0 and global_step > args.max_steps:
            train_iterator.close()
            break

    if args.local_rank in [-1, 0]:
        tb_writer.close()

    return global_step, tr_loss / global_step
if __name__ == "__main__":
    # Script entry point. NOTE(review): `main` is defined elsewhere in this
    # file (not visible in this chunk) — confirm.
    main()
| [
2,
19617,
28,
40477,
12,
23,
198,
2,
15069,
2864,
383,
3012,
9552,
15417,
4816,
46665,
290,
383,
12905,
2667,
32388,
3457,
13,
1074,
13,
198,
2,
15069,
357,
66,
8,
2864,
11,
15127,
23929,
44680,
6234,
13,
220,
1439,
2489,
10395,
13,... | 2.270387 | 4,157 |
# Copyright (c) 2022, Leonardo Lamanna
# All rights reserved.
# This source code is licensed under the MIT-style license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import os.path
import sys
import Configuration
from Util.Simulator import Simulator
from Util import preprocessing, LogReader, Dataframe_generator
from OLAM.Learner import *
from Util.PddlParser import PddlParser
# import gym
# import pddlgym # Do not delete this if you want to use pddlgym
# Print full numpy arrays and pandas frames (no truncation) when debugging.
np.set_printoptions(threshold=sys.maxsize)
pd.set_option('display.max_rows', None)
pd.set_option('display.max_columns', None)
pd.set_option('display.width', None)
def compute_all_actionFF():
    """
    Compute all action list through "adl2strips" with pddl problem files

    Builds temporary copies of the input PDDL domain/problem, strips
    operators with no learned preconditions, injects fictitious
    "appear_<type>" predicates/effects so FF grounds every operator, runs
    the FF planner, and parses its output into ground action labels.

    :return: sorted list of ground action labels "op(obj1,obj2,...)"
    """
    # Operators whose learned preconditions are still empty are grounded
    # separately by plain cartesian product (compute_all_action_of_ops).
    op_input = list(get_operator_signatures().keys())
    op_not_learned = []
    for op in op_input:
        op_prec = get_operator_preconditions(op)
        op_prec = [el for el in op_prec if el.find("(and )") == -1]
        if len(op_prec) == 0:
            op_not_learned.append(op)

    all_action_op_not_learned = compute_all_action_of_ops(op_not_learned)

    # Copy input domain to a temp one
    shutil.copyfile("PDDL/domain_input.pddl", "PDDL/domain_input_tmp.pddl")
    with open("PDDL/domain_input_tmp.pddl", "r") as f:
        data = f.read().split("\n")

    # Remove not learned operators
    # NOTE(review): assumes every operator definition spans exactly 5 lines.
    with open("PDDL/domain_input_tmp.pddl", "w") as f:
        removed_rows = []
        for i in range(len(data)):
            if data[i].find(":action") != -1 and data[i].strip().split()[1] in op_not_learned:
                removed_rows.extend(list(range(i, i+5)))
        [f.write(data[i] + "\n") for i in range(len(data)) if i not in removed_rows]

    with open("PDDL/domain_input_tmp.pddl", "r") as f:
        data = f.read().split("\n")

    # Get all possible effects
    with open(os.path.join("PDDL", "operator_uncertain_positive_effects.json")) as f:
        operator_uncertain_positive_effects = json.load(f)

    with open("PDDL/domain_input_tmp.pddl", "w") as f:
        for i in range(len(data)):
            if data[i].find(":predicates") != -1:
                # Add one fictitious "appear_<type>" predicate per object
                # type, plus a "true" dummy predicate.
                all_obj = get_all_object()
                all_obj_fict_preds = ["(appear_{} ?obj - {})".format(k, k) for k in all_obj.keys()]
                data[i] = data[i] + "\n" + "\n".join(all_obj_fict_preds)
                data[i] = data[i] + "\n(true )"
            elif data[i].find(":action") != -1:
                op_name = data[i].strip().split()[1]
                op_params = [el for i,el in enumerate(data[i+1].replace(":parameters", "").strip()[1:-1].split()) if el.startswith("?")]
                # op_params_types = [el for i,el in enumerate(data[i+1].replace(":parameters", "").strip()[1:-1].split())
                #                    if not el.startswith("?") and el.strip() != "-"]
                # Expand "?a ?b - type" shorthand: repeat the type once per
                # preceding parameter.
                single_obj_count = 0
                op_params_types = []
                row = [el for el in data[i+1].replace(":parameters", "").strip()[1:-1].split() if el.strip() != "-"]
                for el in row:
                    if el.startswith("?"):
                        single_obj_count += 1
                    else:
                        [op_params_types.append(el) for _ in range(single_obj_count)]
                        single_obj_count = 0

                op_effect = data[i+5].replace(":effect", "")

                if op_effect.find("(and") != -1:
                    op_effect = op_effect.replace("(and ", "")
                    # op_effect = op_effect.strip()[:-1]
                    op_effect = op_effect.strip()[:-2]

                # Parameters never mentioned in the operator body get a
                # fictitious effect so FF does not prune them away.
                fictitious_eff = ""
                for param in op_params:
                    if " ".join(data[i+2:i+6]).find(param + ")") == -1 and " ".join(data[i+2:i+6]).find(param + " ") == -1:
                        n = op_params.index(param)
                        fictitious_eff += "(appear_{} ?param_{})".format(op_params_types[n], n+1)

                # fictitious_eff = " ".join(["(appear_{} ?param_{})".format(op_params_types[n], n+1) for n in range(len(op_params_types))])
                data[i + 5] = ":effect (and {}))".format(fictitious_eff + op_effect + " " + " ".join(operator_uncertain_positive_effects[op_name]))

        # Add fictitious action
        for i in range(len(data)):
            if data[i].find("(:action") != -1:
                data[i] = "(:action fict\n:parameters ()\n:precondition(and)\n:effect(true))"+ "\n" + data[i]
                break

        # Write new domain temp file
        [f.write(line + "\n") for line in data]

    # Copy facts file to a temp one and remove goal
    shutil.copyfile("PDDL/facts.pddl", "PDDL/facts_tmp.pddl")
    with open("PDDL/facts_tmp.pddl", "r") as f:
        data = f.read().split("\n")
    with open("PDDL/facts_tmp.pddl", "w") as f:
        for i in range(len(data)):
            if data[i].find(":goal") != -1:
                for j in range(i+1, len(data)):
                    data[j] = ""
                if data[i].strip().startswith(")"):
                    data[i] = ")\n(:goal (and (true))))"
                else:
                    data[i] = "(:goal (and (true))))"
        [f.write(el + "\n") for el in data]

    # SECURITY NOTE(review): shell=True with a fixed command string — safe as
    # long as the paths stay hard-coded; do not interpolate user input here.
    bash_command = "Planners/FF/ff -o PDDL/domain_input_tmp.pddl -f PDDL/facts_tmp.pddl -i 114 >> outputff.txt"
    process = subprocess.Popen(bash_command, shell=True)
    process.wait()
    # print("(Preprocessing) -- ADL2STRIPS Finished!")
    #
    # print("(Preprocessing) -- Reading ADL2STRIPS output...")

    # Parse FF's grounding dump: each "-----------operator" section lists
    # the ground instantiations of one operator.
    action_labels = []
    with open("outputff.txt", "r") as ground_actions_file:
        data = ground_actions_file.read().split("\n")
        for i in range(len(data)):
            line = data[i]
            if line.find("-----------operator") != -1:
                op_name = line.split()[1].split(":")[0].strip().lower()
                if op_name.strip() != "fict":
                    for j in range(i+1, len(data)):
                        if data[j].find("-----------operator") != -1 or data[j].find("Cueing down from goal distance") != -1:
                            break
                        action_obj = [el.lower() for k,el in enumerate(data[j].replace(",", "").split()) if k%3==0][1:]
                        if len(action_obj) > 0:
                            action_labels.append("{}({})".format(op_name, ",".join(action_obj)))
    # print("(Preprocessing) -- Reading ADL2STRIPS finished!")

    action_labels = sorted(action_labels)

    # Remove FF files
    os.remove("PDDL/domain_input_tmp.pddl")
    os.remove("PDDL/facts_tmp.pddl")
    os.remove("outputff.txt")

    return sorted(action_labels + all_action_op_not_learned)
def compute_all_actionADL():
    """
    Compute the grounded action list by running the external "adl2strips"
    tool on the working PDDL domain/problem files.

    Side effects: runs Planners/ADL2STRIPS/adl2strips as a subprocess and
    deletes the files it leaves behind (Configuration.ADL2STRIPS_FILE and
    "facts.pddl").

    :return: list of grounded action labels, e.g. "op(obj1,obj2)"
    """
    # print("(Preprocessing) -- Calling ADL2STRIPS to get input action list...")
    # bash_command = "Planners/ADL2STRIPS/adl2strips -o PDDL/domain_learned.pddl -f PDDL/facts.pddl"
    # bash_command = "Planners/ADL2STRIPS/adl2strips -o PDDL/domain.pddl -f PDDL/facts.pddl"
    bash_command = "Planners/ADL2STRIPS/adl2strips -o PDDL/domain_input.pddl -f PDDL/facts.pddl"
    process = subprocess.Popen(bash_command.split(), stdout=subprocess.PIPE)
    process.wait()
    # print("(Preprocessing) -- ADL2STRIPS Finished!")
    #
    # print("(Preprocessing) -- Reading ADL2STRIPS output...")
    with open(Configuration.ADL2STRIPS_FILE, "r") as ground_actions_file:
        data = ground_actions_file.read().split("\n")
    # Each "(:action ..." row encodes one grounded action; strip the wrapper
    # and rewrite it as "name(obj1,obj2,...)" in lower case.
    action_labels = [row[8:-2].strip().lower().replace("- ", "(", 1).replace("- ",",") + ")"
                     for row in filter(lambda k: '(:action' in k, data)]
    # print("(Preprocessing) -- Reading ADL2STRIPS finished!")
    # Remove ADL2STRIPS files
    os.remove(Configuration.ADL2STRIPS_FILE)
    os.remove("facts.pddl")
    return action_labels
def compute_all_action():
    """
    Compute the list of all ground actions as the cartesian product of the
    declared objects over every operator signature.

    :return: list of grounded action labels, e.g. "op(obj1,obj2)"
    """
    # Grounding over every known operator is exactly the special case of
    # compute_all_action_of_ops() with the full operator set, so delegate to
    # it instead of duplicating the grounding loop (the two bodies were
    # previously identical copies).
    return compute_all_action_of_ops(list(get_operator_signatures().keys()))
def compute_all_action_of_ops(operators):
    """
    Ground every operator listed in `operators` over all declared objects.

    Each parameter type of an operator is expanded to its concrete subtypes
    (a type with no subtypes stands for itself); the cartesian product of
    the objects of those types yields one label per grounding.

    :param operators: collection of operator names to ground
    :return: list of grounded action labels, e.g. "op(obj1,obj2)"
    """
    labels = []
    objects_by_type = get_all_object()
    signatures = get_operator_signatures()
    type_hierarchy = get_object_types_hierarchy()
    for op_name in signatures:
        if op_name not in operators:
            continue
        # Expand each parameter type into its subtypes, subclassing all
        # supertypes; supertypes without subtypes represent themselves.
        expanded_types = []
        for param_type in signatures[op_name]:
            subtypes = type_hierarchy[param_type]
            expanded_types.append(subtypes if len(subtypes) > 0 else [param_type])
        for type_tuple in itertools.product(*expanded_types):
            candidate_objects = [objects_by_type[t] for t in type_tuple]
            for combo in itertools.product(*candidate_objects):
                labels.append("{}({})".format(op_name, ",".join(combo)))
    return labels
def learn_instance(path_logs, simulator, parser, all_actions):
    """
    Create the learner, print some starting information, solve the problem
    instance, store the learnt action model and evaluate metrics
    (e.g. precision, recall, etc.).

    Side effects: writes a per-instance log file under path_logs, rewrites
    "PDDL/domain_learned_certain.pddl" so it keeps only certain
    preconditions, and dumps six JSON files with the learner's
    certain/uncertain preconditions and effects both into "PDDL" and into
    path_logs.

    :param path_logs: log file path
    :param simulator: pddlgym simulator
    :param parser: pddl domain parser
    :param all_actions: list of all domain actions
    :return: None
    """
    # Instantiate the Learner
    l = Learner(parser=parser, action_list=all_actions)
    log_file_path = "{}/{}_log".format(path_logs, Configuration.INSTANCE_DATA_PATH_PDDL.split("/")[-1].split(".")[0])
    log_file = open(log_file_path, "w")
    print("Running OLAM...")
    # print("\nTotal actions: {}".format(len(all_actions)))
    #
    # print("\nObjects list\n\t{}\n\n".format("\n\t".join(["{}:{}".format(k, len(v)) for k,v in get_all_object().items()])))
    # Optionally redirect stdout to the log file for the whole resolution;
    # the original stdout is restored after learning.
    old_stdout = sys.stdout
    if not Configuration.OUTPUT_CONSOLE:
        print(f'Standard output redirected to {log_file_path}')
        sys.stdout = log_file
    print("\nTotal actions: {}".format(len(all_actions)))
    print("\nObjects list\n\t{}\n\n".format("\n\t".join(["{}:{}".format(k, len(v)) for k,v in get_all_object().items()])))
    # Learn action model from problem instance
    l.learn(eval_frequency=10, simulator=simulator)
    log_file.close()
    if not Configuration.OUTPUT_CONSOLE:
        LogReader.evaluate_log_metrics(log_file_path)
        sys.stdout = old_stdout
    print("End of OLAM resolution.")
    # Compute learned domain with certain preconditions: copy the learned
    # domain, then drop every precondition the learner is not certain about.
    shutil.copyfile("PDDL/domain_learned.pddl", "PDDL/domain_learned_certain.pddl")
    with open("PDDL/domain_learned_certain.pddl", "r") as f:
        data = f.read().split("\n")
    with open("PDDL/domain_learned_certain.pddl", "w") as f:
        for i in range(len(data)):
            line = data[i]
            if line.find(":action") != -1:
                op_name = line.split()[1]
                # NOTE: assumes the precondition list is exactly 3 lines
                # below the ":action" line in the generated file.
                precond = sorted(re.findall("\([^()]*\)", data[i+3]))
                to_remove = []
                for prec in precond:
                    if prec not in l.operator_certain_predicates[op_name]:
                        to_remove.append(prec)
                if len([prec for prec in precond if prec not in to_remove]) > 0:
                    data[i+3] = "\t\t"+ " ".join([prec for prec in precond if prec not in to_remove])
                else:
                    data[i+3] = ")"
        [f.write(line + "\n") for line in data]
    # Save uncertain preconditions of each learned operator
    with open(os.path.join("PDDL", "operator_uncertain_precs.json"), "w") as outfile:
        # json.dump(self.operator_negative_preconditions, outfile)
        json.dump(l.operator_uncertain_predicates, outfile, indent=2)
    shutil.copyfile(os.path.join("PDDL", "operator_uncertain_precs.json"),
                    os.path.join(path_logs, "operator_uncertain_precs.json"))
    # Save certain positive effects of each learned operator
    with open(os.path.join("PDDL", "operator_certain_positive_effects.json"), "w") as outfile:
        # json.dump(self.operator_negative_preconditions, outfile)
        json.dump(l.certain_positive_effects, outfile, indent=2)
    shutil.copyfile(os.path.join("PDDL", "operator_certain_positive_effects.json"),
                    os.path.join(path_logs, "operator_certain_positive_effects.json"))
    # Save certain negative effects of each learned operator
    with open(os.path.join("PDDL", "operator_certain_negative_effects.json"), "w") as outfile:
        # json.dump(self.operator_negative_preconditions, outfile)
        json.dump(l.certain_negative_effects, outfile, indent=2)
    shutil.copyfile(os.path.join("PDDL", "operator_certain_negative_effects.json"),
                    os.path.join(path_logs, "operator_certain_negative_effects.json"))
    # Save potentially possible positive effects of each learned operator,
    # i.e., effects that may be learned in a different problem
    with open(os.path.join("PDDL", "operator_uncertain_positive_effects.json"), "w") as outfile:
        # json.dump(self.operator_negative_preconditions, outfile)
        json.dump(l.uncertain_positive_effects, outfile, indent=2)
    shutil.copyfile(os.path.join("PDDL", "operator_uncertain_positive_effects.json"),
                    os.path.join(path_logs, "operator_uncertain_positive_effects.json"))
    # Save potentially possible negative effects of each learned operator,
    # i.e., effects that may be learned in a different problem
    with open(os.path.join("PDDL", "operator_uncertain_negative_effects.json"), "w") as outfile:
        # json.dump(self.operator_negative_preconditions, outfile)
        json.dump(l.uncertain_negative_effects, outfile, indent=2)
    shutil.copyfile(os.path.join("PDDL", "operator_uncertain_negative_effects.json"),
                    os.path.join(path_logs, "operator_uncertain_negative_effects.json"))
    # Save useless possible preconditions of not yet learned operators,
    # i.e., possible preconditions which has been satisfied during a previous resolution but for which
    # the action has not been executable
    with open(os.path.join("PDDL", "operator_useless_possible_precs.json"), "w") as outfile:
        # json.dump(self.operator_negative_preconditions, outfile)
        json.dump(l.useless_possible_precs, outfile, indent=2)
    shutil.copyfile(os.path.join("PDDL", "operator_useless_possible_precs.json"),
                    os.path.join(path_logs, "operator_useless_possible_precs.json"))
    # Save useless negated preconditions of not learned operators,
    # i.e., preconditions that has been negated during a previous resolution but for which
    # the action has not been executable
    with open(os.path.join("PDDL", "operator_useless_negated_precs.json"), "w") as outfile:
        # json.dump(self.operator_negative_preconditions, outfile)
        json.dump(l.useless_negated_precs, outfile, indent=2)
    shutil.copyfile(os.path.join("PDDL", "operator_useless_negated_precs.json"),
                    os.path.join(path_logs, "operator_useless_negated_precs.json"))
def solve_instance():
    """
    Solve problem instance applying the following steps: create the domain
    simulator, create the problem instance log directories and solve the
    problem instance.

    NOTE(review): this function reads the module-level globals `domain` and
    `instance_name` set in the __main__ loop; it is not callable on its own.

    :return: None
    """
    # Create the simulator
    simulator = Simulator()
    # Get all actions list (this should be an input, or alternatively a superset of all possible actions which
    # could be automatically computed by the learner)
    # all_actions = compute_all_action()
    op_input = list(get_operator_signatures().keys())
    # Operators whose preconditions have not been learned yet (empty
    # precondition list in the input domain).
    op_not_learned = []
    if os.path.exists("PDDL/domain_input.pddl"):
        for op in op_input:
            op_prec = get_operator_preconditions(op)
            op_prec = [el for el in op_prec if el.find("(and )") == -1]
            if len(op_prec) == 0:
                op_not_learned.append(op)
    if os.path.exists("PDDL/domain_input.pddl") and len(op_not_learned) == 0:
        # all_actions = compute_all_actionADL()
        all_actions = compute_all_actionFF()
        if len(all_actions) == 0:
            print('Warning: bug in FF when computing all actions, using cartesian product')
            all_actions = compute_all_action()
    else:
        all_actions = compute_all_action()
    # Create the instance logs directory; append "(counter)" until an unused
    # directory name is found.
    dir_counter = 0
    # path_root = "{}{}/{}/{}/".format(Configuration.ROOT_TEST_DIR, domain, Configuration.BENCHMARK_DIR,
    #                                  instance_name.split('.')[0])
    path_root = os.path.join(Configuration.ROOT_TEST_DIR, domain, Configuration.BENCHMARK_DIR,
                             instance_name.split('.')[0])
    while os.path.isdir(path_root):
        dir_counter = dir_counter + 1
        # path_root = "{}{}/{}/{}({})".format(Configuration.ROOT_TEST_DIR, domain, Configuration.BENCHMARK_DIR,
        #                                     instance_name.split('.')[0], dir_counter)
        path_root = os.path.join(Configuration.ROOT_TEST_DIR, domain, Configuration.BENCHMARK_DIR,
                                 f"{instance_name.split('.')[0]}({dir_counter})")
    try:
        os.makedirs(path_root)
    except OSError:
        print("Creation of the directory %s is failed" % path_root)
    # Instantiate PDDL parser and update initial PDDL state
    parser = PddlParser()
    # parser.update_pddl_facts(obs)
    # Solve problem instance
    learn_instance(path_root, simulator, parser, all_actions)
    # Save learned domain
    shutil.copyfile("PDDL/domain_learned.pddl", os.path.join(path_root, "domain_learned.pddl"))
    # Save learned domain with certain preconditions
    shutil.copyfile("PDDL/domain_learned_certain.pddl", os.path.join(path_root, "domain_learned_certain.pddl"))
    # Save input domain of solved problem, if it exists
    if os.path.exists("PDDL/domain_input.pddl"):
        shutil.copyfile("PDDL/domain_input.pddl", os.path.join(path_root, "domain_input.pddl"))
    # Save learned domain as input domain for the next problem
    shutil.copyfile("PDDL/domain_learned_certain.pddl", "PDDL/domain_input.pddl")
    # Save reached state
    shutil.copyfile("PDDL/facts.pddl", os.path.join(path_root, "final_state.pddl"))
if __name__ == "__main__":
    # Set input arguments
    args_parser = argparse.ArgumentParser()
    args_parser.add_argument('-d', '--domain', help="Domain name (must be equal to domain benchmark instances root directory)",
                             type=str, default=None)
    # Get input arguments
    args = args_parser.parse_args()
    domain = args.domain
    # Check input arguments
    assert (Configuration.MAX_ITER > 0), "MAX_ITER in Configuration.py must be greater than 0"
    assert (isinstance(Configuration.NEG_EFF_ASSUMPTION, bool)), "NEG_EFF_ASSUMPTION in Configuration.py must be True or " \
                                                                 "False, default value is False"
    assert (isinstance(domain, str) or domain is None), "-domain must be a string equal to a domain benchmark instances root directory"
    assert (domain in os.listdir(os.path.join("Analysis", "Benchmarks"))
            or domain is None), "-domain must be equal to a domain benchmark " \
                                "instances root directory (in Analysis/Benchmarks)"
    # Locate the JDK shipped in the "Java" directory; exactly one is expected.
    java_jdk_dir = [d for d in os.listdir(os.path.join(os.getcwd(), Configuration.JAVA_DIR))
                    if os.path.isdir(os.path.join(os.getcwd(), Configuration.JAVA_DIR, d))]
    if len(java_jdk_dir) == 0:
        print('\n\nMissing oracle jdk directory in "Java" directory. Please download oracle jdk tarball and extract it '
              'into "Java" directory.')
    elif len(java_jdk_dir) > 1:
        print(f'\n\nMultiple jdk directories in "Java" directory. Please delete all jdk directories in "Java" '
              f'directory but the chosen one. I am trying to execute the program by looking for java binary '
              f'in {os.path.join(os.getcwd(), Configuration.JAVA_DIR, java_jdk_dir[0])}.')
    # NOTE(review): when the list is empty this indexing raises IndexError
    # right after the warning above — confirm whether a sys.exit is intended.
    java_jdk_dir = java_jdk_dir[0]
    Configuration.JAVA_BIN_PATH = os.path.join(os.getcwd(), Configuration.JAVA_DIR, java_jdk_dir, "bin", "java")
    assert os.path.exists(Configuration.JAVA_BIN_PATH), f"File not found: {Configuration.JAVA_BIN_PATH}"
    assert (isinstance(Configuration.OUTPUT_CONSOLE, bool)), "OUTPUT_CONSOLE in Configuration.py must be True or False"
    # Without -d, run over every domain found under Analysis/Benchmarks.
    all_domains = []
    if domain is None:
        all_domains = [el for el in os.listdir(os.path.join("Analysis", "Benchmarks"))
                       if not el.endswith(".pddl")]
        print('\n\nRunning OLAM over all domain in Analysis/Benchmarks directory')
    else:
        all_domains = [domain]
        print(f'\n\nRunning OLAM in {domain} domain\n')
    # Set test directory (run_<k>/Tests, where k counts existing run_ dirs)
    runs = [d for d in os.listdir(Configuration.ROOT_DIR) if d.startswith('run_')]
    Configuration.ROOT_TEST_DIR = os.path.join(Configuration.ROOT_DIR, f"run_{len(runs)}", "Tests")
    # Configuration.ROOT_TEST_DIR = "{}Tests/".format(Configuration.ROOT_DIR)
    for domain in all_domains:
        # Domain benchmarks directory
        instances_dir = "{}{}".format(Configuration.ROOT_BENCHMARKS_DIR, domain)
        # Clean working files in PDDL directory when the leftover input
        # domain belongs to a different domain than the current one.
        clean = False
        if os.path.exists("PDDL/domain_input.pddl"):
            with open("PDDL/domain_input.pddl", "r") as f:
                for el in f.read().split("\n"):
                    if el.find("(domain") != -1:
                        # Special case for nomystery
                        if domain == "nomystery" and "transport" in el.lower().strip().split()[2].replace("-", ""):
                            clean = False
                            break
                        if domain.lower().replace("-", "") not in el.lower().strip().split()[2].replace("-", ""):
                            clean = True
                            break
                        else:
                            break
        if clean:
            shutil.rmtree("PDDL")
            os.mkdir("PDDL")
        all_instances = None
        try:
            all_instances = sorted(os.listdir(instances_dir), key=lambda x: int(x.split("_")[0]))
        except ValueError:
            print("All instance file names in domain benchmark directory {} must begin with "
                  "a number followed by underscore, e.g. 1_instancename".format(instances_dir))
        # NOTE(review): print(...) returns None, so it runs eagerly and the
        # assert message is always None — a plain string message is likely
        # what was intended here.
        assert all_instances is not None, print("All instance file names in domain benchmark directory {} must begin with "
                                                "a number followed by underscore, e.g. 1_instancename. Moreover, the domain "
                                                "benchmark directory must be into \"Analysis/Benchmarks\" directory".format(instances_dir))
        for instance_name in all_instances:
            # Set instance file name and path
            Configuration.INSTANCE_DATA_PATH_PDDL = os.path.join("Analysis", "Benchmarks", domain, instance_name)
            # Copy original domain and problem instance to working files
            preprocessing.preprocess(domain)
            # Clean temporary files (i.e., not executable actions files)
            if os.path.exists("Info"):
                shutil.rmtree("Info")
            os.mkdir("Info")
            # print("\n\n +-+-+-+-+-+-+-+-+-+-+-+-+-+ OLAM +-+-+-+-+-+-+-+-+-+-+-+-+-+\n")
            print(f"\nSolving instance {Configuration.INSTANCE_DATA_PATH_PDDL}")
            if os.path.exists("PDDL/domain_input.pddl"):
                print("Reading input domain PDDL/domain_input.pddl, if you do not want to use an input domain, make "
                      "the PDDL directory empty")
            # Solve instance
            solve_instance()
            # Clean not executable action files and PDDL files
            shutil.rmtree("Info")
            shutil.rmtree("PDDL")
    if not Configuration.OUTPUT_CONSOLE:
        # Generate final results without uncertain negative effects
        if not Configuration.NEG_EFF_ASSUMPTION:
            Dataframe_generator.generate_domain_dataframes()
            Dataframe_generator.generate_domain_summary()
        # Generate final results with uncertain negative effects
        uncert_neg_effects = True
        Dataframe_generator.generate_domain_dataframes(uncert_neg_effects)
        Dataframe_generator.generate_domain_summary(uncert_neg_effects)
| [
2,
15069,
357,
66,
8,
33160,
11,
38083,
10923,
7697,
198,
2,
1439,
2489,
10395,
13,
198,
2,
770,
2723,
2438,
318,
11971,
739,
262,
17168,
12,
7635,
5964,
1043,
287,
262,
198,
2,
38559,
24290,
2393,
287,
262,
6808,
8619,
286,
428,
... | 2.298271 | 11,104 |
# coding:utf-8
# 2019-4-20
"""根据二值分割将原图进行分割
解决原因:对图像进行分割前对原图进行了增强处理,进行分割时是对增强图片进行分割,而不是基于原图分割。
现在根据对增强图片进行分割保存的二值图对原图进行分割,保存原图的分割图。
"""
import numpy as np
import os
import cv2
from tool import util
from tool import api
def getImageDict(originalFiles, binaryFiles):
    """Map each original image file name to the path of its binary mask image.

    A binary image matches an original image when the original's file-name
    stem (name without extension) occurs as a substring of the binary file's
    base name; when several binaries match, the last one in binaryFiles wins.

    @param originalFiles list of original image file paths
    @param binaryFiles list of binary (mask) image file paths
    @returns dict {"originalPicName": "binaryImagePath"}
    """
    mapping = {}
    for original_path in originalFiles:
        original_name = os.path.basename(original_path)
        stem = os.path.splitext(original_name)[0]  # file-name stem of the original image
        for mask_path in binaryFiles:
            if stem in os.path.basename(mask_path):
                mapping[original_name] = mask_path
    return mapping
def segBasedBinaryImage(originalIamgePath, binaryImagePath):
    """Segment the original image using its binary (mask) image.

    Every pixel where the mask is 0 is blanked to 0 in the original image;
    all other pixels are kept unchanged.

    @param originalIamgePath path of the original image (read as grayscale)
    @param binaryImagePath path of the binary mask image (same size expected)
    @returns the masked image as a 2-D array, or None when the two images
             differ in size (callers should check before saving)
    """
    originalIamge = cv2.imread(originalIamgePath, 0)
    binaryImage = cv2.imread(binaryImagePath, 0)
    if np.shape(originalIamge) != np.shape(binaryImage):
        print("[WARNING] original iamge size does not equal bianry image size!")
        # Made the implicit "return" explicit: callers receive None on a
        # size mismatch, exactly as before.
        return None
    # Vectorized masking: a single boolean-index assignment replaces the
    # original per-pixel double Python loop (same result, far faster).
    originalIamge[binaryImage == 0] = 0
    return originalIamge
def imagePattern(binaryFiles, imageNamepattern):
    """Select the file paths whose base name contains the given pattern.

    @param binaryFiles list of candidate file paths
    @param imageNamepattern substring to look for in each file's base name
    @returns list of matching file paths, in the original order
    """
    return [path for path in binaryFiles
            if imageNamepattern in os.path.basename(path)]
def recover(originalIamgeDir, binaryIamgeDir, outputDir, imageNamepattern):
    """Entry point: segment every original image using its binary mask image.

    Precondition: original images and binary masks must have the same size;
    if they do not, clip them first (see api.standardPicClip).

    @param originalIamgeDir directory of the (clipped) original images
    @param binaryIamgeDir directory of the binary mask images
    @param outputDir directory where segmented images are saved
    @param imageNamepattern substring identifying binary mask file names
    @returns None
    """
    originalFiles = api.getFiles(originalIamgeDir)
    rawbinaryFiles = api.getFiles(binaryIamgeDir)
    binaryFiles = imagePattern(rawbinaryFiles, imageNamepattern)
    binaryImageDict = getImageDict(originalFiles, binaryFiles)
    util.mkdirs(outputDir)
    failed = []
    for f in originalFiles:
        originalPicName = os.path.basename(f)  # original image file name
        print("[INFO] processing {}".format(originalPicName))
        binaryImagePath = binaryImageDict.get(originalPicName, None)  # path of the matching binary mask
        if not binaryImagePath:
            print("[WARNING] image {} dose not map in {}".format(originalPicName, binaryIamgeDir))
            failed.append(originalPicName)
            continue
        segImage = segBasedBinaryImage(f, binaryImagePath)  # segmented image
        outputPath = os.path.join(outputDir, originalPicName)  # destination path
        cv2.imwrite(outputPath, segImage)
    print("[WARNING] failed to recover: {}".format(failed))
if __name__ == '__main__':
    original = r"C:\Study\test\bone\cc\cc\old"  # directory of unclipped original images
    originalIamgeDir = r"C:\Study\test\bone\cc\cc\old_clip"  # directory of clipped original images
    binaryIamgeDir = r"C:\Study\test\bone\cc\cc\new"  # directory containing the binary mask images
    outputDir = r"C:\Study\test\bone\cc\ret"  # output directory for segmented images
    imageNamepattern = "_thrshed_img_seg"  # substring matching binary mask file names
    # Clip the originals so their size matches the binary masks
    api.standardPicClip(original, originalIamgeDir, midName="")
    recover(originalIamgeDir, binaryIamgeDir, outputDir, imageNamepattern)
| [
2,
19617,
25,
40477,
12,
23,
198,
2,
13130,
12,
19,
12,
1238,
198,
37811,
43718,
117,
162,
235,
106,
12859,
234,
161,
222,
120,
26344,
228,
30298,
110,
49546,
43889,
253,
32368,
122,
32573,
249,
26193,
234,
26344,
228,
30298,
110,
1... | 1.817536 | 1,688 |
#!/usr/bin/env python3
# Copyright (c) 2021 Mobvoi Inc. (authors: Binbin Zhang)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import io
import logging
import os
import tarfile
import time
import multiprocessing
import torch
import torchaudio
import torchaudio.backend.sox_io_backend as sox
AUDIO_FORMAT_SETS = set(['flac', 'mp3', 'm4a', 'ogg', 'opus', 'wav', 'wma'])
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='')
    parser.add_argument('--num_utts_per_shard',
                        type=int,
                        default=1000,
                        help='num utts per shard')
    parser.add_argument('--num_threads',
                        type=int,
                        default=1,
                        help='num threads for make shards')
    parser.add_argument('--prefix',
                        default='shards',
                        help='prefix of shards tar file')
    parser.add_argument('--segments', default=None, help='segments file')
    # NOTE(review): the help text below looks copy-pasted from --segments;
    # --resample is the target sample rate (default 16000).
    parser.add_argument('--resample',
                        type=int,
                        default=16000,
                        help='segments file')
    parser.add_argument('wav_file', help='wav file')
    parser.add_argument('text_file', help='text file')
    parser.add_argument('shards_dir', help='output shards dir')
    parser.add_argument('shards_list', help='output shards list file')
    args = parser.parse_args()
    logging.basicConfig(level=logging.INFO,
                        format='%(asctime)s %(levelname)s %(message)s')
    torch.set_num_threads(1)
    # Kaldi-style wav.scp: "<utt-or-rec id> <wav path>" per line.
    wav_table = {}
    with open(args.wav_file, 'r', encoding='utf8') as fin:
        for line in fin:
            arr = line.strip().split()
            assert len(arr) == 2
            wav_table[arr[0]] = arr[1]
    # Optional segments file: "<utt id> <rec id> <start> <end>" per line.
    no_segments = True
    segments_table = {}
    if args.segments is not None:
        no_segments = False
        with open(args.segments, 'r', encoding='utf8') as fin:
            for line in fin:
                arr = line.strip().split()
                assert len(arr) == 4
                segments_table[arr[0]] = (arr[1], float(arr[2]), float(arr[3]))
    # Join transcripts with wav paths (and segment boundaries, if any).
    data = []
    with open(args.text_file, 'r', encoding='utf8') as fin:
        for line in fin:
            arr = line.strip().split(maxsplit=1)
            key = arr[0]
            txt = arr[1] if len(arr) > 1 else ''
            if no_segments:
                assert key in wav_table
                wav = wav_table[key]
                data.append((key, txt, wav))
            else:
                wav_key, start, end = segments_table[key]
                wav = wav_table[wav_key]
                data.append((key, txt, wav, start, end))
    # Split the utterances into fixed-size chunks, one tar shard per chunk.
    num = args.num_utts_per_shard
    chunks = [data[i:i + num] for i in range(0, len(data), num)]
    os.makedirs(args.shards_dir, exist_ok=True)
    # Using thread pool to speedup
    pool = multiprocessing.Pool(processes=args.num_threads)
    shards_list = []
    tasks_list = []
    num_chunks = len(chunks)
    for i, chunk in enumerate(chunks):
        tar_file = os.path.join(args.shards_dir,
                                '{}_{:09d}.tar'.format(args.prefix, i))
        shards_list.append(tar_file)
        pool.apply_async(
            write_tar_file,
            (chunk, no_segments, tar_file, args.resample, i, num_chunks))
    pool.close()
    pool.join()
    # Write the list of produced shard paths, one per line.
    with open(args.shards_list, 'w', encoding='utf8') as fout:
        for name in shards_list:
            fout.write(name + '\n')
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
2,
15069,
357,
66,
8,
33448,
16540,
13038,
72,
3457,
13,
357,
41617,
25,
20828,
8800,
19439,
8,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,... | 2.146588 | 1,876 |
# Render the ResNet-18 architecture to pose_net.png using hiddenlayer.
from models import ResNet_Spec, ResNet
import hiddenlayer as hl
import torch

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print("device = ", device)

model = ResNet(ResNet_Spec[18])
# Trace the model with a dummy 1x3x512x512 input to build the graph.
# NOTE(review): only the dummy input is moved to `device`, not the model —
# on a CUDA machine this looks like a device mismatch; confirm.
hl_graph = hl.build_graph(model, torch.zeros([1, 3, 512, 512]).to(device=device))
hl_graph.theme = hl.graph.THEMES["blue"].copy()
hl_graph.save('pose_net.png', 'png')
| [
6738,
4981,
1330,
1874,
7934,
62,
22882,
11,
1874,
7934,
198,
11748,
7104,
29289,
355,
289,
75,
198,
11748,
28034,
198,
198,
25202,
796,
28034,
13,
25202,
7203,
66,
15339,
25,
15,
1,
611,
28034,
13,
66,
15339,
13,
271,
62,
15182,
34... | 2.692857 | 140 |
import sys, re, glob
import numpy as np
import matplotlib.pyplot as plt
from cctk import GaussianFile, Molecule
import cctk.parse_gaussian as parse
#### Usage: ``python analyze_dipole.py "path/to/output/*.out"``
#### NOTE: It's crucial to wrap the wildcard-containing path in quotes!
#### NOTE: This file will reject any file that contains the string "slurm."
#### Corin Wagen and Eugene Kwan, 2019
filenames = sys.argv[1]

# Per-geometry data, keyed by the C1-C8 distance in milliangstroms.
energies = {}
dipole = {}
nics = {}
C1_charge = {}
O7_charge = {}
C8_charge = {}
C9_charge = {}
C12_charge = {}

for filename in sorted(glob.glob(filenames, recursive=True)):
    if re.search("slurm", filename):
        continue
    (output_file, lines) = GaussianFile.read_file(filename, return_lines=True)
    # Distance between atoms 1 and 8, rounded to milliangstroms, used as key.
    dist = int(round(output_file.get_molecule().get_distance(1, 8) * 1000))
    energies[dist] = output_file.energies[-1]
    try:
        nics[dist] = -1 * parse.find_parameter(lines, "17 Bq Isotropic", 8, 4)[0]
    except:
        pass
    try:
        dipole_line = parse.search_for_block(lines, "Dipole", "Quadrupole")
        fields = re.split(" +", dipole_line)
        fields = list(filter(None, fields))
        dipole[dist] = float(fields[-1])
    except:
        pass
    try:
        C1_charge[dist] = parse.find_parameter(lines, " 1 C", 8, 2)[-1]
        O7_charge[dist] = parse.find_parameter(lines, " 7 O", 8, 2)[-1]
        C8_charge[dist] = parse.find_parameter(lines, " 8 C", 8, 2)[-1]
        C9_charge[dist] = parse.find_parameter(lines, " 9 C", 8, 2)[-1]
        C12_charge[dist] = parse.find_parameter(lines, " 12 C", 8, 2)[-1]
    except:
        pass

# Relative energies in kcal/mol (627.509 kcal/mol per Hartree).
min_energy = np.min(list(energies.values()))
energies = {k: (e - min_energy) * 627.509 for k, e in energies.items()}

#### generate dipole graph
fig, ax = plt.subplots(nrows=3, figsize=(10,15))

ax[0].scatter(list(energies.keys()), list(energies.values()), c='black', alpha=0.8, label="Energy")
ax[0].set_ylim(top=30, bottom=0)
ax[0].set_xlabel("C1-C5 Distance (mÅ)")
ax[0].set_ylabel("Energy (kcal/mol; M06-2X)")

ax1 = ax[0].twinx()
ax1.scatter(list(dipole.keys()), list(dipole.values()), c='blue', alpha=0.8, label="Dipole")
ax1.set_ylim(top=3, bottom=0)
ax1.set_ylabel("Dipole Moment (M06-2X)")
ax1.set_title("Change in Dipole Moment over IRC")
ax1.legend(loc='upper right')

#### generate nics graph
ax[1].scatter(list(energies.keys()), list(energies.values()), c='black', alpha=0.8, label="Energy")
ax[1].set_ylim(top=30, bottom=0)
ax[1].set_xlabel("C1-C5 Distance (mÅ)")
ax[1].set_ylabel("Energy (kcal/mol; M06-2X)")

ax2 = ax[1].twinx()
ax2.scatter(list(nics.keys()), list(nics.values()), c='blue', alpha=0.8, label="NICS(0)")
ax2.set_ylabel("NICS(0) (M06-2X)")
ax2.set_title("Change in NICS(0) over IRC")
ax2.legend(loc='upper right')

#### generate pop graph
ax[2].scatter(list(energies.keys()), list(energies.values()), c='black', alpha=0.8, label="Energy")
ax[2].set_ylim(top=30, bottom=0)
ax[2].set_xlabel("C1-C5 Distance (mÅ)")
ax[2].set_ylabel("Energy (kcal/mol; M06-2X)")

ax3 = ax[2].twinx()
ax3.scatter(list(C1_charge.keys()), list(C1_charge.values()), c='blue', alpha=0.8, label="C1")
ax3.scatter(list(O7_charge.keys()), list(O7_charge.values()), c='red', alpha=0.8, label="O7")
ax3.scatter(list(C8_charge.keys()), list(C8_charge.values()), c='orange', alpha=0.8, label="C8")
ax3.scatter(list(C9_charge.keys()), list(C9_charge.values()), c='green', alpha=0.8, label="C9")
ax3.scatter(list(C12_charge.keys()), list(C12_charge.values()), c='purple', alpha=0.8, label="C12")
ax3.set_ylabel("Hirshfeld Charge (M06-2X)")
ax3.set_title("Change in Charges over IRC")
ax3.legend(loc='upper right')

#plt.show()
plt.tight_layout()
plt.savefig('graph.png')
| [
11748,
25064,
11,
302,
11,
15095,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
198,
6738,
269,
310,
74,
1330,
12822,
31562,
8979,
11,
25726,
23172,
198,
11748,
269,
310,
74,
13... | 2.25258 | 1,647 |
import logging
import boto3
from ec2mgr import EC2Manager
from ssmmgr import SSMManager
class AWSDriver:
"""
The main program that handles the AWS related activities
"""
def init_vars(self, event, logger):
""" variable initialization """
self.logger = logger
self.logger.info("Method Entry.")
self.request_type = event['requestType']
self.operation = event['operation']
self.region = event['svcPayload'][0]['region']
self.vpc_id = event['svcPayload'][0]['vpcId']
self.pub_subnet_id = event['svcPayload'][0]['pubSubnetId']
self.pkey_name = event['svcPayload'][0]['pkeyName']
self.inst_type = event['svcPayload'][0]['instType']
self.name_tag = event['svcPayload'][0]['nameTag']
self.aws_akey = event['svcPayload'][0]['accessKey']
self.aws_skey = event['svcPayload'][0]['secretKey']
self.aws_token = event['svcPayload'][0]['sessionToken']
# Initialization - connection to AWS initialization
def sign_in(self):
""" setup initial connections to aws """
self.logger.info("Method Entry.")
#self.ec2 = boto3.resource('ec2', region_name=self.region, aws_access_key_id=self.aws_akey,
# aws_secret_access_key=self.aws_skey, aws_session_token=self.aws_token)
self.client = boto3.client('ec2', region_name=self.region, aws_access_key_id=self.aws_akey,
aws_secret_access_key=self.aws_skey, aws_session_token=self.aws_token)
self.ssm = boto3.client('ssm', region_name=self.region, aws_access_key_id=self.aws_akey,
aws_secret_access_key=self.aws_skey, aws_session_token=self.aws_token)
# setup resource handlers/managers
self.ec2mgr = EC2Manager(self.logger, self.client, self.region)
self.ssmmgr = SSMManager(self.logger, self.ssm, self.region)
def validate_request(self, event):
"""
This is do a minimal validation of request to check if expected params/values are present
:param event: incoming payload
:return: True/False indicating if the request params are valid or not
"""
self.logger.info("Method Entry.")
# lets first validate the inputs params for basic checking.
request_type = event['requestType']
operation = event['operation']
if request_type not in ['ec2']:
return False
if operation not in ['create', 'delete']:
return False
return True
# Main entry point for acting on the request
def process_request(self, event):
"""
Main entry point for Processing the request
:return:
"""
self.logger.info("Method Entry.")
if self.validate_request(event) != True:
return False
return self.__process_request(event)
def __process_request(self, event):
"""
Private impl method for the Processing the request
:return:
"""
self.logger.info("Method Entry.")
if event['requestType'] == 'ec2':
return self.__setup_ec2(event)
return False
| [
11748,
18931,
198,
11748,
275,
2069,
18,
198,
198,
6738,
9940,
17,
76,
2164,
1330,
13182,
17,
13511,
198,
6738,
37786,
3020,
2164,
1330,
6723,
44,
13511,
628,
198,
4871,
14356,
10305,
38291,
25,
198,
220,
220,
220,
37227,
198,
220,
22... | 2.31769 | 1,385 |
import argparse
import os
def run(path, flags):
    """Dispatch the requested configuration action.

    Args:
        path (str): working directory the command operates on
        flags (argparse.Namespace): parsed flags with boolean attributes
            ``config`` and ``default``

    Raises:
        ValueError: when both --config and --default are given (the two
            flags are mutually exclusive, as the original TODO noted).
    """
    if flags.config and flags.default:
        raise ValueError("--config and --default cannot be used together")
    elif flags.config:
        config(path, flags)
    elif flags.default:
        # Bug fix: argparse.Namespace takes keyword arguments, not a dict;
        # Namespace({"default": True}) raised a TypeError.
        config(path, argparse.Namespace(default=True))
def config(path, flags):
    """Create (or reset) .pre-commit-config.yaml in the current directory.

    When the file already exists, the user is asked for confirmation before
    it is reset; any answer other than "y"/"Y" aborts.

    Args:
        path (str): directory reported to the user in messages
        flags (argparse.Namespace): parsed command-line flags (unused here)
    """
    if os.path.exists(".pre-commit-config.yaml"):
        print(f"Config file already found in {path}")
        confirmation = input("Are you sure you want to reset your settings? (Y/N): ")
        if confirmation.lower() != "y":
            print("Terminating config command")
            return
    # Bug fix: os.mknod raises FileExistsError when the file already exists
    # (the confirmed-reset path above) and is unavailable on some platforms;
    # opening in "w" mode both creates a new file and truncates an existing one.
    with open(".pre-commit-config.yaml", "w"):
        pass
def hook(path, flags):
    """Set up the pre-commit hook (work in progress).

    Args:
        path (str): working directory the command operates on
        flags (argparse.Namespace): parsed command-line flags
    """
    if flags.config and flags.default:
        pass  # TODO: this flag combination should raise an error
    if flags.config:
        config(path, flags)
    # The --default and no-flag branches are intentionally no-ops for now.
    print("hook not yet implemented")
def ls(path, flags):
    """List managed hooks (not implemented yet).

    Args:
        path (str): working directory the command operates on
        flags (argparse.Namespace): parsed command-line flags (unused)
    """
    print("ls not yet implemented!")
def reset(path, flags):
"""TODO: Add method description!
Args:
path (str):
flags (argparse.Namespace):
"""
confirmation = input("Are you sure you want to reset your settings? (Y/N): ")
if confirmation.lower() != "y":
print("Terminating reset command")
return
if flags.config or (not flags.config and not flags.default):
if not os.path.exists(path + ".pypcmgrconfig"):
raise ValueError(f".pypcmgrconfig not found in {path}")
os.remove(path + ".pypcmgrconfig")
print(f"Deleted .pypcmgrconfig in {path}")
if flags.hook or (not flags.config and not flags.default):
if not os.path.exists(path + ".pre-commit-config.yaml"):
raise ValueError(f".pre-commit-config.yaml not found in {path}")
os.remove(path + ".pre-commit-config.yaml")
print(f"Deleted .pre-commit-config.yaml in {path}")
| [
11748,
1822,
29572,
198,
11748,
28686,
628,
198,
4299,
1057,
7,
6978,
11,
9701,
2599,
198,
220,
220,
220,
37227,
51,
3727,
46,
25,
3060,
2446,
6764,
0,
628,
220,
220,
220,
943,
14542,
25,
198,
220,
220,
220,
220,
220,
220,
220,
31... | 2.379734 | 977 |
from typing import ClassVar
from .. import command, module, util
| [
6738,
19720,
1330,
5016,
19852,
198,
198,
6738,
11485,
1330,
3141,
11,
8265,
11,
7736,
628
] | 4.1875 | 16 |
first = Node();
first.data = 5;
ll = LinkedList();
ll.add(first);
blah = Node();
ll.add(blah);
print(first);
| [
197,
197,
198,
197,
198,
197,
197,
628,
198,
11085,
796,
19081,
9783,
198,
198,
11085,
13,
7890,
796,
642,
26,
198,
198,
297,
796,
7502,
276,
8053,
9783,
198,
297,
13,
2860,
7,
11085,
1776,
628,
198,
2436,
993,
796,
19081,
9783,
1... | 2.1 | 60 |
import cauldron as cd
import matplotlib
import matplotlib.pyplot as plt
plt.rcParams.update({'figure.max_open_warning': 0}) # NOQA
import seaborn as sns
sns.set() # NOQA, need this for styling
import pandas as pd
import os, sys # NOQA
sys.path.append('../../src/data')
import make_dataset # NOQA, need the lines above to get directories right
# Import df from shared Cauldron memory
df = cd.shared.df
cd.display.markdown(
"""
## Sales vs Customers
As noted above, **customers** has a correlation of 0.90 with **sales**.
It's pretty obvious on the chart below; the more customers, the more sales.
Note also that as we bring in more customers, the relationship gets less
strong, until it starts to break down around 5,000 customers in a given
store (clearly only a few stores could even fit 5,000 customers in a day).
We don't know the specific definition of 'customer' in this case, or how
they're counted. Is it someone who bought, or just someone who came into
the store? Do internet visitors/buyers count? In any case, we'll want to
work with the marketing team to bring more people through the doors
(virtual and physical).
For now, since the correlation with sales is so strong, and since our
neural network model will manage the relationship between customers and
sales implicitly for us, let's continue to focus on **sales** and keep
**customers** as a secondary focus.
"""
)
# Prep data for display
avg_sales_by_customers = df.groupby('customers').sales.mean()
# Create and display the chart
fig, ax = plt.subplots()
ax.plot(avg_sales_by_customers)
ax.set_title('Average Sales by Number of Customers')
ax.set_xlabel('Number of Customers')
ax.set_ylabel('Average Sales')
ax.set_xticklabels(['{:,.0f}'.format(x) for x in ax.get_xticks()])
ax.set_yticklabels(['${:,.0f}'.format(x) for x in ax.get_yticks()])
cd.display.pyplot(fig)
| [
11748,
269,
45637,
355,
22927,
198,
11748,
2603,
29487,
8019,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
489,
83,
13,
6015,
10044,
4105,
13,
19119,
15090,
6,
26875,
13,
9806,
62,
9654,
62,
43917,
10354,
657,
30... | 3.119086 | 613 |
# -*-coding:Utf-8 -*
# -*-coding:Utf-8 -*
# Copyright (c) 2010-2017 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Ce fichier contient la classe Prototype, détaillée plus bas."""
from collections import OrderedDict
from abstraits.obase import BaseObj
from bases.collections.flags import Flags
from primaires.format.description import Description
from primaires.perso.stats import Stats
from .script import ScriptPNJ
# Constantes
FLAGS = Flags()
FLAGS.ajouter("nyctalope", 2)
class Prototype(BaseObj):
"""Classe représentant un prototype de PNJ.
"""
enregistrer = True
nom_scripting = "le prototype de PNJ"
def __init__(self, cle):
"""Constructeur d'un type"""
BaseObj.__init__(self)
self.cle = cle
self._attributs = {}
self.no = 0 # nombre de PNJ créés sur ce prototype
self.pnj = []
# Prototypes
self.nom_singulier = "quelqu'un"
self.etat_singulier = "se tient ici"
self.nom_pluriel = "quelques-uns"
self.etat_pluriel = "se tiennent ici"
self.noms_sup = []
self.description = Description(parent=self)
self.background = Description(parent=self, scriptable=False)
self._race = None
self.genre = "aucun"
self.stats = Stats(self)
self.squelette = None
self.equipement = OrderedDict()
self.niveau = 1
self.gain_xp = 0
self.script = ScriptPNJ(self)
self.a_depecer = {}
self.entraine_stats = {}
self.talents = {}
self.sorts = {}
self.flags = 0
# Salles repop
self.salles_repop = {}
self._construire()
@property
def nom_race(self):
"""Retourne le nom de la race si existant ou une chaîne vide."""
return (self.race and self.race.nom) or ""
race = property(_get_race, _set_race)
def get_nom(self, nombre):
"""Retourne le nom complet en fonction du nombre.
Par exemple :
Si nombre == 1 : retourne le nom singulier
Sinon : retourne le nombre et le nom pluriel
"""
if nombre <= 0:
raise ValueError("la fonction get_nom a été appelée avec un " \
"nombre négatif ou nul")
elif nombre == 1:
return self.nom_singulier
else:
if self.noms_sup:
noms_sup = list(self.noms_sup)
noms_sup.reverse()
for nom in noms_sup:
if nombre >= nom[0]:
return nom[1]
return str(nombre) + " " + self.nom_pluriel
def get_nom_etat(self, personnage, nombre):
"""Retourne le nom et l'état en fonction du nombre."""
nom = self.get_nom(nombre)
if nombre == 1:
return nom + " " + self.etat_singulier
else:
if self.noms_sup:
noms_sup = list(self.noms_sup)
noms_sup.reverse()
for nom_sup in noms_sup:
if nombre >= nom_sup[0]:
return nom + " " + nom_sup[2]
return nom + " " + self.etat_pluriel
@property
def genres_possibles(self):
"""Retourne les genres disponibles pour le personnage"""
if self.race is not None:
return self.race.genres.str_genres
else:
return "masculin, féminin"
def est_masculin(self):
"""Retourne True si le personnage est masculin, False sinon"""
if self.race is not None:
return self.race.genres[self.genre] == "masculin" or \
self.genre == "aucun"
else:
return self.genre == "masculin" or self.genre == "aucun"
@property
@property
@property
@property
@property
@property
def xp_absolue(self):
"""Retourne l'XP absolu."""
try:
xp = importeur.perso.gen_niveaux.grille_xp[self.niveau][1]
except IndexError:
return 0
xp = int(xp * self.gain_xp / 100)
return xp
def a_flag(self, nom_flag):
"""Retourne True si le prototype a le flag, False sinon."""
valeur = FLAGS[nom_flag]
return self.flags & valeur != 0
def detruire(self):
"""Destruction du prototype."""
for objet, nb in self.a_depecer:
if self in objet.depecer_de:
objet.depecer_de.remove(self)
BaseObj.detruire(self)
| [
2,
532,
9,
12,
66,
7656,
25,
18274,
69,
12,
23,
532,
9,
198,
2,
532,
9,
12,
66,
7656,
25,
18274,
69,
12,
23,
532,
9,
198,
198,
2,
15069,
357,
66,
8,
3050,
12,
5539,
12509,
10351,
5777,
18653,
198,
2,
1439,
2489,
10395,
13,
... | 2.279186 | 2,604 |
import logging
import os
import shutil
import sys
import portend
from django.apps import apps
from django.core.management import call_command
from django.db.utils import OperationalError
from .conf import KOLIBRI_HOME
from .conf import OPTIONS
from .options import generate_empty_options_file
from .server import get_status
from .server import LISTEN_ADDRESS
from .server import NotRunning
logger = logging.getLogger(__name__)
PORT_AVAILABILITY_CHECK_TIMEOUT = 2
def check_other_kolibri_running(port):
"""
Make sure there are no other Kolibri instances running before starting the server.
"""
try:
# Check if there are other kolibri instances running
# If there are, then we need to stop users from starting kolibri again.
get_status()
logger.error(
"There is another Kolibri server running. "
"Please use `kolibri stop` and try again."
)
sys.exit(1)
except NotRunning:
# In case that something other than Kolibri occupies the port,
# check the port's availability.
check_port_availability(LISTEN_ADDRESS, port)
def check_port_availability(host, port):
"""
Make sure the port is available for the server to start.
"""
try:
portend.free(host, port, timeout=PORT_AVAILABILITY_CHECK_TIMEOUT)
except portend.Timeout:
# Bypass check when socket activation is used
# https://manpages.debian.org/testing/libsystemd-dev/sd_listen_fds.3.en.html#ENVIRONMENT
if not os.environ.get("LISTEN_PID", None):
# Port is occupied
logger.error(
"Port {} is occupied.\n"
"Please check that you do not have other processes "
"running on this port and try again.\n".format(port)
)
sys.exit(1)
def check_content_directory_exists_and_writable():
"""
Make sure the content directory of Kolibri exists and is writable.
"""
content_directory = OPTIONS["Paths"]["CONTENT_DIR"]
# Check if the content directory exists
if not os.path.exists(content_directory):
try:
os.makedirs(content_directory)
except OSError:
logger.error(
"The content directory {} does not exist and cannot be created.".format(
content_directory
)
)
sys.exit(1)
# Check if the directory is writable
if not os.access(content_directory, os.W_OK):
logger.error(
"The content directory {} is not writable.".format(content_directory)
)
sys.exit(1)
def check_log_file_location():
"""
Starting from Kolibri v0.12.4, log files are going to be renamed and moved
from KOLIBRI_HOME directory to KOLIBRI_HOME/logs directory.
"""
home = os.environ["KOLIBRI_HOME"]
log_location_update = {}
# Old log file names
old_daemon_log = "server.log"
old_kolibri_log = "kolibri.log"
old_debug_log = "debug.log"
# New log file names
log_location_update[old_daemon_log] = "daemon.txt"
log_location_update[old_kolibri_log] = "kolibri.txt"
log_location_update[old_debug_log] = "debug.txt"
for log in log_location_update:
old_log_path = os.path.join(home, log)
if os.path.exists(old_log_path):
new_log_path = os.path.join(home, "logs", log_location_update[log])
shutil.move(old_log_path, new_log_path)
def migrate_databases():
"""
Try to migrate all active databases. This should not be called unless Django has
been initialized.
"""
from django.conf import settings
for database in settings.DATABASES:
call_command("migrate", interactive=False, database=database)
# load morango fixtures needed for certificate related operations
call_command("loaddata", "scopedefinitions")
def check_database_is_migrated():
"""
Use a check that the database instance id model is initialized to check if the database
is in a proper state to be used. This must only be run after django initialization.
"""
apps.check_apps_ready()
from django.db import connection
from morango.models import InstanceIDModel
try:
InstanceIDModel.get_or_create_current_instance()[0]
connection.close()
return
except OperationalError:
try:
migrate_databases()
return
except Exception as e:
logging.error(
"Tried to migrate the database but another error occurred: {}".format(e)
)
except Exception as e:
logging.error(
"Tried to check that the database was accessible and an error occurred: {}".format(
e
)
)
sys.exit(1)
| [
11748,
18931,
198,
11748,
28686,
198,
11748,
4423,
346,
198,
11748,
25064,
198,
198,
11748,
2493,
437,
198,
6738,
42625,
14208,
13,
18211,
1330,
6725,
198,
6738,
42625,
14208,
13,
7295,
13,
27604,
1330,
869,
62,
21812,
198,
6738,
42625,
... | 2.467692 | 1,950 |
# -*- coding: utf-8 -*-
"""
Sensitization Visit view.
"""
from rest_framework import status
from rest_framework.response import Response
from rest_framework.views import APIView
from mspray.apps.main.models.sensitization_visit import (
create_sensitization_visit
)
class SensitizationVisitView(APIView):
"""Sensitization visit viewset."""
def post(self, request):
"""Handle Sensitization visit submissions."""
create_sensitization_visit(request.data)
return Response(status=status.HTTP_201_CREATED)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
50,
641,
270,
1634,
16440,
1570,
13,
198,
37811,
198,
198,
6738,
1334,
62,
30604,
1330,
3722,
198,
6738,
1334,
62,
30604,
13,
26209,
1330,
18261,
198,
6738,
... | 2.842105 | 190 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim: ai ts=4 sts=4 et sw=4 nu
from __future__ import (unicode_literals, absolute_import,
division, print_function)
import logging
import os
import sys
from django.core.management.base import BaseCommand
from optparse import make_option
from uninond.exports import export_to
logger = logging.getLogger(__name__)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
43907,
25,
257,
72,
40379,
28,
19,
39747,
28,
19,
2123,
1509,
28,
19,
14364,
198,
198,
6738,
11593,
37443,
834,
... | 2.657534 | 146 |
from __future__ import print_function
# to filter some unnecessory warning messages
import warnings
warnings.filterwarnings("ignore", message="numpy.dtype size changed")
warnings.filterwarnings("ignore", message="numpy.ufunc size changed")
import os
import numpy as np
import pandas as pd
import glob
import keras
from keras import backend as K
import random as rn
import cv2
import shutil
if __name__ == '__main__':
img_type = "png"
root_dir = os.path.abspath(".")
fp_cnn_model = os.path.join(root_dir, "models", "model_bmus.h5")
bmus_dir = os.path.join(root_dir, "data", "bmus")
save_dir = os.path.join(root_dir, "res")
if not os.path.exists(save_dir):
os.makedirs(save_dir)
print(save_dir)
cnn_model = load_model(fp_cnn_model)
predict_imgs_from_dir(
in_model=cnn_model,
in_src_dir=bmus_dir,
img_type=img_type,
in_save_dir=save_dir)
| [
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
2,
284,
8106,
617,
11689,
652,
6509,
6218,
198,
11748,
14601,
198,
198,
40539,
654,
13,
24455,
40539,
654,
7203,
46430,
1600,
3275,
2625,
77,
32152,
13,
67,
4906,
2546,
3421,
4943,
1... | 2.421053 | 380 |
from django.conf.urls import patterns, include, url
from django.conf import settings
from django.contrib import admin
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.conf.urls.static import static
admin.autodiscover()
urlpatterns = patterns('',
(r'^admin/filebrowser/', include('filebrowser.urls')),
# Uncomment the next line to enable the admin:
# tinymce
url(r'^tinymce/', include('tinymce.urls')),
url(r'^admin/', include(admin.site.urls)),
)
if settings.DEBUG:
urlpatterns += staticfiles_urlpatterns()
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| [
6738,
42625,
14208,
13,
10414,
13,
6371,
82,
1330,
7572,
11,
2291,
11,
19016,
198,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
6738,
42625,
14208,
13,
3642,
822,
1330,
13169,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
12708,
... | 2.859031 | 227 |
import numpy as np
import cv2
TOTAL_IMAGES = 1000
STORE_IMAGES = False
IMAGE_DIMENSION = 100
NUM_CHANNELS = 1
PADDING_MIN = 10
PADDING_MAX = 20
MIN_BOX_DIM = 40
if __name__ == "__main__":
main()
| [
11748,
299,
32152,
355,
45941,
198,
11748,
269,
85,
17,
198,
198,
51,
27510,
62,
3955,
25552,
796,
8576,
198,
2257,
6965,
62,
3955,
25552,
796,
10352,
198,
3955,
11879,
62,
35,
3955,
16938,
2849,
796,
1802,
198,
41359,
62,
3398,
22846... | 2.266667 | 90 |
#!/usr/bin/env python
import torch
from .robot_model import RobotModel
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
11748,
28034,
198,
6738,
764,
305,
13645,
62,
19849,
1330,
16071,
17633,
198,
220,
220,
220,
220
] | 2.884615 | 26 |
import os
from unittest import TestCase
from litter_getter import pubmed
| [
11748,
28686,
198,
6738,
555,
715,
395,
1330,
6208,
20448,
198,
198,
6738,
25359,
62,
1136,
353,
1330,
2240,
1150,
628,
628
] | 3.5 | 22 |
#!/usr/bin/python3
import sys
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtWidgets import QApplication, QWidget
from gui.button_panel import *
from gui.video_panel import *
from gui.xy_pad_panel import *
if __name__ == '__main__':
GuiThread()
| [
2,
48443,
14629,
14,
8800,
14,
29412,
18,
198,
11748,
25064,
198,
6738,
9485,
48,
83,
20,
1330,
33734,
14055,
11,
33734,
8205,
72,
11,
33734,
54,
312,
11407,
198,
6738,
9485,
48,
83,
20,
13,
48,
83,
54,
312,
11407,
1330,
1195,
234... | 2.663265 | 98 |
# 配置
import logging
from redis import StrictRedis
zd={'ts':tiaoshi,'xs':xianshang} | [
2,
16268,
227,
235,
163,
121,
106,
198,
11748,
18931,
198,
6738,
2266,
271,
1330,
520,
2012,
7738,
271,
628,
628,
198,
89,
67,
34758,
6,
912,
10354,
83,
544,
13704,
4032,
34223,
10354,
87,
1547,
33255,
92
] | 2.263158 | 38 |
# -*- coding: utf-8 -*-
import scrapy
from scrapy.http import Request
from ..items import SchoolSpiderItem
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
15881,
88,
198,
6738,
15881,
88,
13,
4023,
1330,
19390,
198,
6738,
11485,
23814,
1330,
3961,
41294,
7449,
628,
198
] | 3.114286 | 35 |
import numpy as np
import json
import PIL.ImageFont as ImageFont
import PIL.Image as pil
from PIL import ImageDraw
import matplotlib as mpl
import matplotlib.cm as cm
import cv2 as cv
import torch
from torch.utils.data import DataLoader
import torch.nn.functional as F
from torchvision import transforms
import time
import os
import sys
sys.path.append(os.getcwd())
from Utils.import_choice import JsonArg, Stage, json_to_data
from _Dataset.kitti import KittiColorDataset, Dataset_Options
from Metric.logger import *
from Utils.visualization import visualize_depth
if __name__ == "__main__":
metric = Metric()
metric.test_all()
metric.test_sample()
metric.test_choice()
# metric.test_choice(True)
| [
11748,
299,
32152,
355,
45941,
201,
198,
11748,
33918,
201,
198,
11748,
350,
4146,
13,
5159,
23252,
355,
7412,
23252,
201,
198,
11748,
350,
4146,
13,
5159,
355,
5560,
201,
198,
6738,
350,
4146,
1330,
7412,
25302,
201,
198,
11748,
2603,
... | 2.70318 | 283 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 15 13:23:25 2018
@author: BallBlueMeercat
"""
# Eq of state parameters for known fluids:
w_r = 1/3 # radiation
w_m = 0.0 # matter
w_de = -1.0 # cosmological constant (dark energy?)
def zfirstderivs(v, t, gamma):
"""
Takes in:
v = values at z=0;
t = list of redshifts to integrate over;
gamma = interaction term.
Returns a function f = [dt/dz, d(a)/dz,
d(e'_m)/dz, d(e'_de)/dz,
d(z)/dz,
d(dl)/dz]
"""
(t, a, e_dashm, e_dashde, z, dl) = v #omegam, omegade, z, dl) = v
Hz = (e_dashm+e_dashde)**(1/2)
import numpy as np
if np.isnan(Hz):
print("z = %s, Hz = %s, gamma = %s, e'_m = %s, e'_de = %s"%(z, Hz, gamma, e_dashm, e_dashde))
# fist derivatives of functions I want to find:
f = [# dt/dz (= f.d wrt z of time)
-1/(1+z)/Hz,
# d(a)/dz (= f.d wrt z of scale factor)
-(1+z)**(-2),
# d(e'_m)/dz (= f.d wrt z of density_m(t) / crit density(t0))
3*e_dashm /(1+z) - gamma/(1+z)/Hz,
# d(e'_de)/dz (= f.d wrt z of density_de(t) / crit desnity(t0))
gamma/(1+z)/Hz,
# d(z)/dz (= f.d wrt z of redshift)
1,
# d(dl)/dz (= f.d wrt z of luminosty distance)
1/Hz] # H + Hdz*(1+z)
return f | [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
319,
26223,
3158,
1315,
1511,
25,
1954,
25,
1495,
2864,
198,
198,
31,
9800,
25,
6932,
14573,
5... | 1.669956 | 912 |
""" Smoke test site runner """
# pylint:disable=invalid-name
import logging
import os
try:
import authl.flask
except ImportError:
authl = None
try:
import whoosh
except ImportError:
whoosh = None
import flask
import publ
import publ.image
APP_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'tests')
logging.basicConfig(level=logging.DEBUG if 'FLASK_DEBUG' in os.environ else logging.WARNING)
config = {
'database_config': {
'provider': 'sqlite',
'filename': os.path.join(APP_PATH, '..', 'index.db')
},
'content_folder': os.path.join(APP_PATH, 'content'),
'template_folder': os.path.join(APP_PATH, 'templates'),
'static_folder': os.path.join(APP_PATH, 'static'),
'cache': {
'CACHE_TYPE': os.environ['TEST_CACHING'],
'CACHE_DEFAULT_TIMEOUT': 600,
'CACHE_THRESHOLD': 20
} if os.environ.get('TEST_CACHING') else {
'CACHE_TYPE': 'NullCache',
'CACHE_NO_NULL_WARNING': True
},
'auth': {
'TEST_ENABLED': True,
'INDIEAUTH_CLIENT_ID': authl.flask.client_id if authl else None,
'FEDIVERSE_NAME': 'Publ test suite',
'TWITTER_CLIENT_KEY': os.environ.get('TWITTER_CLIENT_KEY'),
'TWITTER_CLIENT_SECRET': os.environ.get('TWITTER_CLIENT_SECRET'),
'EMAIL_SENDMAIL': print,
'EMAIL_FROM': 'nobody@example.com',
'EMAIL_SUBJECT': 'Log in to authl test',
'EMAIL_CHECK_MESSAGE': 'Use the link printed to the test console',
} if authl else {},
'user_list': os.path.join(APP_PATH, 'users.cfg'),
'layout': {
'max_width': 768,
},
'search_index': '_index' if whoosh else None,
'index_enable_watchdog': False,
}
app = publ.Publ(__name__, config)
app.secret_key = "We are insecure"
@app.route('/favicon.<ext>')
def favicon(ext):
""" render a favicon """
logo = publ.image.get_image('images/rawr.jpg', 'tests/content')
img, _ = logo.get_rendition(format=ext, width=128, height=128, resize='fill')
return flask.redirect(img)
@app.path_alias_regex(r'(.*)/date/([0-9]+)')
def date_view(match):
""" Simple test of regex path aliases, maps e.g. /foo/date/2020 to /foo/?date=2020 """
return flask.url_for('category', category=match.group(1),
date=match.group(2)), True
| [
37811,
25416,
1332,
2524,
17490,
37227,
198,
2,
279,
2645,
600,
25,
40223,
28,
259,
12102,
12,
3672,
198,
198,
11748,
18931,
198,
11748,
28686,
198,
198,
28311,
25,
198,
220,
220,
220,
1330,
6284,
75,
13,
2704,
2093,
198,
16341,
17267... | 2.264849 | 1,027 |
import streamlit as st
import pandas as pd
import numpy as np
import time
import uber_display
from scipy import stats
"# Numpy and Pandas Tutorial"
"### Semana i 2019"
"Made in Streamlit"
if st.checkbox('Show Uber Data'):
st.subheader('Uber data data')
uber_display.main()
"""# Numpy exercises """
"- **Show numpy version**"
version = np.__version__
result = "Numpy version : {}".format(version)
#Answer
result
"- **Create the array :** "
" *[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]*"
#Answer
result = np.array([0,1,2,3,4,5,6,7,])
"Answer"
result
"- **Select the cell located in row 2 column 2 from this array**"
arr = np.array(([21, 22, 23], [11, 22, 33], [43, 77, 89]))
"*Array*"
arr
#Answer
result = arr[1][1]
result
"- **Select the column with index 0**"
arr = np.array(([21, 22, 23], [11, 22, 33], [43, 77, 89]))
arr
#Answer
result = arr.T[0]
result
"- **Extract all the odd numbers in the next array**"
" *[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]*"
arr = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9])
arr = arr[arr % 2 == 1 ]
result = arr
result
"""- **Replace de odd numbers with negative numbers in the next array**"""
" *[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]*"
arr = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
#Answer
l = []
for x in range(len(arr)):
if(arr[x] % 2 == 1):
l.append(-arr[x])
else:
l.append(arr[x])
result = l
result
"- **Reshape the next array from 1D to 2D**"
" *[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]*"
arr = np.arange(10)
#Answer
arr = arr.reshape([2,5])
result = arr
result
"- **Compute euclidian distance between A and B **"
"A"
a = np.array([1,2,3,4,5])
a
"B"
b = np.array([4,5,6,7,8])
b
#Answer
result = np.linalg.norm(a-b)
result
"- **Find the most frequent value of petal length (3rd column) in the [iris dataset](https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data)**"
"*Dataset*"
iris = downloadIrisDataset()
names = ('sepallength', 'sepalwidth', 'petallength', 'petalwidth', 'species')
#Answer
moda = stats.mode(iris.T[2])
result = moda[0][0]
result | [
11748,
4269,
18250,
355,
336,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
640,
198,
11748,
48110,
62,
13812,
198,
6738,
629,
541,
88,
1330,
9756,
198,
198,
1,
2,
399,
32152,
290,
16492,
292,
... | 2.334873 | 866 |
from secrets import token_urlsafe
from starlette.config import Config
# Config will be read from environment variables and/or ".env" files.
from starlette.datastructures import Secret
config = Config(".env")
DEBUG = config("DEBUG", cast=bool, default=False)
TESTING = config("TESTING", cast=bool, default=False)
HTTPS_ONLY = config("HTTPS_ONLY", cast=bool, default=False)
GZIP_COMPRESSION = config("GZIP", cast=bool, default=False)
SECRET = config("SECRET", cast=Secret, default=token_urlsafe(10))
AIRTABLE_BASE_KEY = config("AIRTABLE_BASE_KEY", cast=Secret)
AIRTABLE_API_KEY = config("AIRTABLE_API_KEY", cast=Secret)
| [
6738,
13141,
1330,
11241,
62,
6371,
21230,
198,
198,
6738,
3491,
21348,
13,
11250,
1330,
17056,
198,
198,
2,
17056,
481,
307,
1100,
422,
2858,
9633,
290,
14,
273,
27071,
24330,
1,
3696,
13,
198,
6738,
3491,
21348,
13,
19608,
459,
1356... | 3.079208 | 202 |
"""Order 30: Crawl all cainiao stations
from url 'https://cart.taobao.com/cart.htm?spm=875.7931836%2FB.a2226mz.11.67fc5d461PCKtS&from=btop'
"""
import time
from faker import Faker
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from scrapy import Selector
from scrapy.http import HtmlResponse
from pydispatch import dispatcher
import xlwt
# 引入配置对象DesiredCapabilities
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
dcap = dict(DesiredCapabilities.PHANTOMJS)
# 从USER_AGENTS列表中随机选一个浏览器头,伪装浏览器
fk = Faker()
dcap["phantomjs.page.settings.userAgent"] = fk.user_agent()
# 不载入图片,爬页面速度会快很多
dcap["phantomjs.page.settings.loadImages"] = False
statation = CrawlAllCainiaoStations()
statation.regsitry_event('loading', call_loading)
statation.regsitry_event('load_done_area', call_load_done_area)
statation.regsitry_event('get_station_data', call_get_station_data)
statation.login()
| [
198,
37811,
18743,
1542,
25,
327,
13132,
477,
269,
391,
13481,
8985,
198,
198,
6738,
19016,
705,
5450,
1378,
26674,
13,
8326,
672,
5488,
13,
785,
14,
26674,
13,
19211,
30,
2777,
76,
28,
31360,
13,
3720,
36042,
2623,
4,
17,
26001,
13... | 2.330097 | 412 |
#!/usr/bin/python3
# -*- coding! utf-8 -*-
import os
import time
from subprocess import Popen, PIPE
from datetime import datetime
import psutil
if __name__ == '__main__':
bat_sign = []
while True:
os.system('xsetroot -name "{}"'.format(sys_state()))
| [
2,
48443,
14629,
14,
8800,
14,
29412,
18,
198,
2,
532,
9,
12,
19617,
0,
3384,
69,
12,
23,
532,
9,
12,
628,
198,
11748,
28686,
198,
11748,
640,
198,
6738,
850,
14681,
1330,
8099,
268,
11,
350,
4061,
36,
198,
6738,
4818,
8079,
133... | 2.518519 | 108 |
""" Python Workplace Simulation
Author: Garrett Guevara
Written/Tested in Python Version 3.5.2
Task: Your company is headquartered in Portland, OR.
They've opened two new branches in NYC and London. They
ask that you create a program that tells if the branches
are open or closed based on the current time at HQ.
All branches are open 9:00AM-9:00PM
"""
import datetime
class Branch(object):
""" A branch object for the hypothetical company. Each branch
has the following attributes:
name: A string with the branch name by location, i.e. "Portland"
timezone: An integer that is the correct hour difference from UTC
"""
# declare local opening and closing hour for branch, 9 AM to 9 PM
opening_hour = 9 # 9 AM
closing_hour = opening_hour + 12 # 9 PM
def is_open(self):
""" Compares if the current time adjusted for timezone is between
the variables opening_hour and closing_hour. Returns "open" or "closed".
"""
# find the current time in UTC
now = datetime.datetime.utcnow()
# add the now variable to the timezone argument
hour_in_timezone = now.hour + self.timezone
# if that hour is between 9 AM or 9 PM, return "open", else "closed"
if self.opening_hour <= hour_in_timezone < self.closing_hour:
return "open"
else:
return "closed"
# tell the person the current time based on the server they are using
currtime = datetime.datetime.now()
print("Hello, your current time is " + currtime.strftime('%H:%M:%S') + ".\n")
# declare array of three branches with correct timezone argument
branches = [
Branch('Portland', -8),
Branch('New York', -5),
Branch('London', 0)
]
# loop through list and print a string telling if it's open or closed
for branch in branches:
print(branch)
| [
37811,
11361,
5521,
5372,
41798,
198,
198,
13838,
25,
27540,
402,
518,
85,
3301,
198,
25354,
14,
51,
7287,
287,
11361,
10628,
513,
13,
20,
13,
17,
198,
198,
25714,
25,
3406,
1664,
318,
48583,
287,
10727,
11,
6375,
13,
198,
2990,
105... | 3.008143 | 614 |
"""
reddit_detective.analytics provides basic metrics for a given Node.
For more complex stuff, use Neo4j GDSC or this package:
https://github.com/neo4j-graph-analytics/networkx-neo4j
"""
| [
37811,
198,
10748,
62,
15255,
13967,
13,
38200,
14094,
3769,
4096,
20731,
329,
257,
1813,
19081,
13,
198,
198,
1890,
517,
3716,
3404,
11,
779,
21227,
19,
73,
402,
5258,
34,
393,
428,
5301,
25,
198,
5450,
1378,
12567,
13,
785,
14,
71... | 3.048387 | 62 |
import http.client
import re
from collections import OrderedDict
from urllib.parse import urlparse
from django.core.exceptions import SuspiciousOperation
from django.http import HttpResponse, JsonResponse # NOQA
from django.utils.encoding import iri_to_uri
'''Add some missing HttpResponse sub-classes'''
STATUS_CODES = list(http.client.responses.items()) + [
(308, 'PERMANENT REDIRECT'),
(427, 'BAD GEOLOCATION'),
]
STATUS_CODES = tuple(sorted(STATUS_CODES))
STATUS = OrderedDict(STATUS_CODES)
# Set constant-like properties for reverse lookup
for code, label in STATUS_CODES:
setattr(STATUS, re.sub(r'\W', '_', label.upper()), code)
class BaseHttpResponse(HttpResponse, Exception):
'''
A sub-class of HttpResponse that is also an Exception, allowing us to
raise/catch it.
With thanks to schinkel's repose.
'''
#
# Success Responses (2xx)
#
class HttpResponseSuccess(BaseHttpResponse):
'''A base class for all 2xx responses, so we can issubclass test.'''
#
# Redirection Responses (3xx)
#
class HttpResponseRedirection(BaseHttpResponse):
'''A base class for all 3xx responses.'''
class LocationHeaderMixin:
'''Many 3xx responses require a Location header'''
url = property(lambda self: self['Location'])
#
# Common ancestor for 4xx and 5xx responses
#
class HttpResponseError(BaseHttpResponse):
'''Common base class for all error responses'''
#
# Client Error Responses (4xx)
#
class HttpResponseClientError(HttpResponseError):
'''A base class for all 4xx responses.'''
# XXX Auth-Realm ?
#
# Server Error (5xx)
#
class HttpResponseServerError(HttpResponseError):
'''A base class for 5xx responses.'''
| [
198,
11748,
2638,
13,
16366,
198,
11748,
302,
198,
6738,
17268,
1330,
14230,
1068,
35,
713,
198,
6738,
2956,
297,
571,
13,
29572,
1330,
19016,
29572,
198,
198,
6738,
42625,
14208,
13,
7295,
13,
1069,
11755,
1330,
31922,
6243,
32180,
198... | 2.923986 | 592 |
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
from typing import Union, Dict
import uuid
import json
from .my_utils import json_dumps
from .constants import Constants, ConnStrKeys, Cloud, Schema
from .kql_response import KqlQueryResponse, KqlSchemaResponse, KqlError
# from .my_aad_helper import _MyAadHelper, ConnKeysKCSB
from .my_aad_helper_msal import _MyAadHelper, ConnKeysKCSB
from ._version import __version__
from .log import logger
from .kql_client import KqlClient
from .exceptions import KqlEngineError
class DraftClient(KqlClient):
"""Draft Client
Parameters
----------
conn_kv : dict
Connection string key/value that contains the credentials to access the resource via Draft.
domain: str
The Draft client domain, either apps for the case of Application Insights or workspaces for the case Log Analytics.
data_source: str
The data source url.
"""
#
# Constants
#
_DRAFT_CLIENT_BY_CLOUD = {
Cloud.PUBLIC: "db662dc1-0cfe-4e1c-a843-19a68e65be58",
Cloud.MOONCAKE: "db662dc1-0cfe-4e1c-a843-19a68e65be58",
Cloud.FAIRFAX: "730ea9e6-1e1d-480c-9df6-0bb9a90e1a0f",
Cloud.BLACKFOREST: "db662dc1-0cfe-4e1c-a843-19a68e65be58",
Cloud.PPE: "db662dc1-0cfe-4e1c-a843-19a68e65be58",
}
_DRAFT_CLIENT_BY_CLOUD[Cloud.CHINA] = _DRAFT_CLIENT_BY_CLOUD[Cloud.MOONCAKE]
_DRAFT_CLIENT_BY_CLOUD[Cloud.GOVERNMENT] = _DRAFT_CLIENT_BY_CLOUD[Cloud.FAIRFAX]
_DRAFT_CLIENT_BY_CLOUD[Cloud.GERMANY] = _DRAFT_CLIENT_BY_CLOUD[Cloud.BLACKFOREST]
_WEB_CLIENT_VERSION = __version__
_API_VERSION = "v1"
_GET_SCHEMA_QUERY = ".show schema"
_APPINSIGHTS_URL_BY_CLOUD = {
Cloud.PUBLIC: "https://api.applicationinsights.io",
Cloud.MOONCAKE: "https://api.applicationinsights.azure.cn",
Cloud.FAIRFAX: "https://api.applicationinsights.us",
Cloud.BLACKFOREST: "https://api.applicationinsights.de",
}
_APPINSIGHTS_URL_BY_CLOUD[Cloud.CHINA] = _APPINSIGHTS_URL_BY_CLOUD[Cloud.MOONCAKE]
_APPINSIGHTS_URL_BY_CLOUD[Cloud.GOVERNMENT] = _APPINSIGHTS_URL_BY_CLOUD[Cloud.FAIRFAX]
_APPINSIGHTS_URL_BY_CLOUD[Cloud.GERMANY] = _APPINSIGHTS_URL_BY_CLOUD[Cloud.BLACKFOREST]
_LOGANALYTICS_URL_BY_CLOUD = {
Cloud.PUBLIC: "https://api.loganalytics.io",
Cloud.MOONCAKE: "https://api.loganalytics.azure.cn",
Cloud.FAIRFAX: "https://api.loganalytics.us",
Cloud.BLACKFOREST: "https://api.loganalytics.de",
}
_LOGANALYTICS_URL_BY_CLOUD[Cloud.CHINA] = _LOGANALYTICS_URL_BY_CLOUD[Cloud.MOONCAKE]
_LOGANALYTICS_URL_BY_CLOUD[Cloud.GOVERNMENT] = _LOGANALYTICS_URL_BY_CLOUD[Cloud.FAIRFAX]
_LOGANALYTICS_URL_BY_CLOUD[Cloud.GERMANY] = _LOGANALYTICS_URL_BY_CLOUD[Cloud.BLACKFOREST]
_DRAFT_URLS_BY_SCHEMA = {
Schema.APPLICATION_INSIGHTS: _APPINSIGHTS_URL_BY_CLOUD,
Schema.LOG_ANALYTICS: _LOGANALYTICS_URL_BY_CLOUD
}
@property
def execute(self, id:str, query:str, accept_partial_results:bool=False, **options)->Union[KqlQueryResponse, KqlSchemaResponse]:
""" Execute a simple query or a metadata query
Parameters
----------
id : str
the workspaces (log analytics) or appid (application insights).
query : str
Query to be executed
accept_partial_results : bool, optional
Optional parameter. If query fails, but we receive some results, we consider results as partial.
If this is True, results are returned to client, even if there are exceptions.
If this is False, exception is raised. Default is False.
oprions["timeout"] : float, optional
Optional parameter. Network timeout in seconds. Default is no timeout.
Returns
-------
object
KqlQueryResponse instnace if executed simple query request
KqlSchemaResponse instnace if executed metadata request
Raises
------
KqlError
If request to draft failed.
If response from draft contains exceptions.
"""
#
# create API url
#
is_metadata = query == self._GET_SCHEMA_QUERY
api_url = f"{self._data_source}/{self._API_VERSION}/{self._domain}/{id}/{'metadata' if is_metadata else 'query'}"
#
# create Prefer header
#
prefer_list = []
if self._API_VERSION != "beta":
prefer_list.append("ai.response-thinning=false") # returns data as kusto v1
timeout = options.get("timeout")
if timeout is not None:
prefer_list.append(f"wait={timeout}")
#
# create headers
#
client_version = f"{Constants.MAGIC_CLASS_NAME}.Python.Client:{self._WEB_CLIENT_VERSION}"
client_request_id = f"{Constants.MAGIC_CLASS_NAME}.execute"
client_request_id_tag = options.get("request_id_tag")
if client_request_id_tag is not None:
client_request_id = f"{client_request_id};{client_request_id_tag};{str(uuid.uuid4())}/{self._session_guid}/AzureMonitor"
else:
client_request_id = f"{client_request_id};{str(uuid.uuid4())}/{self._session_guid}/AzureMonitor"
app = f'{Constants.MAGIC_CLASS_NAME};{options.get("notebook_app")}'
app_tag = options.get("request_app_tag")
if app_tag is not None:
app = f"{app};{app_tag}"
request_headers = {
"x-ms-client-version": client_version,
"x-ms-client-request-id": client_request_id,
"x-ms-app": app
}
user_tag = options.get("request_user_tag")
if user_tag is not None:
request_headers["x-ms-user"] = user_tag
if self._aad_helper is not None:
request_headers["Authorization"] = self._aad_helper.acquire_token()
elif self._appkey is not None:
request_headers["x-api-key"] = self._appkey
if len(prefer_list) > 0:
request_headers["Prefer"] = ", ".join(prefer_list)
cache_max_age = options.get("request_cache_max_age")
if cache_max_age is not None:
if cache_max_age > 0:
request_headers["Cache-Control"] = f"max-age={cache_max_age}"
else:
request_headers["Cache-Control"] = "no-cache"
#
# submit request
#
log_request_headers = request_headers
if request_headers.get("Authorization"):
log_request_headers = request_headers.copy()
log_request_headers["Authorization"] = "..."
# collect this inormation, in case bug report will be generated
KqlClient.last_query_info = {
"request": {
"endpoint": api_url,
"headers": log_request_headers,
"timeout": options.get("timeout"),
}
}
if is_metadata:
logger().debug(f"DraftClient::execute - GET request - url: {api_url}, headers: {log_request_headers}, timeout: {options.get('timeout')}")
response = self._http_client.get(api_url, headers=request_headers, timeout=options.get("timeout"))
else:
request_payload = {
"query": query
}
# Implicit Cross Workspace Queries: https://dev.loganalytics.io/oms/documentation/3-Using-the-API/CrossResourceQuery
# workspaces - string[] - A list of workspaces that are included in the query.
if type(options.get("query_properties")) == dict:
resources = options.get("query_properties").get(self.resources_name)
if type(resources) == list and len(resources) > 0:
request_payload[self.resources_name] = resources
timespan = options.get("query_properties").get("timespan")
if type(timespan) == str and len(timespan) > 0:
request_payload["timespan"] = timespan
logger().debug(f"DraftClient::execute - POST request - url: {api_url}, headers: {log_request_headers}, payload: {request_payload}, timeout: {options.get('timeout')}")
# collect this inormation, in case bug report will be generated
self.last_query_info["request"]["payload"] = request_payload # pylint: disable=unsupported-assignment-operation, unsubscriptable-object
response = self._http_client.post(api_url, headers=request_headers, json=request_payload, timeout=options.get("timeout"))
logger().debug(f"DraftClient::execute - response - status: {response.status_code}, headers: {response.headers}, payload: {response.text}")
#
# handle response
#
# collect this inormation, in case bug report will be generated
self.last_query_info["response"] = { # pylint: disable=unsupported-assignment-operation
"status_code": response.status_code
}
if response.status_code < 200 or response.status_code >= 300: # pylint: disable=E1101
try:
parsed_error = json.loads(response.text)
except:
parsed_error = response.text
# collect this inormation, in case bug report will be generated
self.last_query_info["response"]["error"] = parsed_error # pylint: disable=unsupported-assignment-operation, unsubscriptable-object
raise KqlError(response.text, response)
json_response = response.json()
if is_metadata:
kql_response = KqlSchemaResponse(json_response)
else:
kql_response = KqlQueryResponse(json_response)
if kql_response.has_exceptions() and not accept_partial_results:
try:
error_message = json_dumps(kql_response.get_exceptions())
except:
error_message = str(kql_response.get_exceptions())
raise KqlError(error_message, response, kql_response)
return kql_response
| [
2,
16529,
45537,
198,
2,
15069,
357,
66,
8,
5413,
10501,
13,
1439,
2489,
10395,
13,
198,
2,
49962,
739,
262,
17168,
13789,
13,
4091,
13789,
13,
14116,
287,
262,
1628,
6808,
329,
198,
2,
5964,
1321,
13,
198,
2,
16529,
35937,
198,
1... | 2.260766 | 4,598 |
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 8 16:20:20 2020
@author: Eilder Jorge
"""
# To run this, download the BeautifulSoup zip file
# http://www.py4e.com/code3/bs4.zip
# and unzip it in the same directory as this file
from urllib.request import urlopen
from bs4 import BeautifulSoup
import ssl
# Ignore SSL certificate errors
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
url = 'http://py4e-data.dr-chuck.net/comments_768124.html'
html = urlopen(url, context=ctx).read()
soup = BeautifulSoup(html, "html.parser")
sum=0
# Retrieve all of the anchor tags
tags = soup('span')
for tag in tags:
sum+=int(tag.contents[0])
print(sum) | [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
319,
3300,
5979,
220,
807,
1467,
25,
1238,
25,
1238,
12131,
198,
198,
31,
9800,
25,
412,
688,
263,
34687,
198,
37811,
198,
198,
2,
1675,
1057,
428,
... | 2.67433 | 261 |
import random
import typing
from gym_super_mario_bros import make
from gym_super_mario_bros.actions import SIMPLE_MOVEMENT
from keras.layers import Dense
from keras.optimizers import Adam
from keras.models import Sequential
from nes_py.wrappers import BinarySpaceToDiscreteSpaceEnv
from numpy import argmax, float32, reshape, uint8
from numpy.random import rand
from skimage.color import rgb2gray
from skimage.transform import resize
Action = typing.Sequence[str]
#RGB = typing.Tuple[int, int, int]
#Screen = typing.Tuple[(RGB,) * 256]
#State = typing.Tuple[(Screen,) * 240]
RGB = typing.Tuple[int]
Screen = typing.Tuple[(RGB,) * 84]
State = typing.Tuple[(Screen,) * 84]
EPISODES = 1000
# self.model.load_weights('./deep_sarsa.h5')
if __name__ == '__main__':
env = make('SuperMarioBros-v0')
env = BinarySpaceToDiscreteSpaceEnv(env, SIMPLE_MOVEMENT)
agent = Agent(84*84, SIMPLE_MOVEMENT)
scores, episodes = [], []
for e in range(EPISODES):
done = False
state = env.reset()
state = downsample(state)
score = 0
while not done:
action = agent.get_action(state)
next_state, reward, done, info = env.step(action)
next_state = downsample(next_state)
next_action = agent.get_action(next_state)
agent.train_model(state, action, reward, next_state, next_action, done)
state = next_state
score += reward
env.render()
if done:
scores.append(score)
episodes.append(e)
print(f"episode: {e}, score: {score}")
if e % 100 == 0:
agent.model.save_weights("./deep_sarsa.h5")
env.close() | [
11748,
4738,
198,
11748,
19720,
198,
198,
6738,
11550,
62,
16668,
62,
3876,
952,
62,
65,
4951,
1330,
787,
198,
6738,
11550,
62,
16668,
62,
3876,
952,
62,
65,
4951,
13,
4658,
1330,
23749,
16437,
62,
44,
8874,
12529,
198,
6738,
41927,
... | 2.304636 | 755 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2018-11-11 20:58
from __future__ import unicode_literals
from django.db import migrations
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
2980,
515,
416,
37770,
352,
13,
940,
319,
2864,
12,
1157,
12,
1157,
1160,
25,
3365,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
198,
6738,... | 2.754717 | 53 |
import mysql.connector
import re
| [
11748,
48761,
13,
8443,
273,
201,
198,
11748,
302,
201,
198,
201,
198,
201,
198
] | 2.6 | 15 |
from collections import defaultdict
import logging as log
from utils.AlertGenerator import emit_alert
from db.Models import DataCollector, Issue, AlertType
| [
6738,
17268,
1330,
4277,
11600,
198,
11748,
18931,
355,
2604,
198,
198,
6738,
3384,
4487,
13,
36420,
8645,
1352,
1330,
27588,
62,
44598,
198,
6738,
20613,
13,
5841,
1424,
1330,
6060,
31337,
273,
11,
18232,
11,
23276,
6030,
628,
628
] | 4 | 40 |
import unittest
import json
import random
import string
import os
from unittest.case import SkipTest
import twython
from sneakers.channels import twitter
import sneakers
basePath = os.path.dirname(os.path.abspath(sneakers.__file__))
| [
11748,
555,
715,
395,
198,
11748,
33918,
198,
11748,
4738,
198,
11748,
4731,
198,
11748,
28686,
198,
198,
6738,
555,
715,
395,
13,
7442,
1330,
32214,
14402,
198,
11748,
665,
7535,
198,
198,
6738,
42649,
13,
354,
8961,
1330,
17044,
198,
... | 3.309859 | 71 |
from app import apfell, db_objects
from sanic.response import raw, json
from app.database_models.model import StagingInfo
import base64
import app.crypto as crypt
import json as js
from app.api.callback_api import create_callback_func
import app.database_models.model as db_model
# this is an unprotected API so that agents and c2 profiles can hit this when staging
@apfell.route(apfell.config['API_BASE'] + "/crypto/EKE/<uuid:string>", methods=['POST'])
# this is an unprotected API so that agents and c2 profiles can hit this when staging
@apfell.route(apfell.config['API_BASE'] + "/crypto/aes_psk/<uuid:string>", methods=['POST'])
@apfell.route(apfell.config['API_BASE'] + "/list_crypto_options", methods=['GET']) | [
6738,
598,
1330,
2471,
23299,
11,
20613,
62,
48205,
198,
6738,
5336,
291,
13,
26209,
1330,
8246,
11,
33918,
198,
6738,
598,
13,
48806,
62,
27530,
13,
19849,
1330,
520,
3039,
12360,
198,
11748,
2779,
2414,
198,
11748,
598,
13,
29609,
7... | 3.152838 | 229 |
#!/usr/bin/env python3
"""
Renders index.html
"""
from mako.lookup import TemplateLookup
from pypugjs.ext.mako import preprocessor as pug_preprocessor
from libtales import Tales
if __name__ == '__main__':
main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
37811,
198,
49,
7338,
6376,
13,
6494,
198,
37811,
628,
198,
6738,
285,
25496,
13,
5460,
929,
1330,
37350,
8567,
929,
198,
6738,
279,
4464,
1018,
8457,
13,
2302,
13,
76,
25496,
13... | 2.833333 | 78 |
API_KEY = "2nqrb7e20f9gy2mp"
api_secret = "24hppoib99gudt5t4glq7yw5m9dr29ax"
# ====================================================
access_token = "GSzV0EA2hdw0ptoCqwz2Cn3Qpz7kBlRI"
| [
198,
17614,
62,
20373,
796,
366,
17,
77,
80,
26145,
22,
68,
1238,
69,
24,
1360,
17,
3149,
1,
198,
15042,
62,
21078,
796,
366,
1731,
71,
16634,
571,
2079,
70,
463,
83,
20,
83,
19,
4743,
80,
22,
88,
86,
20,
76,
24,
7109,
1959,
... | 2.103448 | 87 |
#------------------------------------------------------------------------------
#
# Copyright (c) 2014-2015, Enthought, Inc.
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in LICENSE.txt and may be redistributed only
# under the conditions described in the aforementioned license. The license
# is also available online at http://www.enthought.com/licenses/BSD.txt
#
# Thanks for using Enthought open source!
#
#------------------------------------------------------------------------------
import unittest
from ..type_registry import LazyRegistry
from .dummies import A, B, C, D, Mixed, Abstract, Concrete, ConcreteSubclass
| [
2,
10097,
26171,
198,
2,
198,
2,
220,
15069,
357,
66,
8,
1946,
12,
4626,
11,
2039,
28895,
11,
3457,
13,
198,
2,
220,
1439,
2489,
10395,
13,
198,
2,
198,
2,
220,
770,
3788,
318,
2810,
1231,
18215,
739,
262,
2846,
286,
262,
347,
... | 4.304878 | 164 |
from typing import Any, Dict, List, Optional
from transformers import T5Tokenizer
| [
6738,
19720,
1330,
4377,
11,
360,
713,
11,
7343,
11,
32233,
198,
198,
6738,
6121,
364,
1330,
309,
20,
30642,
7509,
628
] | 3.818182 | 22 |
# Copyright 2019 Internap.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from hamcrest import assert_that, equal_to
from netman.core.objects.exceptions import UnknownVlan
from tests.adapters.compliance_test_case import ComplianceTestCase
| [
2,
15069,
13130,
2445,
499,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198,
2,
921,
743,
... | 3.814433 | 194 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
| [
2,
15069,
357,
66,
8,
5413,
10501,
13,
198,
2,
49962,
739,
262,
17168,
5964,
13,
628
] | 4.352941 | 17 |
from datetime import datetime
from pkg.statistics import get_avg, get_percentiles
from pkg import all_data_file
| [
6738,
4818,
8079,
1330,
4818,
8079,
198,
198,
6738,
279,
10025,
13,
14269,
3969,
1330,
651,
62,
615,
70,
11,
651,
62,
25067,
2915,
198,
6738,
279,
10025,
1330,
477,
62,
7890,
62,
7753,
198
] | 3.228571 | 35 |
import argparse
import pandas as pd
parser = argparse.ArgumentParser()
parser.add_argument('--path', '-p', default='./validation_results.csv')
args = parser.parse_args()
path = args.path
df = pd.read_csv(path)
md = df.to_markdown(index=False)
print(md)
| [
11748,
1822,
29572,
198,
11748,
19798,
292,
355,
279,
67,
198,
198,
48610,
796,
1822,
29572,
13,
28100,
1713,
46677,
3419,
198,
48610,
13,
2860,
62,
49140,
10786,
438,
6978,
3256,
705,
12,
79,
3256,
4277,
28,
4458,
14,
12102,
341,
62,... | 2.712766 | 94 |
import random
import time
import os
import signal
import sys
import logging
import argparse
import envirophat
from prometheus_client import Gauge, start_http_server
def _daemonize(pid_file, func, *args):
"""Call func in child process"""
pid = os.fork()
if pid > 0: # Main process
with open(pid_file, 'w') as pid_file:
pid_file.write(str(pid))
sys.exit()
elif pid == 0: # Sub Process
func(*args)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Process some integers.')
parser.add_argument('-p', '--port', dest='port', action='store', type=int, default=9090,
help='Port number to start http server. Default: 9090')
parser.add_argument('-i', '--interval', dest='interval', action='store', type=int, default=5,
help='Interval where the daemon get value from sensors. Default: 5 seconds')
parser.add_argument('-d', '--daemon', dest='daemon', action='store_true', default=False,
help='Run job in background. Default: False')
parser.add_argument('-f', '--log-file', dest='logfile', action='store',
default='/var/log/enviro-collectd.log', help='Log file. Default: /var/log/enviro-collectd.log')
parser.add_argument('--debug', dest='debug', action='store_true', default=False,
help='Whether to print debug log. Default: False')
parser.add_argument('--pid', dest='pid_file', action='store', default='/var/run/enviro-collectd.pid',
help='Path to pid file. Default: /var/run/enviro-collectd.pid')
args = parser.parse_args()
# Start up the server to expose the metrics.
log_level = logging.INFO
if args.debug:
log_level = logging.DEBUG
# Configure logging
logging.basicConfig(filename=args.logfile, level=log_level, format='%(asctime)s %(message)s')
enviro_collector = EnviroCollector(port=args.port, interval=args.interval)
signal.signal(signal.SIGTERM, enviro_collector.stop)
signal.signal(signal.SIGINT, enviro_collector.stop)
try:
if args.daemon:
enviro_collector.start_background(args.pid_file)
else:
enviro_collector.start()
finally:
logging.info('Stop collecting data from enviro')
| [
11748,
4738,
198,
11748,
640,
198,
11748,
28686,
198,
11748,
6737,
198,
11748,
25064,
198,
11748,
18931,
198,
11748,
1822,
29572,
198,
198,
11748,
17365,
7058,
746,
265,
198,
6738,
1552,
36916,
62,
16366,
1330,
35094,
469,
11,
923,
62,
... | 2.468288 | 946 |
import os
from abc import ABC, abstractmethod
from anadroid.device.DeviceState import get_known_state_keys, DeviceState
from anadroid.results_analysis.filters.Filters import Filters
from anadroid.utils.Utils import get_resources_dir
DEFAULT_CFG_ANALYZERS_FILE = os.path.join(get_resources_dir(), "config", "analyzer_filters.json")
class AbstractAnalyzer(ABC):
"""Defines a basic interface to be implemented by programs aiming to analyze and produce results about the data
collected during the profiling session and profiled apps.
Attributes:
profiler(Profiler): profiler.
supported_filters(set): default set of filters to validate analyzed results.
validation_filters(set): additional set of filters provided via config file to validate analyzed results.
"""
@abstractmethod
@abstractmethod
def analyze_tests(self, app, results_dir=None, **kwargs):
"""Analyze a set of tests of a given app.
Args:
app(App): app.
results_dir: directory where to store results.
"""
pass
@abstractmethod
def analyze_test(self, app, test_id, **kwargs):
"""Analyze test identified by test_id of a given app.
Args:
app(App): app.
test_id: test uuid.
"""
pass
@abstractmethod
def validate_test(self, app, arg1, **kwargs):
"""validate results of a certain test."""
return True
@abstractmethod
def get_supported_filters(self):
"""return set of supported filters."""
return self.supported_filters
def supports_filter(self, filter_name):
"""check if a given filter is supported.
Args:
filter_name: name of the filter.
Returns:
bool: True if supported, False otherwise.
"""
return filter_name in self.supported_filters
@abstractmethod
def validate_filters(self):
"""validate supported filters."""
return True
@abstractmethod
def clean(self):
"""clean previous results."""
pass
@abstractmethod
def get_val_for_filter(self, filter_name, add_data=None):
"""get correspondent value of a given filter
Args:
filter_name: name of the filter.
Returns:
value: filter value.
"""
if filter_name in get_known_state_keys():
ds = DeviceState(self.profiler.device)
return ds.get_state(filter_name)
else:
return None
| [
11748,
28686,
198,
6738,
450,
66,
1330,
9738,
11,
12531,
24396,
198,
198,
6738,
281,
324,
3882,
13,
25202,
13,
24728,
9012,
1330,
651,
62,
4002,
62,
5219,
62,
13083,
11,
16232,
9012,
198,
6738,
281,
324,
3882,
13,
43420,
62,
20930,
... | 2.516353 | 1,009 |
# -*- coding: utf-8 -*-
"""
proxy.py
~~~~~~~~
⚡⚡⚡ Fast, Lightweight, Pluggable, TLS interception capable proxy server focused on
Network monitoring, controls & Application development, testing, debugging.
:copyright: (c) 2013-present by Abhinav Singh and contributors.
:license: BSD, see LICENSE for more details.
"""
import asyncio
from typing import Any, Optional
from multiprocessing import connection
from .threadless import Threadless
class BaseRemoteExecutor(Threadless[connection.Connection]):
"""A threadless executor implementation which receives work over a connection."""
@property
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
220,
220,
220,
15741,
13,
9078,
198,
220,
220,
220,
220,
15116,
198,
220,
220,
220,
2343,
248,
94,
158,
248,
94,
158,
248,
94,
12549,
11,
4401,
6551,
11,... | 3.423913 | 184 |
#!/usr/bin/env python
cisco_dev1 = {'device_type': 'cisco_ios',
'ip': '184.105.247.70',
'username': 'pyclass'
'password': '88newclass'}
cisco_dev2 = {'device_type': 'cisco_ios',
'ip': '184.105.247.71',
'username': 'pyclass'
'password': '88newclass'}
arista_dev1 = {'device_type': 'arista_eos',
'ip': '184.105.247.72',
'username': 'admin1'
'password': '99saturday'}
arista_dev2 = {'device_type': 'arista_eos',
'ip': '184.105.247.73',
'username': 'admin1'
'password': '99saturday'}
arista_dev3 = {'device_type': 'arista_eos',
'ip': '184.105.247.74',
'username': 'admin1'
'password': '99saturday'}
arista_dev4 = {'device_type': 'arista_eos',
'ip': '184.105.247.75',
'username': 'admin1'
'password': '99saturday'}
juniper_dev1 = {'device_type': 'juniper_junos',
'ip': '184.105.247.76',
'username': 'pyclass'
'password': '88newclass'}
devices = [cisco_dev1,
cisco_dev2,
arista_dev1,
arista_dev2,
arista_dev3,
arista_dev4,
juniper_dev1]
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
66,
4861,
62,
7959,
16,
796,
1391,
6,
25202,
62,
4906,
10354,
705,
66,
4861,
62,
4267,
3256,
198,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
705,
541,
10354,
... | 1.736413 | 736 |
'''
Create an RSS feed
'''
import os
import sys
# point this to the folder where mdiocre.py is located
sys.path.append(os.path.abspath('..'))
from mdiocre.core import MDiocre
from mdiocre.wizard import Wizard
from feedgen import feed
import datetime
# directory where the blog files are
BLOG_DIR = "source/blog"
WEBSITE_NAME = "My Website"
WEBSITE_AUTHOR = "Joe Bloggs"
WEBSITE_LANG = "en"
AUTHOR_EMAIL = "something@example.com"
WEBSITE_LINK = 'http://example.com'
RSS_LINK = 'http://example.com/feed.rss'
FEED_DESCRIPTION = "This is my feed"
if __name__ == '__main__':
# feed info
fg = feed.FeedGenerator()
fg.title(WEBSITE_NAME)
fg.description(FEED_DESCRIPTION)
fg.author( {'name':WEBSITE_AUTHOR,'email':AUTHOR_EMAIL} )
fg.language(WEBSITE_LANG)
fg.generator('python-feedgen (MDiocre v.3.1)')
# feed links
fg.link(href=WEBSITE_LINK)
fg.link(href=RSS_LINK, rel='self', type='application/rss+xml')
# set up MDiocre and file list
m = MDiocre()
blog_files = [i for i in os.listdir(BLOG_DIR) \
if \
(i.lower().endswith('.md') or i.lower().endswith('.rst'))
and not os.path.splitext(i.lower())[0].startswith('index')]
# make entry for each file
for f in blog_files:
file_path = os.path.join(BLOG_DIR, f)
file_name, file_ext = os.path.splitext(f)
# find suitable converter
file_ext = file_ext[1:].lower()
m.switch_parser(Wizard.converters[file_ext])
# read file
with open(file_path, 'r') as content:
content_vars = m.process(content.read())
# prepare feed entry
fe = fg.add_entry()
# set title, defined by e.g. <!--:title = "My First Blog Post" -->
if content_vars.get('title') != '':
blog_title = content_vars.get('title')
else:
blog_title = file_name
# set date, defined by e.g. <!--:date = "2020-09-09" -->
blog_pub = content_vars.get("date")
blog_pub = datetime.datetime.strptime(blog_pub, '%Y-%m-%d')
tz_d = datetime.timedelta(hours=0)
tz_ = datetime.timezone(tz_d, name="gmt")
blog_pub = blog_pub.replace(tzinfo=tz_)
# set feed content
blog_content = content_vars.get("content")
link = "{}/{}.html".format(WEBSITE_LINK, file_name)
# fill feed entry
fe.title(blog_title)
fe.description(blog_content)
fe.link(href=link)
fe.published(blog_pub)
# print out the rss feed
print(fg.rss_str(pretty=True).decode(encoding='utf-8'))
| [
7061,
6,
198,
16447,
281,
25012,
3745,
220,
198,
7061,
6,
198,
198,
11748,
28686,
198,
11748,
25064,
198,
2,
966,
428,
284,
262,
9483,
810,
285,
10989,
27945,
13,
9078,
318,
5140,
198,
17597,
13,
6978,
13,
33295,
7,
418,
13,
6978,
... | 2.358641 | 1,001 |
# Copyright 2017 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Created on Jan 21, 2020
@author: alfoa, wangc
SGD Regressor
"""
#Internal Modules (Lazy Importer)--------------------------------------------------------------------
#Internal Modules (Lazy Importer) End----------------------------------------------------------------
#External Modules------------------------------------------------------------------------------------
#External Modules End--------------------------------------------------------------------------------
#Internal Modules------------------------------------------------------------------------------------
from SupervisedLearning.ScikitLearn import ScikitLearnBase
from utils import InputData, InputTypes
#Internal Modules End--------------------------------------------------------------------------------
class SGDRegressor(ScikitLearnBase):
  """
    SGD Regressor: wrapper around sklearn.linear_model.SGDRegressor.
  """
  info = {'problemtype':'regression', 'normalize':True}

  def __init__(self):
    """
      Constructor that will appropriately initialize a supervised learning object
      @ In, None
      @ Out, None
    """
    super().__init__()
    import sklearn
    import sklearn.linear_model
    self.model = sklearn.linear_model.SGDRegressor

  @classmethod
  def getInputSpecification(cls):
    """
      Method to get a reference to a class that specifies the input data for
      class cls.
      @ In, cls, the class for which we are retrieving the specification
      @ Out, inputSpecification, InputData.ParameterInput, class to use for
        specifying input of cls.
    """
    specs = super(SGDRegressor, cls).getInputSpecification()
    specs.description = r"""The \xmlNode{SGDRegressor} implements regularized linear models with stochastic
                        gradient descent (SGD) learning for regression: the gradient of the loss is estimated each sample at
                        a time and the model is updated along the way with a decreasing strength schedule
                        (aka learning rate). For best results using the default learning rate schedule, the
                        data should have zero mean and unit variance.
                        This implementation works with data represented as dense or sparse arrays of floating
                        point values for the features. The model it fits can be controlled with the loss parameter;
                        by default, it fits a linear support vector machine (SVM).
                        The regularizer is a penalty added to the loss function that shrinks model parameters towards
                        the zero vector using either the squared euclidean norm L2 or the absolute norm L1 or a
                        combination of both (Elastic Net). If the parameter update crosses the 0.0 value because
                        of the regularizer, the update is truncated to $0.0$ to allow for learning sparse models and
                        achieve online feature selection.
                        This implementation works with data represented as dense arrays of floating point values for the features.
                        \zNormalizationPerformed{SGDRegressor}
                        """
    specs.addSub(InputData.parameterInputFactory("loss", contentType=InputTypes.makeEnumType("loss", "lossType",['squared_loss', 'huber','epsilon_insensitive','squared_epsilon_insensitive']),
                                                 descr=r"""The loss function to be used.
                                                 The ``squared\_loss'' refers to the ordinary least squares fit. ``huber'' modifies ``squared\_loss'' to focus less on getting outliers correct by
                                                 switching from squared to linear loss past a distance of epsilon. ``epsilon\_insensitive'' ignores errors less than epsilon and is linear past
                                                 that; this is the loss function used in SVR. ``squared\_epsilon\_insensitive'' is the same but becomes squared loss past a tolerance of epsilon.
                                                 """, default='squared_loss'))
    specs.addSub(InputData.parameterInputFactory("penalty", contentType=InputTypes.makeEnumType("penalty", "penaltyType",['l2', 'l1', 'elasticnet']),
                                                 descr=r"""The penalty (aka regularization term) to be used. Defaults to ``l2'' which is the standard regularizer for linear SVM models.
                                                 ``l1'' and ``elasticnet'' might bring sparsity to the model (feature selection) not achievable with ``l2''.""", default='l2'))
    # FIX: descr previously read "when set to learning_rate is set to" (garbled).
    specs.addSub(InputData.parameterInputFactory("alpha", contentType=InputTypes.FloatType,
                                                 descr=r"""Constant that multiplies the regularization term. The higher the value, the stronger the regularization. Also used to compute
                                                 the learning rate when learning\_rate is set to ``optimal''.""", default=0.0001))
    specs.addSub(InputData.parameterInputFactory("l1_ratio", contentType=InputTypes.FloatType,
                                                 descr=r"""The Elastic Net mixing parameter, with $0 <= l1\_ratio <= 1$. $l1\_ratio=0$ corresponds to L2 penalty, $l1\_ratio=1$ to L1.
                                                 Only used if penalty is ``elasticnet''.""", default=0.15))
    specs.addSub(InputData.parameterInputFactory("fit_intercept", contentType=InputTypes.BoolType,
                                                 descr=r"""Whether the intercept should be estimated or not. If False,
                                                 the data is assumed to be already centered.""", default=True))
    specs.addSub(InputData.parameterInputFactory("max_iter", contentType=InputTypes.IntegerType,
                                                 descr=r"""The maximum number of passes over the training data (aka epochs).""", default=1000))
    specs.addSub(InputData.parameterInputFactory("tol", contentType=InputTypes.FloatType,
                                                 descr=r"""The stopping criterion. If it is not None, training will stop when $(loss > best\_loss - tol)$ for $n\_iter\_no\_change$
                                                 consecutive epochs.""", default=1e-3))
    # FIX: descr previously started with a stray "T" ("TWhether or not ...").
    specs.addSub(InputData.parameterInputFactory("shuffle", contentType=InputTypes.BoolType,
                                                 descr=r"""Whether or not the training data should be shuffled after each epoch.""", default=True))
    specs.addSub(InputData.parameterInputFactory("epsilon", contentType=InputTypes.FloatType,
                                                 descr=r"""Epsilon in the epsilon-insensitive loss functions; only if loss is ``huber'', ``epsilon\_insensitive'', or
                                                 ``squared\_epsilon\_insensitive''. For ``huber'', determines the threshold at which it becomes less important to get the
                                                 prediction exactly right. For epsilon-insensitive, any differences between the current prediction and the correct label
                                                 are ignored if they are less than this threshold.""", default=0.1))
    specs.addSub(InputData.parameterInputFactory("learning_rate", contentType=InputTypes.makeEnumType("learning_rate", "learningType",['constant', 'optimal', 'invscaling','adaptive']),
                                                 descr=r"""The learning rate schedule:
                                                 \begin{itemize}
                                                   \item constant: $eta = eta0$
                                                   \item optimal: $eta = 1.0 / (alpha * (t + t0))$ where t0 is chosen by a heuristic proposed by Leon Bottou.
                                                   \item invscaling: $eta = eta0 / pow(t, power\_t)$
                                                   \item adaptive: $eta = eta0$, as long as the training keeps decreasing. Each time n\_iter\_no\_change consecutive epochs fail
                                                   to decrease the training loss by tol or fail to increase validation score by tol if early\_stopping is True, the current
                                                   learning rate is divided by 5.
                                                 \end{itemize}
                                                 """, default='optimal'))
    specs.addSub(InputData.parameterInputFactory("eta0", contentType=InputTypes.FloatType,
                                                 descr=r"""The initial learning rate for the ``constant'', ``invscaling'' or ``adaptive'' schedules. The default value is 0.0
                                                 as eta0 is not used by the default schedule ``optimal''.""", default=0.0))
    specs.addSub(InputData.parameterInputFactory("power_t", contentType=InputTypes.FloatType,
                                                 descr=r"""The exponent for inverse scaling learning rate.""", default=0.5))
    # FIX: descr previously read "hether to use" (missing leading "W").
    specs.addSub(InputData.parameterInputFactory("early_stopping", contentType=InputTypes.BoolType,
                                                 descr=r"""Whether to use early stopping to terminate training when validation score is not
                                                 improving. If set to True, it will automatically set aside a stratified fraction of training
                                                 data as validation and terminate training when validation score is not improving by at least
                                                 tol for n\_iter\_no\_change consecutive epochs.""", default=False))
    specs.addSub(InputData.parameterInputFactory("validation_fraction", contentType=InputTypes.FloatType,
                                                 descr=r"""The proportion of training data to set aside as validation set for early stopping.
                                                 Must be between 0 and 1. Only used if early\_stopping is True.""", default=0.1))
    specs.addSub(InputData.parameterInputFactory("n_iter_no_change", contentType=InputTypes.IntegerType,
                                                 descr=r"""Number of iterations with no improvement to wait before early stopping.""", default=5))
    specs.addSub(InputData.parameterInputFactory("random_state", contentType=InputTypes.IntegerType,
                                                 descr=r"""Used to shuffle the training data, when shuffle is set to
                                                 True. Pass an int for reproducible output across multiple function calls.""",
                                                 default=None))
    specs.addSub(InputData.parameterInputFactory("verbose", contentType=InputTypes.IntegerType,
                                                 descr=r"""The verbosity level""", default=0))
    specs.addSub(InputData.parameterInputFactory("warm_start", contentType=InputTypes.BoolType,
                                                 descr=r"""When set to True, reuse the solution of the previous call
                                                 to fit as initialization, otherwise, just erase the previous solution.""", default=False))
    specs.addSub(InputData.parameterInputFactory("average", contentType=InputTypes.BoolType,
                                                 descr=r"""When set to True, computes the averaged SGD weights accross
                                                 all updates and stores the result in the coef_ attribute.""", default=False))
    return specs

  def _handleInput(self, paramInput):
    """
      Function to handle the common parts of the distribution parameter input.
      @ In, paramInput, ParameterInput, the already parsed input.
      @ Out, None
    """
    super()._handleInput(paramInput)
    # Every node declared in getInputSpecification has a default, so all of
    # them must be found here.
    settings, notFound = paramInput.findNodesAndExtractValues(['loss','penalty','alpha','l1_ratio','fit_intercept',
                                                               'max_iter','tol','shuffle','epsilon', 'learning_rate',
                                                               'eta0','power_t','early_stopping','validation_fraction',
                                                               'n_iter_no_change', 'random_state', 'verbose', 'warm_start',
                                                               'average'])
    # notFound must be empty
    assert(not notFound)
    self.initializeModel(settings)
| [
2,
15069,
2177,
12350,
13485,
6682,
10302,
11,
11419,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,... | 2.280856 | 5,793 |
#!/usr/bin/env python
import code
import cpp
import cpp_file_parser
import file_parser
import parser_addition
import to_string
import util
import os
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
11748,
2438,
198,
11748,
269,
381,
198,
11748,
269,
381,
62,
7753,
62,
48610,
198,
11748,
2393,
62,
48610,
198,
11748,
30751,
62,
2860,
653,
198,
11748,
284,
62,
8841,
198,
11748,... | 3.037037 | 54 |
import os
import gym
from stable_baselines.common.policies import MlpPolicy
from stable_baselines import PPO2
from stable_baselines.common import make_vec_env
from TetrisBattle.envs.tetris_env import TetrisSingleEnv
# load env var
def get_var_from_env(name, default):
    """Return the environment variable *name*, or *default* when unset.

    BUG FIX(review): the original script called get_var_from_env without
    defining or importing it anywhere in this file (NameError at runtime);
    this helper supplies the os.environ-based implementation the call sites
    imply (all defaults are passed as strings and converted by the caller).
    """
    return os.environ.get(name, default)

CASE_NAME = get_var_from_env("CASE_NAME", "ppo2_tetris_test")
TRAIN_STEPS = int(float(get_var_from_env("TRAIN_STEPS", "1e5")))
TEST_STEPS = int(float(get_var_from_env("TEST_STEPS", "1e3")))
VERBOSE = int(get_var_from_env("VERBOSE", "1"))
TENSORBOARD_LOG_PATH = get_var_from_env("TENSORBOARD_LOG_PATH", "./tensorboard/" + CASE_NAME)
MODEL_OUTPUT_PATH = get_var_from_env("MODEL_OUTPUT_PATH", "/out/ppo2_tetris_test")
GRIDCHOICE = get_var_from_env("GRIDCHOICE", "none")

os.makedirs(TENSORBOARD_LOG_PATH, exist_ok=True)

env = make_vec_env(TetrisSingleEnv, n_envs=1, env_kwargs={"gridchoice": GRIDCHOICE, "obs_type": "grid", "mode": "rgb_array"})

# Train the agent.
# CONSISTENCY FIX: use the VERBOSE env setting (it was read above but the
# original hard-coded verbose=1 here, leaving VERBOSE unused).
model = PPO2(MlpPolicy, env, verbose=VERBOSE, nminibatches=4, tensorboard_log=TENSORBOARD_LOG_PATH)
model.learn(total_timesteps=TRAIN_STEPS)
model.save(MODEL_OUTPUT_PATH)
del model  # remove to demonstrate saving and loading

# Test: reload the saved model and roll it out for TEST_STEPS steps.
env = TetrisSingleEnv(gridchoice=GRIDCHOICE, obs_type="grid", mode="rgb_array")
model = PPO2.load(MODEL_OUTPUT_PATH)
obs = env.reset()
t = 0
while t < TEST_STEPS:
    action, _states = model.predict(obs)
    obs, rewards, dones, info = env.step(action)
    t += 1
print("SUCCESS")
198,
11748,
28686,
198,
11748,
11550,
198,
198,
6738,
8245,
62,
12093,
20655,
13,
11321,
13,
79,
4160,
444,
1330,
337,
34431,
36727,
198,
6738,
8245,
62,
12093,
20655,
1330,
350,
16402,
17,
198,
6738,
8245,
62,
12093,
20655,
13,
11321,
... | 2.408547 | 585 |
import hmac
from hashlib import sha1
from sys import argv
from time import time
from datetime import date
# Apigee-style callout: build a dated redirect URL from flow variables and
# publish it back on the flow as "redirect_url".
storeurl = flow.getVariable("Url")
expirytime = flow.getVariable("expirytime")  # read for parity with the flow contract; unused below
containerName = flow.getVariable("containerName")
date_now = date.today().isoformat()  # isoformat() already returns a str
url = "/".join([storeurl, containerName, "new_location/date/" + date_now + ".xml"])
flow.setVariable("redirect_url", url)
11748,
289,
20285,
198,
6738,
12234,
8019,
1330,
427,
64,
16,
198,
6738,
25064,
1330,
1822,
85,
198,
6738,
640,
1330,
640,
198,
6738,
4818,
8079,
1330,
3128,
628,
198,
8095,
6371,
796,
5202,
13,
1136,
43015,
7203,
28165,
4943,
198,
10... | 3.115385 | 130 |
# Because phone contacts are bound to profiles (we have to preload phone-contact
# data through an awkward workaround), PhoneContact must be bound to Profile.
# That in turn means this friend app should really be a child app of the account
# app: the friend module carries a lot of logic that depends on the account app,
# which is why profileId is preferred over userId here.
2,
2233,
284,
3072,
2800,
318,
5421,
284,
7034,
1201,
356,
761,
662,
2220,
3072,
2800,
1366,
422,
617,
17166,
835,
198,
2,
543,
1724,
314,
423,
284,
11007,
14484,
17829,
284,
13118,
198,
2,
543,
1724,
428,
1545,
598,
815,
423,
307,
... | 4.453333 | 75 |
from typing import Dict, List, Optional
| [
6738,
19720,
1330,
360,
713,
11,
7343,
11,
32233,
628
] | 4.1 | 10 |
"""
Module for Edible SceneElement
"""
from abc import ABC
from typing import Optional, Union
from ..element import InteractiveElement
from ...common.definitions import ElementTypes, CollisionTypes
from ...configs.parser import parse_configuration
# pylint: disable=line-too-long
class Edible(InteractiveElement, ABC):
"""
Base class for edible Scene Elements.
Once eaten by an agent, the SceneElement shrinks in size, mass, and available reward.
"""
# pylint: disable=too-many-instance-attributes
    def __init__(self,
                 reward: float,
                 shrink_ratio: float,
                 min_reward: float,
                 config_key: Optional[Union[ElementTypes, str]] = None,
                 **entity_params):
        """
        Edible entity provides a reward to the agent that eats it, then shrinks in size, mass, and available reward.

        Args:
            reward: reward provided to the agent on activation.
            shrink_ratio: mass, size, and reward are multiplied by this ratio each time the element is eaten.
            min_reward: reward level below which the edible disappears.
                NOTE(review): the disappearance itself is not handled in this constructor — confirm in the activation logic.
            config_key: key selecting the default configuration block; defaults to the class-specific entry.
            **entity_params: other params to configure SceneElement. Refer to Entity class.
        """
        # Defaults loaded from the config file are overridden by explicit kwargs.
        default_config = parse_configuration('element_activable', config_key)
        entity_params = {**default_config, **entity_params}
        super().__init__(visible_shape=True,
                         invisible_shape=True,
                         reward=reward,
                         **entity_params)
        # Kept so the element can rebuild/shrink itself later.
        self._entity_params = entity_params
        self._shrink_ratio = shrink_ratio
        self._min_reward = min_reward
@property
class Apple(Edible):
    """ Edible entity that provides a positive reward.

    Default configuration: green circle of radius 10, with an initial reward
    of 30, a minimum reward of 5, and a shrink ratio of 0.9.
    NOTE(review): these defaults come from the 'element_activable' config
    files, not from this class — confirm against the configuration.
    """
class RottenApple(Edible):
    """ Rotten-apple variant of the edible element.

    NOTE(review): the original docstring was copied verbatim from Apple
    ("provides a positive reward"); a rotten apple presumably yields a
    negative reward via its config defaults — confirm against the
    'element_activable' configuration before relying on this description.
    """
| [
37811,
198,
26796,
329,
1717,
856,
28315,
20180,
198,
37811,
198,
6738,
450,
66,
1330,
9738,
198,
6738,
19720,
1330,
32233,
11,
4479,
198,
198,
6738,
11485,
30854,
1330,
21365,
20180,
198,
6738,
2644,
11321,
13,
4299,
50101,
1330,
11703,
... | 2.549769 | 864 |
import hashlib
import random
| [
11748,
12234,
8019,
198,
11748,
4738,
628,
198
] | 3.875 | 8 |
import pyautogui
# Report the primary display's resolution as "<width> x <height>".
screen_width, screen_height = pyautogui.size()
print(f"{screen_width} x {screen_height}")
11748,
12972,
2306,
519,
9019,
198,
198,
10394,
11,
6001,
28,
12972,
2306,
519,
9019,
13,
7857,
3419,
198,
4798,
7,
69,
1,
90,
10394,
92,
2124,
1391,
17015,
92,
4943
] | 2.516129 | 31 |
# -*- coding: utf-8 -*-
# License: BSD 2 clause
import os
import torch
import torch.nn as nn
import pickle
import warnings
import torchvision.models as models
from ._loss import callLoss
from ._dlbase import BaseControler
from pyhealth.data.data_reader.ecg import mina_reader
from collections import OrderedDict
from torch import Tensor
import torch.nn.functional as F
from torch import Tensor
from torch.autograd import Variable
import numpy as np
warnings.filterwarnings('ignore')  # silence all warnings globally (e.g. deprecation noise from torch/torchvision)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
2,
13789,
25,
347,
10305,
362,
13444,
198,
198,
11748,
28686,
198,
11748,
28034,
198,
11748,
28034,
13,
20471,
355,
299,
77,
198,
11748,
2298,
293,
198,
11748,
1460... | 3.381944 | 144 |
# -*- coding:utf-8 -*-
from selenium import webdriver
import time,json
# Log in via a Chrome-driven browser session and persist the session cookies.
# NOTE(review): the URL points at baidu.com while the XPaths look like another
# site's login form — confirm the intended target URL.
driver = webdriver.Chrome()
driver.get('https://www.baidu.com')

# Fill in the account name.
driver.find_element_by_xpath('//*[@id="account"]').clear()
driver.find_element_by_xpath('//*[@id="account"]').send_keys('1192328490@qq.com')
time.sleep(2)

# Fill in the password.
driver.find_element_by_xpath('//*[@id="pwd"]').clear()
driver.find_element_by_xpath('//*[@id="pwd"]').send_keys('password')
time.sleep(2)

driver.find_element_by_xpath('//*[@id="loginForm"]/div[3]/label').click()  # click (checkbox/label)
time.sleep(2)
driver.find_element_by_xpath('//*[@id="loginBt"]').click()
time.sleep(15)  # wait for the post-login redirect to settle

# Collapse the cookie list to a simple {name: value} mapping.
cookies = driver.get_cookies()
cookie = {}
for items in cookies:
    cookie[items.get('name')] = items.get('value')

# BUG FIX: json.dumps() returns str, which cannot be written to a file opened
# in binary mode ('wb') — that raised TypeError. Open in text mode instead.
with open('cookies.txt', 'w') as file:
    file.write(json.dumps(cookie))
driver.close()
| [
2,
532,
9,
12,
19617,
25,
40477,
12,
23,
532,
9,
12,
198,
198,
6738,
384,
11925,
1505,
1330,
3992,
26230,
198,
11748,
640,
11,
17752,
198,
198,
26230,
796,
3992,
26230,
13,
1925,
5998,
3419,
198,
26230,
13,
1136,
10786,
5450,
1378,
... | 2.443769 | 329 |
from data_collection.management.commands import BaseXpressDemocracyClubCsvImporter
| [
6738,
1366,
62,
43681,
13,
27604,
13,
9503,
1746,
1330,
7308,
55,
8439,
11522,
17818,
42350,
34,
21370,
3546,
26634,
628
] | 4 | 21 |
from AutoClean.AutoClean import AutoClean | [
6738,
11160,
32657,
13,
27722,
32657,
1330,
11160,
32657
] | 4.555556 | 9 |
import json
import os
import hcl
import sh
| [
11748,
33918,
198,
11748,
28686,
198,
11748,
289,
565,
198,
11748,
427,
628,
628,
628,
628,
628,
628,
628,
628
] | 2.9 | 20 |
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Tramonto(CMakePackage):
    """Tramonto: Software for Nanostructured Fluids in Materials and Biology"""

    homepage = "https://software.sandia.gov/tramonto/"
    git = "https://github.com/Tramonto/Tramonto.git"

    # No stable release is pinned; only the upstream master branch is tracked.
    version('develop', branch='master')

    # Requires Trilinos at version 12 or older, built with the NOX solver (+nox).
    depends_on('trilinos@:12+nox')
| [
2,
15069,
2211,
12,
1238,
1828,
13914,
45036,
3549,
2351,
4765,
11,
11419,
290,
584,
198,
2,
1338,
441,
4935,
34152,
13,
4091,
262,
1353,
12,
5715,
27975,
38162,
9947,
2393,
329,
3307,
13,
198,
2,
198,
2,
30628,
55,
12,
34156,
12,
... | 2.994253 | 174 |
"""
Trying to construct test case to reproduce
https://github.com/pytti-tools/pytti-core/issues/82
"""
from omegaconf import OmegaConf
from pytti.workhorse import _main as render_frames
from pathlib import Path
# Locate any bundled .mp4 asset to use as the video source.
# NOTE(review): next() raises StopIteration when no asset matches — confirm the
# test assets are always present before this runs.
video_fpath = str(next(Path(".").glob("**/assets/*.mp4")))
print(video_fpath)

# Full pytti render configuration used to reproduce issue #82.
params = {
    # "scenes": "sunlight:3_testmasktest.mp4 | midnight:3_-testmasktest.mp4",
    "scenes": "sunlight",
    "scene_prefix": "",
    "scene_suffix": "",
    "interpolation_steps": 0,
    "steps_per_scene": 100,  # 4530,
    "direct_image_prompts": "",
    "init_image": "",
    "direct_init_weight": "",
    "semantic_init_weight": "",
    # Image model / output geometry.
    "image_model": "Limited Palette",
    "width": 360,
    "height": 640,
    "pixel_size": 1,
    "smoothing_weight": 0.02,
    "vqgan_model": "sflckr",
    "random_initial_palette": False,
    "palette_size": 6,
    "palettes": 9,
    "gamma": 1,
    "hdr_weight": 0.01,
    "palette_normalization_weight": 0.2,
    "show_palette": False,
    "target_palette": "",
    "lock_palette": False,
    # Animation / video-source settings.
    "animation_mode": "Video Source",
    "sampling_mode": "bilinear",
    "infill_mode": "smear",
    "pre_animation_steps": 0,
    "steps_per_frame": 10,
    "frames_per_second": 12,
    "direct_stabilization_weight": "",  # "testmasktest.mp4",
    "semantic_stabilization_weight": "",
    "depth_stabilization_weight": "",
    "edge_stabilization_weight": "",
    "flow_stabilization_weight": "",  # "testmasktest.mp4",
    "video_path": video_fpath,  # "testmasktest.mp4",
    "frame_stride": 1,
    "reencode_each_frame": False,
    "flow_long_term_samples": 1,
    # Camera motion (string-valued expressions).
    "translate_x": "0",
    "translate_y": "0",
    "translate_z_3d": "0",
    "rotate_3d": "[1,0,0,0]",
    "rotate_2d": "0",
    "zoom_x_2d": "0",
    "zoom_y_2d": "0",
    "lock_camera": True,
    "field_of_view": 60,
    "near_plane": 1,
    "far_plane": 10000,
    # Output / bookkeeping.
    "file_namespace": "default",
    "allow_overwrite": False,
    "display_every": 10,
    "clear_every": 0,
    "display_scale": 1,
    "save_every": 10,
    "backups": 5,
    "show_graphs": False,
    "approximate_vram_usage": False,
    # CLIP perceptor selection.
    "ViTB32": True,
    "ViTB16": False,
    "RN50": False,
    "RN50x4": False,
    "ViTL14": False,
    "RN101": False,
    "RN50x16": False,
    "RN50x64": False,
    # Optimization settings.
    "learning_rate": None,
    "reset_lr_each_frame": True,
    "seed": 15291079827822783929,
    "cutouts": 40,
    "cut_pow": 2,
    "cutout_border": 0.25,
    "gradient_accumulation_steps": 1,
    "border_mode": "clamp",
    "models_parent_dir": ".",
}
def test_issue83():
    """Reproduce https://github.com/pytti-tools/pytti-core/issues/82."""
    # Hydrate the plain dict into an OmegaConf config and run the renderer.
    render_frames(OmegaConf.create(params))
| [
37811,
198,
51,
14992,
284,
5678,
1332,
1339,
284,
22919,
198,
220,
3740,
1378,
12567,
13,
785,
14,
9078,
35671,
12,
31391,
14,
9078,
35671,
12,
7295,
14,
37165,
14,
6469,
198,
37811,
198,
198,
6738,
267,
28917,
7807,
69,
1330,
19839,... | 2.192496 | 1,226 |
from django.conf.urls import patterns, url
from affiliates.facebook import views
from affiliates.base.views import handler404, handler500
# URL routes for the Facebook affiliates app.
# NOTE(review): patterns() with a string view-prefix is the pre-Django-1.8
# style (removed in Django 1.10); fine for the version this project pins, but
# confirm before any framework upgrade.
urlpatterns = patterns('affiliates.facebook.views',
    url(r'^/?$', views.load_app, name='facebook.load_app'),
    url(r'^pre_auth/?$', views.pre_auth_promo, name='facebook.pre_auth_promo'),
    # Banner lifecycle: create, list, poll image generation, share, delete.
    url(r'^banners/new/?$', views.banner_create,
        name='facebook.banner_create'),
    url(r'^banners/?$', views.banner_list, name='facebook.banner_list'),
    url(r'^banners/(\d+)/create_image_check/?$',
        views.banner_create_image_check,
        name='facebook.banners.create_image_check'),
    url(r'^banners/(\d+)/share/?$', views.banner_share,
        name='facebook.banners.share'),
    url(r'^post_banner_share/?$', views.post_banner_share,
        name='facebook.post_banner_share'),
    url(r'^banners/delete/?$', views.banner_delete,
        name='facebook.banners.delete'),
    # Account linking between Facebook and affiliate accounts.
    url(r'^links/create/?$', views.link_accounts,
        name='facebook.link_accounts'),
    url(r'^links/([0-9A-Za-z]+-[0-9A-Za-z]+)/activate/?$', views.activate_link,
        name='facebook.links.activate'),
    url(r'^links/remove/?$', views.remove_link,
        name='facebook.links.remove'),
    url(r'^banners/(\d+)/link/?$', views.follow_banner_link,
        name='facebook.banners.link'),
    url(r'^leaderboard/?$', views.leaderboard, name='facebook.leaderboard'),
    url(r'^faq/?$', views.faq, name='facebook.faq'),
    url(r'^invite/?$', views.invite, name='facebook.invite'),
    url(r'^invite/done/?$', views.post_invite, name='facebook.post_invite'),
    url(r'^newsletter/subscribe/?$', views.newsletter_subscribe,
        name='facebook.newsletter.subscribe'),
    # Stats accept literal placeholders ":year:"/":month:" as well as digits.
    url(r'^stats/(\d+|:year:)/(\d+|:month:)/?$', views.stats, name='facebook.stats'),
    url(r'^deauthorize/?$', views.deauthorize, name='facebook.deauthorize'),
    url(r'^404/?$', handler404, name='facebook.404'),
    url(r'^500/?$', handler500, name='facebook.500'),
    url(r'^safari_workaround/?$', views.safari_workaround,
        name='facebook.safari_workaround'),
)
| [
6738,
42625,
14208,
13,
10414,
13,
6371,
82,
1330,
7572,
11,
19016,
198,
198,
6738,
29116,
13,
19024,
1330,
5009,
198,
6738,
29116,
13,
8692,
13,
33571,
1330,
21360,
26429,
11,
21360,
4059,
628,
198,
6371,
33279,
82,
796,
7572,
10786,
... | 2.426267 | 868 |
import ravendb
import unittest
import test_base
| [
11748,
24343,
437,
65,
201,
198,
11748,
555,
715,
395,
201,
198,
11748,
1332,
62,
8692,
201
] | 2.941176 | 17 |
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 14 14:13:49 2018
@author: srivastavau
"""
import psutil as psu
import pandas as pd
import time
print("What is happening to my CPU...?")
time.sleep(3)
ob=mcpu()
print("\n\t \tVirtual Memory Info\n")
ob.vm()
print("\n\t \tHard Disk Info\n")
ob.du()
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
201,
198,
37811,
201,
198,
41972,
319,
3300,
3158,
1478,
1478,
25,
1485,
25,
2920,
2864,
201,
198,
201,
198,
31,
9800,
25,
264,
15104,
459,
615,
559,
201,
198,
37811,
201,
... | 2.235714 | 140 |
from django.contrib import admin
from django.urls import path, include
import debug_toolbar
# Project-level routing: admin, the aircheck app at the root, and the
# django-debug-toolbar endpoints.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('', include('aircheck.urls')),
    path('__debug__', include(debug_toolbar.urls)),
]
| [
6738,
42625,
14208,
13,
3642,
822,
1330,
13169,
198,
6738,
42625,
14208,
13,
6371,
82,
1330,
3108,
11,
2291,
198,
11748,
14257,
62,
25981,
5657,
198,
198,
6371,
33279,
82,
796,
685,
198,
220,
220,
220,
3108,
10786,
28482,
14,
3256,
13... | 2.788235 | 85 |
import numpy as np
import torch
def compute_iou(occ1, occ2):
    ''' Computes the Intersection over Union (IoU) value for two sets of
    occupancy values.

    Occupancies are thresholded at 0.5; batched inputs (ndim >= 2) are
    flattened per sample, so the result has one IoU per leading entry.

    Args:
        occ1 (tensor): first set of occupancy values
        occ2 (tensor): second set of occupancy values
    '''
    occ1 = np.asarray(occ1)
    occ2 = np.asarray(occ2)

    # Flatten everything after the batch dimension; 1-D input is left as-is.
    if occ1.ndim >= 2:
        occ1 = occ1.reshape(len(occ1), -1)
    if occ2.ndim >= 2:
        occ2 = occ2.reshape(len(occ2), -1)

    # Threshold to boolean occupancy.
    occ1 = occ1 >= 0.5
    occ2 = occ2 >= 0.5

    # IoU = |A ∩ B| / |A ∪ B|, accumulated in float32 as before.
    union = np.logical_or(occ1, occ2).sum(axis=-1, dtype=np.float32)
    intersection = np.logical_and(occ1, occ2).sum(axis=-1, dtype=np.float32)
    return intersection / union
def make_3d_grid(bb_min, bb_max, shape):
    ''' Makes a 3D grid.

    Returns an (N, 3) tensor of points, N = shape[0] * shape[1] * shape[2],
    ordered with the last axis varying fastest.

    Args:
        bb_min (tuple): bounding box minimum
        bb_max (tuple): bounding box maximum
        shape (tuple): output shape
    '''
    coords = []
    for axis in range(3):
        pts = torch.linspace(bb_min[axis], bb_max[axis], shape[axis])
        # Broadcast this axis across the other two, then flatten.
        view_shape = [1, 1, 1]
        view_shape[axis] = -1
        coords.append(pts.view(view_shape).expand(*shape).reshape(-1))
    return torch.stack(coords, dim=1)
11748,
299,
32152,
355,
45941,
198,
11748,
28034,
628,
198,
4299,
24061,
62,
72,
280,
7,
13966,
16,
11,
1609,
17,
2599,
198,
220,
220,
220,
705,
7061,
3082,
1769,
262,
4225,
5458,
625,
4479,
357,
40,
78,
52,
8,
1988,
329,
734,
562... | 2.195435 | 701 |
from goscale.cms_plugins import GoscaleCMSPluginBase
from cms.plugin_pool import plugin_pool
from django.utils.translation import ugettext_lazy as _
from django.conf import settings
import models
# Template choices for the feed plugins: built-in defaults, optionally
# overridden/extended through Django settings.
GOSCALE_FEEDS_PLUGIN_TEMPLATES = getattr(settings, 'GOSCALE_FEEDS_PLUGIN_TEMPLATES', (
    ('posts.html', _('Posts')),
    ('posts_small.html', _('Small posts (sidebar)')),
)) + getattr(settings, 'GOSCALE_FEEDS_CUSTOM_PLUGIN_TEMPLATES', ())
class FeedPlugin(GoscaleCMSPluginBase):
    """
    Feed plugin for GoScale: renders an RSS feed via the configurable
    feed templates.
    """
    model = models.Feed
    name = _("RSS Feed")
    plugin_templates = GOSCALE_FEEDS_PLUGIN_TEMPLATES
    # First declared template is the default rendering.
    render_template = GOSCALE_FEEDS_PLUGIN_TEMPLATES[0][0]
    fieldsets = [
        [_('Feed options'), {
            'fields': ['url', 'page_size', 'show_date', 'external_links', 'disqus']
        }]
    ]

plugin_pool.register_plugin(FeedPlugin)
class BloggerPlugin(FeedPlugin):
    """
    Blogger plugin for GoScale: a FeedPlugin with an extra 'label' filter
    option.
    """
    model = models.Blogger
    name = _("Blogger")
    fieldsets = [
        [_('Feed options'), {
            'fields': ['url', 'page_size', 'label', 'show_date', 'external_links', 'disqus']
        }]
    ]

plugin_pool.register_plugin(BloggerPlugin)
class TumblrPlugin(BloggerPlugin):
    """
    Tumblr plugin for GoScale: same options as BloggerPlugin, backed by the
    Tumblr model. (Docstring previously said "Feed plugin" — copy-paste.)
    """
    model = models.Tumblr
    name = _("Tumblr")

plugin_pool.register_plugin(TumblrPlugin)
6738,
308,
17500,
1000,
13,
46406,
62,
37390,
1330,
402,
17500,
1000,
34,
5653,
37233,
14881,
198,
6738,
269,
907,
13,
33803,
62,
7742,
1330,
13877,
62,
7742,
198,
6738,
42625,
14208,
13,
26791,
13,
41519,
1330,
334,
1136,
5239,
62,
7... | 2.426316 | 570 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division, print_function )
import codecs
import pandas as pd
from jinja2 import Environment, PackageLoader
from flask import Flask, render_template_string
from .utilities import latitude,longitude, map_templates
from .check_data import check_column, check_center, check_projection
class BaseMap(object):
''' Check DataFrame Accuracy And Setup Maps '''
    def __init__(self, df, width=960, height=500, map="world_map",
                 center=None, projection="mercator", title= None):
        '''
        The BaseMap class is here to handle all of the generic aspects of
        setting up a Latitude and Longitude based map. These aspects are:
            1. Verifying the Pandas Dataframe (lat/long columns, NAs)
            2. Setting up and holding template information.
        This is a private class used by other classes but not the User.

        Parameters
        ----------
        df: pandas dataframe, required.
            dataframe with latitude and longitude columns.
        width: int, default 960
            Width of the map.
        height: int, default 500
            Height of the map.
        map: str, default "world_map".
            template to be used for mapping.
            (Note: this parameter name shadows the builtin map() here.)
        projection: str, default="mercator"
            a projection that is one of the projections recognized by d3.js
        center: tuple or list of two. default=None
            (lat, lon) pair to center the map on; None keeps the template
            default. NOTE(review): the original docstring repeated the
            projection description here — confirm intended semantics.
        '''
        # Check Inputs For Bad or Inconsistent Data
        assert isinstance(df, pd.core.frame.DataFrame)
        self.df = df
        self.lat = check_column(self.df, latitude, 'latitude')
        self.lon = check_column(self.df, longitude, 'longitude')
        self.map = map
        self.center= check_center(center)
        self.projection = check_projection(projection)
        self.title=title

        #Template Information Here
        self.env = Environment(loader=PackageLoader('quickD3map', 'templates'))
        self.template_vars = {'width': width, 'height': height, 'center': self.center,
                              'projection':self.projection, "title":self.title}

        #add all template combinations. Specify Template Subsets in map classes
        self.map_templates = map_templates

        #JS Libraries and CSS Styling
        # Pre-render the static assets once so map templates can inline them.
        self.template_vars['d3_projection'] = self.env.get_template('d3.geo.projection.v0.min.js').render()
        self.template_vars['topojson'] = self.env.get_template('topojson.v1.min.js').render()
        self.template_vars['d3js'] = self.env.get_template('d3.v3.min.js').render()
        self.template_vars['style'] = self.env.get_template('style.css').render()
        self.template_vars['colorbrewer_css'] = self.env.get_template('colorbrewer.css').render()
        self.template_vars['colorbrewer_js'] = self.env.get_template('colorbrewer.js').render()
## Display Methods
########################################################################################
def build_map(self):
'''Build HTML/JS/CSS from Templates given current map type'''
self.convert_to_geojson()
map = self.env.get_template( self.map_templates[self.map]['json'] )
self.template_vars['map_data'] = map.render()
#generate html
html_templ = self.env.get_template(self.map_templates[self.map]['template'])
self.HTML = html_templ.render(self.template_vars)
def create_map(self, path='map.html'):
''' utility function used by all map classes
to write Map to file
Parameters:
-----------
path: string, default 'map.html'
Path for HTML output for map
'''
self.build_map()
with codecs.open(path, 'w') as f:
f.write(self.HTML)
    def display_map(self):
        ''' utility function used by all map classes
        to display map. Creates a Flask App.
        Down the line maybe an IPython Widget as well?
        '''
        app = Flask(__name__)
        self.build_map()
        # NOTE(review): the view function that @app.route('/') decorated is
        # missing from this copy (a decorator must be followed by a def) —
        # as written this is a SyntaxError. Restore the route handler
        # (presumably returning self.HTML) before use.
        @app.route('/')
        app.run()
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
6738,
11593,
37443,
834,
1330,
357,
48546,
62,
11748,
11,
7297,
11,
3601,
62,
8818,
1267,
198,
198,
11748,
40481,
... | 2.358045 | 1,821 |
from .models import Likes, Reviews, Bookmarks, BaseCollection # noqa: F401
| [
6738,
764,
27530,
1330,
46077,
11,
20871,
11,
4897,
14306,
11,
7308,
36307,
220,
1303,
645,
20402,
25,
376,
21844,
198
] | 3.619048 | 21 |
"""Package for containing data manager functions to convert user input to commands."""
from manager.data_manager.mirror import PortMirrorConfig
from manager.data_manager.vlan import VlanConfig
# Public API of the data_manager package.
# BUG FIX: __all__ must contain *strings* naming the exported symbols, not the
# objects themselves — non-string entries make `from ... import *` raise and
# confuse tooling that introspects __all__.
__all__ = [
    "PortMirrorConfig",
    "VlanConfig",
]
| [
37811,
27813,
329,
7268,
1366,
4706,
5499,
284,
10385,
2836,
5128,
284,
9729,
526,
15931,
198,
198,
6738,
4706,
13,
7890,
62,
37153,
13,
10793,
1472,
1330,
4347,
27453,
1472,
16934,
198,
6738,
4706,
13,
7890,
62,
37153,
13,
85,
9620,
... | 3.430556 | 72 |
# import pchcloud api
import usermanager
import timerecording
import device
import pprint
import json
from datetime import datetime, timedelta
import dateutil.parser
import os
from pathlib import Path
# Repository root: two levels up from this script.
parentdir = Path(__file__).parents[1]
pp = pprint.PrettyPrinter(indent=4)

if __name__ == "__main__":
    # gets all time recordings from the last 5 days
    # get current time
    # Load connection/configuration settings from the JSON config file.
    configfile=parentdir.joinpath('config/spectrum_config.json')
    with open(configfile) as config_file:
        config = json.load(config_file)
    #print("Configuration: ")
    #pp.pprint(config)
    host = config["host"]
    download_path=config['download_path']
    setup_dir=parentdir.joinpath(download_path,host)
    os.makedirs(setup_dir,exist_ok=True)
    # NOTE(review): delete_on_server and the start/end time range below are
    # computed but never used in this chunk — confirm whether a download step
    # that consumed them was removed.
    delete_on_server = config['delete_on_server']
    query_passed_days = config['query_passed_days']
    # set timerange
    end = datetime.utcnow()
    start = end - timedelta(days=query_passed_days)
    # login
    session = usermanager.login(host, config["username"], config["password"])
    token = session['token']
    # Dump the device list for documentation.
    devices = device.get_device_list(host, token)
    devicefile=parentdir.joinpath(download_path,host,'devices.json')
    with open(devicefile,"w") as s:
        s.write(json.dumps(devices))
        s.close()  # redundant: the with-statement already closes the file
    # Dump the spectrum setups.
    spectrum_total=timerecording.get_spectrum_names(host,token)
    #print(spectrum_total)
    spectrum_setups=spectrum_total['spectrumSetups']
    spectrumsetups=parentdir.joinpath(download_path,host,'spectrum_setups.json')
    with open(spectrumsetups,"w") as s:
        s.write(json.dumps(spectrum_setups))
        s.close()  # redundant: the with-statement already closes the file
    # Collect and dump just the setup names.
    spectrumnames=parentdir.joinpath(download_path,host,'spectrum_names.json')
    name_array=[]
    print('\nSPECTRUM NAMES\n')
    for elm in spectrum_setups:
        name_array.append(elm['name'])
        print(elm['name'])
    with open(spectrumnames,"w") as s:
        s.write(json.dumps(name_array))
        s.close()  # redundant: the with-statement already closes the file
    print("three .json files are made at for documentation at: \n" + str(devicefile) +"\n" + str(spectrumsetups) +"\n" + str(spectrumnames) )
| [
2,
1330,
279,
354,
17721,
40391,
198,
11748,
514,
2224,
3536,
198,
11748,
640,
8344,
1284,
198,
11748,
3335,
198,
198,
11748,
279,
4798,
198,
11748,
33918,
198,
6738,
4818,
8079,
1330,
4818,
8079,
11,
28805,
12514,
198,
198,
11748,
3128... | 2.228461 | 1,033 |
import torch
IMAGE_DIR = 'E:/Computer Vision/Segmentation/Data/Train/Images'
MASK_DIR = 'E:/Computer Vision/Segmentation/Data/Train/labels'
VALID_IMAGE_DIR = 'E:/Computer Vision/Segmentation/Data/Validation/Images'
VALID_MASK_DIR = 'E:/Computer Vision/Segmentation/Data/Validation/labels'
MODEL_SAVE_DIR = 'E:/Computer Vision/Segmentation'
LR = 1e-4
BATCH_SIZE = 8
NUM_EPOCHS = 10
DEVICE = torch.device('cuda') | [
11748,
28034,
201,
198,
201,
198,
3955,
11879,
62,
34720,
796,
705,
36,
14079,
34556,
19009,
14,
41030,
14374,
14,
6601,
14,
44077,
14,
29398,
6,
201,
198,
31180,
42,
62,
34720,
796,
705,
36,
14079,
34556,
19009,
14,
41030,
14374,
14,... | 2.647799 | 159 |
# Copyright (C) 202 The Dagger Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Macros for building with Bazel.
"""
load("@io_bazel_rules_kotlin//kotlin:kotlin.bzl", "kt_android_library")
def bazel_kt_android_library(name, kwargs):
"""A macro that wraps Bazel's kt_android_library.
This macro wraps Bazel's kt_android_library to output the jars files
in the expected locations (b/203519416). It also adds a dependency on
kotlin_stdlib if there are kotlin sources.
Args:
name: the name of the library.
kwargs: Additional arguments of the library.
"""
# If there are any kotlin sources, add the kotlin_stdlib, otherwise
# java-only projects may be missing a required runtime dependency on it.
if any([src.endswith(".kt") for src in kwargs.get("srcs", [])]):
# Add the kotlin_stdlib, otherwise it will be missing from java-only projects.
# We use deps rather than exports because exports isn't picked up by the pom file.
# See https://github.com/google/dagger/issues/3119
required_deps = ["@maven//:org_jetbrains_kotlin_kotlin_stdlib"]
kwargs["deps"] = kwargs.get("deps", []) + required_deps
# TODO(b/203519416): Bazel's kt_android_library outputs its jars under a target
# suffixed with "_kt". Thus, we have to do a bit of name aliasing to ensure that
# the jars exist at the expected targets.
kt_android_library(
name = "{}_internal".format(name),
**kwargs
)
native.alias(
name = name,
actual = ":{}_internal_kt".format(name),
)
native.alias(
name = "lib{}.jar".format(name),
actual = ":{}_internal_kt.jar".format(name),
)
native.alias(
name = "lib{}-src.jar".format(name),
actual = ":{}_internal_kt-sources.jar".format(name),
)
| [
2,
15069,
357,
34,
8,
22131,
383,
36320,
46665,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,... | 2.774614 | 843 |
import keras
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras import backend as K
import cv2
import numpy as np
import time
start_time = time.time()
batch_size = 20
num_classes = 10
epochs = 20
# input image dimensions
img_rows, img_cols = 28, 28
# the data, shuffled and split between train and test sets
(x_train, y_train), (x_test, y_test) = mnist.load_data()
if K.image_data_format() == 'channels_first':
x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)
x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)
input_shape = (1, img_rows, img_cols)
else:
x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
input_shape = (img_rows, img_cols, 1)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
print('x_train shape:', x_train.shape)
print('x[0]_train shape:', x_train[0].shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
# convert class vectors to binary class matrices
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
# model = Sequential()
# model.add(Conv2D(20, kernel_size=(5, 5),
# activation='relu',
# input_shape=input_shape))
# model.add(MaxPooling2D(pool_size=(2, 2),strides=(2, 2)))
# model.add(Conv2D(20, (5, 5), activation='relu'))
# model.add(MaxPooling2D(pool_size=(2, 2),strides=(2, 2)))
# #model.add(Dropout(0.25))
# model.add(Flatten())
# model.add(Dense(500, activation='relu'))
# #model.add(Dropout(0.5))
# model.add(Dense(num_classes, activation='softmax'))
# model.compile(loss=keras.losses.categorical_crossentropy,
# optimizer=keras.optimizers.Adadelta(),
# metrics=['accuracy'])
model = Sequential()
model.add(Conv2D(8, kernel_size=(5, 5),strides=(1, 1),
activation='relu',
input_shape=input_shape))
model.add(MaxPooling2D(pool_size=(2, 2),strides=(2, 2)))
model.add(Conv2D(16, (5, 5), strides=(1, 1) ,activation='relu'))
model.add(MaxPooling2D(pool_size=(3, 3),strides=(3, 3)))
model.add(Flatten())
model.add(Dense(num_classes, activation='softmax'))
model.compile(loss=keras.losses.categorical_crossentropy,
optimizer=keras.optimizers.Adadelta(),
metrics=['accuracy'])
# model.load_weights("model_weights_3.h5")
model.fit(x_train, y_train,
batch_size=batch_size,
epochs=epochs,
verbose=1,
validation_data=(x_test, y_test))
score = model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
model.save('model_5_convnetjs.h5')
model.save_weights('model_weights_5_convnetjs.h5')
print("--- %s seconds ---" % (time.time() - start_time))
| [
11748,
41927,
292,
201,
198,
6738,
41927,
292,
13,
19608,
292,
1039,
1330,
285,
77,
396,
201,
198,
6738,
41927,
292,
13,
27530,
1330,
24604,
1843,
201,
198,
6738,
41927,
292,
13,
75,
6962,
1330,
360,
1072,
11,
14258,
448,
11,
1610,
... | 2.199574 | 1,408 |