content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
import os
from pytz import common_timezones
from django.conf import settings
from django.shortcuts import render, redirect
from django.contrib.auth.decorators import login_required
from django.contrib.auth.forms import AdminPasswordChangeForm
from django.contrib.auth.forms import PasswordChangeForm
from django.contrib.auth import authenticate, login, update_session_auth_hash
from django.contrib import messages
from django.contrib.auth.models import User
from django.http import HttpResponseRedirect
from django.urls import reverse
from api.models.engage import Help_Guide
from api.models.configuration import Model, Model_User
@login_required
def home_view(request):
"""
View the "Home" page
Returns: HttpResponse
Example:
http://0.0.0.0:8000/
"""
user_models = Model_User.objects.filter(
user=request.user,
model__snapshot_version=None,
model__public=False).order_by('-last_access')
model_ids = user_models.values_list(
"model_id", flat=True)
snapshots = Model.objects.filter(
snapshot_base_id__in=model_ids)
public_models = Model.objects.filter(
snapshot_version=None,
public=True)
user_models = list(user_models)
if len(user_models) > 0:
last_model = user_models[0].model
elif len(public_models) > 0:
last_model = public_models[0]
else:
last_model = None
context = {
"timezones": common_timezones,
"last_model": last_model,
"user_models": user_models,
"public_models": public_models,
"snapshots": snapshots,
"mapbox_token": settings.MAPBOX_TOKEN,
"help_content": Help_Guide.get_safe_html('home'),
}
return render(request, "home.html", context)
@login_required
def share_view(request):
"""
View the "Model Sharing" page
Parameters:
model_uuid (uuid): required
Returns: HttpResponse
Example:
http://0.0.0.0:8000/share/
"""
selected_model_uuid = request.GET.get('model_uuid', None)
user_models = Model_User.objects.filter(user=request.user,
model__is_uploading=False)
users = User.objects.all().exclude(
id=request.user.id).order_by('last_name', 'first_name')
context = {
"timezones": common_timezones,
"user": request.user,
"users": users,
"user_models": user_models,
"selected_model_uuid": str(selected_model_uuid),
"help_content": Help_Guide.get_safe_html('share'),
}
return render(request, "share.html", context)
@login_required
def password_view(request):
"""
View the "Password Manager" page
Parameters:
model_uuid (uuid): required
Returns: HttpResponse
Example:
http://0.0.0.0:8000/settings/password/
"""
user = request.user
if user.has_usable_password():
PasswordForm = PasswordChangeForm
else:
PasswordForm = AdminPasswordChangeForm
if request.method == 'POST':
form = PasswordForm(user, request.POST)
if form.is_valid():
form.save()
update_session_auth_hash(request, form.user)
messages.success(request,
"Your password was successfully updated!")
return redirect('password')
else:
messages.error(request, 'Please correct the error below')
else:
form = PasswordForm(user)
context = {
'user': user,
'form': form,
}
return render(request, "password.html", context)
@login_required
def admin_login_view(request):
"""
Redirect to login view
Parameters:
model_uuid (uuid): required
Returns: HttpResponse
Example:
http://0.0.0.0:8000/share/
"""
context = {}
return render(request, "share.html", context)
def user_login(request):
"""
View the "Login" page
Returns: HttpResponse
Example:
http://0.0.0.0:8000/login/
"""
redirect_to = request.GET.get('next', '')
status = request.GET.get('status', 'login')
status_messages = {'active': ("Account has been activated!\n"
"Please proceed to login..."),
'inactive': ("Account has not yet been activated!\n"
"Email must be verified first..."),
'invalid-email': ("Email is invalid!\n"
"Please try again..."),
'invalid-password': ("Password is invalid!\n"
"Please try again...")}
if request.user.is_authenticated:
return HttpResponseRedirect(reverse('home'))
if request.method == 'POST':
email = request.POST.get('email').lower()
password = request.POST.get('password')
user = User.objects.filter(username=email).first()
if user:
if not user.is_active:
url = "%s?status=inactive" % reverse('login')
return HttpResponseRedirect(url)
else:
user = authenticate(username=email, password=password)
if user:
login(request, user)
if redirect_to:
return HttpResponseRedirect(redirect_to)
else:
return HttpResponseRedirect(reverse('home'))
else:
url = "%s?status=invalid-password" % reverse('login')
return HttpResponseRedirect(url)
else:
url = "%s?status=invalid-email" % reverse('login')
return HttpResponseRedirect(url)
else:
public_models = Model.objects.filter(
snapshot_version=None,
public=True)
context = {'redirect_to': redirect_to,
'status': status_messages.get(status, ''),
'public_models': public_models}
return render(request, 'registration/login.html', context)
| [
11748,
28686,
198,
6738,
12972,
22877,
1330,
2219,
62,
2435,
89,
1952,
198,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
6738,
42625,
14208,
13,
19509,
23779,
1330,
8543,
11,
18941,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18... | 2.249907 | 2,689 |
"""
Data-driven tests for g2p.
"""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import rdflib
import ga4gh.datamodel.genotype_phenotype as genotype_phenotype
import ga4gh.datamodel.datasets as datasets
import ga4gh.protocol as protocol
import tests.datadriven as datadriven
import tests.paths as paths
| [
37811,
198,
6601,
12,
15808,
5254,
329,
308,
17,
79,
13,
198,
37811,
198,
6738,
11593,
37443,
834,
1330,
7297,
198,
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
1... | 3.122951 | 122 |
add_library('video')
| [
2860,
62,
32016,
10786,
15588,
11537,
628
] | 3.142857 | 7 |
# Added recovery lap data - counter-steering angle 1.0
#import os
import csv
#import cv2
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten, SpatialDropout2D, ELU
from keras.layers import Conv2D, MaxPooling2D, Cropping2D
from keras.layers.core import Lambda
from keras.optimizers import SGD, Adam, RMSprop
from keras.utils import np_utils
from keras.callbacks import ModelCheckpoint
from keras.models import model_from_json
from keras.optimizers import Adam
## Load udacity sample data
samples = []
with open('./data_udacity_recovery/driving_log_udacity.csv') as csvfile:
reader = csv.reader(csvfile)
for line in reader:
samples.append(line)
samples = samples[1:] # Remove header
num_samples = len(samples)
## Add left camera data
samples_addleft = samples
for i in range(0, num_samples):
r=samples[i]
left_name = './data_udacity_recovery/IMG/'+r[1].split('/')[-1]
left_angle = float(r[3])+0.25 # counter-steering angle
r[0]=left_name
r[3]=left_angle
samples_addleft.append(r)
samples = samples_addleft
## Add right camera data
samples_addright = samples
for i in range(0, num_samples):
r=samples[i]
right_name = './data_udacity_recovery/IMG/'+r[2].split('/')[-1]
right_angle = float(r[3])-0.25 # counter-steering angle
r[0]=right_name
r[3]=right_angle
samples_addright.append(r)
samples = samples_addright
## Load recovery lap data - left lane
samples_addrecoveryleft = samples
with open('./data_udacity_recovery/driving_log_recovery_left.csv') as csvfile:
reader = csv.reader(csvfile)
for line in reader:
recovery_left_angle = float(r[3])+1. # counter-steering angle
line[3]=recovery_left_angle
samples_addrecoveryleft.append(line)
samples = samples_addrecoveryleft
## Load recovery lap data - right lane
samples_addrecoveryright = samples
with open('./data_udacity_recovery/driving_log_recovery_right.csv') as csvfile:
reader = csv.reader(csvfile)
for line in reader:
recovery_right_angle = float(r[3])-1. # counter-steering angle
line[3]=recovery_right_angle
samples_addrecoveryright.append(line)
samples = samples_addrecoveryright
N_orig = len(samples)
angle_orig = []
for i in range(0, N_orig):
r = samples[i]
angle_orig.append(float(r[3]))
print("Sample size (Original): ",N_orig)
## Cull sample data with low steering angles
samples_cull = []
for i in range(0, N_orig):
r = samples[i]
if abs(float(r[3]))>.05:
samples_cull.append(r)
elif np.random.randint(10) > 8: # Remove 80% of sample data with low steering angles
samples_cull.append(r)
samples = samples_cull
N_cull = len(samples)
angle_cull = []
for i in range(0, N_cull):
r = samples[i]
angle_cull.append(float(r[3]))
print("Sample size (Culled): ",N_cull)
# Frequency distribution of steering angles
hist, bins = np.histogram(angle_orig, bins=25, range=(-1,1))
width = 0.7 * (bins[1] - bins[0])
center = (bins[:-1] + bins[1:]) / 2
plt.bar(center, hist, align='center', width=width)
plt.show() # Original data
hist, bins = np.histogram(angle_cull, bins=25, range=(-1,1))
width = 0.7 * (bins[1] - bins[0])
center = (bins[:-1] + bins[1:]) / 2
plt.bar(center, hist, align='center', width=width)
plt.show() # Culled data
## Split samples into training (80%) and test sets (20%)
train_samples, validation_samples = train_test_split(samples, test_size=0.2)
## Define generator
## Define image resizing function to fit Comma.ai model
## Compile model using generator
train_generator = generator(train_samples,batch_size=32)
validation_generator = generator(validation_samples,batch_size=32)
## Comma.ai model
model = Sequential()
model.add(Cropping2D(cropping=((70,25),(0,0)), # Crop 70 pixels from the top and 25 from the bottom
input_shape=(160,320,3),data_format="channels_last"))
model.add(Lambda(resize_image))# Resize image
model.add(Lambda(lambda x: (x/127.5) - 1.)) # Normalize signal intensity
model.add(Conv2D(16,(8,8),strides=(4,4),padding="same",activation="elu"))# Conv layer 1
model.add(Conv2D(32,(5,5),strides=(2,2),padding="same",activation="elu"))# Conv layer 2
model.add(Conv2D(64,(5,5),strides=(2,2),padding="same"))# Conv layer 3
model.add(Flatten())
model.add(Dropout(.5))
model.add(ELU())
model.add(Dense(512))# Fully connected layer 1
model.add(Dropout(.5))
model.add(ELU())
model.add(Dense(50))# Fully connected layer 2
model.add(ELU())
model.add(Dense(1))
model.compile(loss='mse', optimizer=Adam(lr=0.0001),metrics=['accuracy'])
print("Model summary:\n", model.summary())
history_object = model.fit_generator(train_generator,
steps_per_epoch=len(train_samples),
validation_data=validation_generator,
validation_steps=len(validation_samples),
epochs=20,verbose=1)
## print the keys contained in the history object
print(history_object.history.keys())
### plot the training and validation loss for each epoch
plt.plot(history_object.history['loss'])
plt.plot(history_object.history['val_loss'])
plt.title('model mean squared error loss')
plt.ylabel('mean squared error loss')
plt.xlabel('epoch')
plt.legend(['training set', 'validation set'], loc='upper right')
plt.show()
## Save model
name = '20170401_model_test'
with open(name + '.json', 'w') as output:
output.write(model.to_json())
model.save(name + '.h5')
print("Saved model to disk")
# python drive.py 20170401_model_test.h5 run01
# python video.py run01 | [
2,
10687,
7628,
14779,
1366,
532,
3753,
12,
4169,
1586,
9848,
352,
13,
15,
198,
198,
2,
11748,
28686,
198,
11748,
269,
21370,
198,
2,
11748,
269,
85,
17,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
299,
32152,
355,
45941,
198,... | 2.520191 | 2,303 |
from typing import Callable
| [
6738,
19720,
1330,
4889,
540,
628
] | 4.833333 | 6 |
from django.shortcuts import render,redirect
from django.http import HttpResponse
from model.community import *
from model.oss import *
from view.common import *
from django.core.paginator import Paginator,EmptyPage,PageNotAnInteger
from django.db.models import Sum, Count
from operator import itemgetter
from django.http import JsonResponse
from influxdb_metrics.utils import query
from influxdb import InfluxDBClient
import time
import datetime
import statsmodels.api as sm
import pandas as pd
import math
import numpy as np
client = InfluxDBClient('106.52.93.154', 8086, 'moose', 'moose', 'moose')
| [
6738,
42625,
14208,
13,
19509,
23779,
1330,
8543,
11,
445,
1060,
198,
6738,
42625,
14208,
13,
4023,
1330,
367,
29281,
31077,
198,
6738,
2746,
13,
28158,
1330,
1635,
198,
6738,
2746,
13,
793,
1330,
1635,
198,
6738,
1570,
13,
11321,
1330,... | 3.420455 | 176 |
#Copyright [2017] [Mauro Riva <lemariva@mail.com> <lemariva.com>]
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#http://www.apache.org/licenses/LICENSE-2.0
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
#The above copyright notice and this permission notice shall be
#included in all copies or substantial portions of the Software.
import math as m
import utime
from machine import ADC
from ws2812 import WS2812
| [
2,
15269,
685,
5539,
60,
685,
44,
559,
305,
371,
12151,
1279,
293,
3876,
12151,
31,
4529,
13,
785,
29,
1279,
293,
3876,
12151,
13,
785,
37981,
198,
198,
2,
26656,
15385,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
3... | 3.632287 | 223 |
'''
Author: your name
Date: 2021-05-17 03:11:15
LastEditTime: 2021-05-18 14:46:09
LastEditors: Please set LastEditors
Description: In User Settings Edit
FilePath: /twitterSententAnalyse 2/data_helper.py
'''
#coding:utf-8
from ext_en import *
import sys
stops = loadStops()
X, y = getTxt()
X_feather = []
fw = open('./filter_txt_train_data.txt', 'w')
for x,y in zip(X, y):
text = x.strip('"')
token_words = tokenize(text)
token_words = stem(token_words)
token_words = delete_stopwords(token_words)
token_words = delete_characters(token_words)
token_words = to_lower(token_words)
fw.write(' '.join(token_words)+'\t'+y+'\n')
fw.flush()
| [
7061,
6,
201,
198,
13838,
25,
534,
1438,
201,
198,
10430,
25,
33448,
12,
2713,
12,
1558,
7643,
25,
1157,
25,
1314,
201,
198,
5956,
18378,
7575,
25,
33448,
12,
2713,
12,
1507,
1478,
25,
3510,
25,
2931,
201,
198,
5956,
18378,
669,
2... | 2.285246 | 305 |
import codecs
import os
import re
import typing as tp
from satella.exceptions import ConfigurationValidationError
from .base import Descriptor, ConfigDictValue
from .registry import register_custom_descriptor
@staticmethod
@register_custom_descriptor('bool')
class Boolean(Descriptor):
"""
This value must be a boolean, or be converted to one
"""
BASIC_MAKER = _make_boolean
@register_custom_descriptor('int')
class Integer(Descriptor):
"""
This value must be an integer, or be converted to one
"""
BASIC_MAKER = int
@register_custom_descriptor('float')
class Float(Descriptor):
"""
This value must be a float, or be converted to one
"""
BASIC_MAKER = float
@register_custom_descriptor('str')
class String(Descriptor):
"""
This value must be a string, or be converted to one
"""
BASIC_MAKER = str
class FileObject:
"""
What you get for values in schema of :class:`~satella.configuration.schema.File`.
This object is comparable and hashable, and is equal to the string of it's path
"""
__slots__ = 'path',
def get_value(self, encoding: tp.Optional[str] = None) -> tp.Union[str, bytes]:
"""
Read in the entire file into memory
:param encoding: optional encoding to apply. If None given, bytes will be returned
:return: file contents
"""
with open(self.path, 'rb') as f_in:
data = f_in.read()
if encoding:
return data.decode(encoding)
else:
return data
def open(self, mode: str):
"""
Open the file in specified mode
:param mode: mode to open the file in
:return: file handle
"""
return open(self.path, mode)
class DirectoryObject:
"""
What you get for values in schema of :class:`~satella.configuration.schema.Directory`.
This object is comparable and hashable, and is equal to the string of it's path
"""
__slots__ = 'path',
def get_files(self) -> tp.Iterable[str]:
"""
Return a list of files inside this directory
:return:
"""
return os.listdir(self.path)
@staticmethod
@register_custom_descriptor('file')
class File(Descriptor):
"""
This value must be a valid path to a file. The value in your schema will be
an instance of :class:`~satella.configuration.schema.basic.FileObject`
"""
BASIC_MAKER = _make_file
@register_custom_descriptor('file_contents')
class FileContents(Descriptor):
"""
This value must be a valid path to a file. The value in your schema will be
the contents of this file, applied with encoding (if given). By default, bytes will be read in
"""
@staticmethod
@register_custom_descriptor('dir')
class Directory(Descriptor):
"""
This value must be a valid path to a file. The value in your schema will be
an instance of :class:`~satella.configuration.schema.basic.FileObject`
"""
BASIC_MAKER = _make_directory
class Regexp(String):
"""
Base class for declaring regexp-based descriptors. Overload it's attribute REGEXP. Use as
following:
>>> class IPv6(Regexp):
>>> REGEXP = '(([0-9a-f]{1,4}:)' ...
"""
__slots__ = ('regexp',)
REGEXP = r'.*'
@register_custom_descriptor('ipv4')
class IPv4(Regexp):
"""
This must be a valid IPv4 address (no hostnames allowed)
"""
REGEXP = r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}'
| [
11748,
40481,
82,
198,
11748,
28686,
198,
11748,
302,
198,
11748,
19720,
355,
256,
79,
198,
198,
6738,
8983,
64,
13,
1069,
11755,
1330,
28373,
7762,
24765,
12331,
198,
6738,
764,
8692,
1330,
2935,
6519,
273,
11,
17056,
35,
713,
11395,
... | 2.627644 | 1,324 |
# Copyright 2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------------
import os
import json
from sawtooth_cli.network_command.parent_parsers import base_multinode_parser
from sawtooth_cli.network_command.parent_parsers import split_comma_append_args
from sawtooth_cli.network_command.parent_parsers import make_rest_apis
from sawtooth_cli.exceptions import CliException
DOT_FILE = 'peers.dot'
| [
2,
15069,
2864,
8180,
10501,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198,
2,
921,
743,
7330... | 3.74812 | 266 |
import sys
import os
REMOVE_THESE = ["-I/usr/include", "-I/usr/include/", "-L/usr/lib", "-L/usr/lib/"]
if __name__ == "__main__":
pkg_names = []
pkg_dict = {}
commands = []
exist_check = False
for i in range(1,len(sys.argv)):
if sys.argv[i][0] == '-':
cmd = sys.argv[i]
commands.append(cmd)
if cmd=='--exists':
exist_check = True
elif cmd=="--help":
print "This is not very helpful, is it"
sys.exit(0)
elif cmd=="--version":
print "0.1"
sys.exit(0)
else:
pkg_names.append(sys.argv[i])
# Fix search path
PKG_CONFIG_PATH = os.getenv("PKG_CONFIG_PATH", "").strip()
if not PKG_CONFIG_PATH:
PKG_CONFIG_PATH="/usr/local/lib/pkgconfig:/usr/lib/pkgconfig"
PKG_CONFIG_PATH = PKG_CONFIG_PATH.replace(";", ":")
# Parse files
for pkg_name in pkg_names:
pkg = Pkg(pkg_name)
if not pkg.parse(PKG_CONFIG_PATH):
sys.exit(1)
pkg_dict[pkg_name] = pkg
if exist_check:
sys.exit(0)
# Calculate priority based on dependency
for pkg_name in pkg_dict.keys():
pkg = pkg_dict[pkg_name]
pkg.priority = calculate_pkg_priority(pkg, pkg_dict, 1)
# Sort package based on dependency
pkg_names = sorted(pkg_names, key=lambda pkg_name: pkg_dict[pkg_name].priority, reverse=True)
# Get the options
opts = []
for cmd in commands:
if cmd=='--libs':
for pkg_name in pkg_names:
libs = pkg_dict[pkg_name].libs()
for lib in libs:
opts.append(lib)
if lib[:2]=="-l":
break
for pkg_name in pkg_names:
opts += pkg_dict[pkg_name].libs()
elif cmd=='--cflags':
for pkg_name in pkg_names:
opts += pkg_dict[pkg_name].cflags()
elif cmd[0]=='-':
sys.stderr.write("pkgconfig.py: I don't know how to handle " + sys.argv[i] + "\n")
filtered_opts = []
for opt in opts:
opt = opt.strip()
if not opt:
continue
if REMOVE_THESE.count(opt) != 0:
continue
if filtered_opts.count(opt) != 0:
continue
filtered_opts.append(opt)
print ' '.join(filtered_opts)
| [
11748,
25064,
198,
11748,
28686,
198,
198,
2200,
11770,
6089,
62,
4221,
33635,
796,
14631,
12,
40,
14,
14629,
14,
17256,
1600,
27444,
40,
14,
14629,
14,
17256,
14,
1600,
27444,
43,
14,
14629,
14,
8019,
1600,
27444,
43,
14,
14629,
14,
... | 2.157316 | 909 |
# Generated by Django 2.0.6 on 2018-06-20 10:24
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import jsonfield.fields
import uuid
| [
2,
2980,
515,
416,
37770,
362,
13,
15,
13,
21,
319,
2864,
12,
3312,
12,
1238,
838,
25,
1731,
198,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
198,
11748,
42625,
1420... | 3.112903 | 62 |
import subprocess
import sys
import platform
import os
finder = {"Linux": "which",
"Darwin": "which",
"Windows": "where"}
| [
11748,
850,
14681,
198,
11748,
25064,
198,
11748,
3859,
198,
11748,
28686,
198,
198,
22805,
796,
19779,
19314,
1298,
366,
4758,
1600,
198,
220,
220,
220,
220,
220,
220,
220,
220,
220,
366,
32708,
5404,
1298,
366,
4758,
1600,
198,
220,
... | 2.571429 | 56 |
# MEDIUM
# cur records the count of "aeiou"
# cur & 1 = the records of a % 2
# cur & 2 = the records of e % 2
# cur & 4 = the records of i % 2
# cur & 8 = the records of o % 2
# cur & 16 = the records of u % 2
# seen note the index of first occurrence of cur
# a e i o u other
# "aeiou".indexOf(s.charAt(i)) + 1 1 2 3 4 5 0
# 1 << tmp 2 4 8 16 32 1
# (1 << tmp) >> 1 1 2 4 8 16 0 | [
198,
2,
26112,
41796,
198,
2,
1090,
4406,
262,
954,
286,
366,
3609,
72,
280,
1,
198,
2,
1090,
1222,
352,
796,
262,
4406,
286,
257,
4064,
362,
198,
2,
1090,
1222,
362,
796,
262,
4406,
286,
304,
4064,
362,
198,
2,
1090,
1222,
604,... | 1.661891 | 349 |
from komandos import *
import heapq as H
if __name__ == '__main__':
mem = set()
initState = getInitialState()
randPath = []
path = []
# for i in range(140):
# move = random.choice(getMoves())
# initState = apllyMove(initState, move)
# randPath.append(move)
path = aStar(mem, initState, h)
saveOtput(randPath + path)
| [
6738,
479,
296,
392,
418,
1330,
1635,
198,
11748,
24575,
80,
355,
367,
628,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
1066,
796,
900,
3419,
198,
220,
220,
220,
2315,
9012,
796,
651,
2424... | 2.333333 | 159 |
# import time
# import random
# seed = 1
# list1 = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z']
# temp = random.randint(0, 25)
# count = 0
# for i in range(0,100):
# if i % 9 == 0 and i <= 81:
# print(str(i) + ':' + list1[temp], end="\t")
# count += 1
# else:
# print(str(i) + ':' + list1[random.randint(0, 25)], end="\t")
# count += 1
# if count % 10 == 0:
# print()
# print()
# print('请在脑海中想一个数字,将它减去十位上的数字,再减去个位上的数字,得到最终数字,并记住最终数字对应的字母')
# s = input('记住字母后按回车开始读心!')
# for i in range(5, 0, -1):
# print(i)
# time.sleep(1)
# print("你心中的字母是:" + list1[temp])
# 任务一:
import random
list1 = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V',
'W', 'X', 'Y', 'Z']
temp = random.randint(0, 25)
print(list1)
print(temp)
# 任务二:
import random
list1 = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V',
'W', 'X', 'Y', 'Z']
temp = random.randint(0, 25)
count = 0
for i in range(0, 100):
if i % 9 == 0 and i <= 81:
print(str(i) + ':' + list1[temp], end="\t")
count += 1
else:
print(str(i) + ':' + list1[random.randint(0, 25)], end="\t")
count += 1
if count % 10 == 0:
print()
# 任务三:
import time
import random
seed = 1
list1 = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V',
'W', 'X', 'Y', 'Z']
temp = random.randint(0, 25)
count = 0
for i in range(0, 100):
if i % 9 == 0 and i <= 81:
print(str(i) + ':' + list1[temp], end="\t")
count += 1
else:
print(str(i) + ':' + list1[random.randint(0, 25)], end="\t")
count += 1
if count % 10 == 0:
print()
print()
print('请在脑海中想一个数字,将它减去十位上的数字,再减去个位上的数字,得到最终数字,并记住最终数字对应的字母')
s = input('记住字母后按回车开始读心!')
print("你心中的字母是:" + list1[temp])
# 最终代码
import time
import random
seed = 1
list1 = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V',
'W', 'X', 'Y', 'Z']
temp = random.randint(0, 25)
count = 0
for i in range(0, 100):
if i % 9 == 0 and i <= 81:
print(str(i) + ':' + list1[temp], end="\t")
count += 1
else:
print(str(i) + ':' + list1[random.randint(0, 25)], end="\t")
count += 1
if count % 10 == 0:
print()
print()
print('请在脑海中想一个数字,将它减去十位上的数字,再减去个位上的数字,得到最终数字,并记住最终数字对应的字母')
s = input('记住字母后按回车开始读心!')
for i in range(5, 0, -1):
print(i)
time.sleep(1)
print("你心中的字母是:" + list1[temp]) | [
2,
1330,
640,
198,
2,
1330,
4738,
198,
2,
9403,
796,
352,
198,
2,
1351,
16,
796,
37250,
32,
3256,
705,
33,
3256,
705,
34,
3256,
705,
35,
3256,
705,
36,
3256,
705,
37,
3256,
705,
38,
3256,
705,
39,
3256,
705,
40,
3256,
705,
41,... | 1.514763 | 1,795 |
import os
import time
import toml
import telegram_bot_api as api
SKRIPTPFAD = os.path.abspath(os.path.dirname(__file__))
CONFIG = config_laden()
USER = CONFIG["telegram"]["user"]
BOT = api.Bot(CONFIG["telegram"]["token"])
ANTWORTEN = ["Komme gleich", "1 Min", "5 Min"]
if __name__ == "__main__":
main()
| [
11748,
28686,
198,
11748,
640,
198,
198,
11748,
284,
4029,
198,
198,
11748,
573,
30536,
62,
13645,
62,
15042,
355,
40391,
628,
198,
198,
18831,
32618,
7250,
37,
2885,
796,
28686,
13,
6978,
13,
397,
2777,
776,
7,
418,
13,
6978,
13,
1... | 2.424242 | 132 |
import base64
import logging
import json
from calendar import monthrange
import datetime
from httplib2 import Http
from json import dumps
def handle_notification(event, context):
"""Triggered from a message on a Cloud Pub/Sub topic.
Args:
event (dict): Event payload.
context (google.cloud.functions.Context): Metadata for the event.
"""
pubsub_message = base64.b64decode(event['data']).decode('utf-8')
logging.info('Budget information: {}'.format(pubsub_message))
jsonPayload = json.loads(pubsub_message)
costAmount = jsonPayload['costAmount']
budgetAmount = jsonPayload['budgetAmount']
percentOfBudget = round((costAmount/budgetAmount) * 100,2)
budgetDisplayName = jsonPayload['budgetDisplayName']
costIntervalStart = jsonPayload['costIntervalStart']
percentOfMonth = calcMonthPercent(costIntervalStart)
trendingPercent = round(percentOfBudget - percentOfMonth,2)
#logging.info('costAmount: {}'.format(costAmount))
#logging.info('budgetAmount: {}'.format(budgetAmount))
#logging.info('percentOfBudget: {}'.format(percentOfBudget))
#logging.info('budgetDisplayName: {}'.format(budgetDisplayName))
if trendingPercent >= 1:
message_text = "{}".format(budgetDisplayName) + ": {}".format(trendingPercent) + "% higher than last month (${:.2f}".format(costAmount) + "/${:.2f}".format(budgetAmount) + ")"
elif trendingPercent < 1 and trendingPercent > -1:
message_text = "{}".format(budgetDisplayName) + ": On target (+/- 1%) (${:.2f}".format(costAmount) + "/${:.2f}".format(budgetAmount) + ")"
else:
message_text = "{}".format(budgetDisplayName) + ": {}".format(trendingPercent) + "% lower than last month (${:.2f}".format(costAmount) + "/${:.2f}".format(budgetAmount) + ")"
logging.info('message_text: {}'.format(message_text))
timeToSend = chatLimiter(percentOfBudget, percentOfMonth)
if timeToSend == True:
sendChatMessage(message_text)
| [
11748,
2779,
2414,
198,
11748,
18931,
198,
11748,
33918,
198,
6738,
11845,
1330,
1227,
9521,
198,
11748,
4818,
8079,
198,
6738,
1841,
489,
571,
17,
1330,
367,
29281,
198,
6738,
33918,
1330,
45514,
198,
198,
4299,
5412,
62,
1662,
2649,
7... | 2.833333 | 702 |
from attendanceTaker import Student, classDatabase
import tkinter as tk
if __name__ == "__main__":
main()
| [
6738,
14858,
51,
3110,
1330,
13613,
11,
1398,
38105,
198,
11748,
256,
74,
3849,
355,
256,
74,
628,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
197,
12417,
3419,
198
] | 3.205882 | 34 |
"""Top-level package for TopoJoin."""
__author__ = "DSR"
__email__ = "info@simmonsritchie.com"
__version__ = "__version__ = '0.3.1'"
| [
37811,
9126,
12,
5715,
5301,
329,
5849,
78,
18234,
526,
15931,
198,
198,
834,
9800,
834,
796,
366,
5258,
49,
1,
198,
834,
12888,
834,
796,
366,
10951,
31,
82,
8608,
684,
799,
3043,
13,
785,
1,
198,
834,
9641,
834,
796,
366,
834,
... | 2.436364 | 55 |
#!python3
"""
Predict protein pKa based on MCCE method.
http://pka.engr.ccny.cuny.edu/
Require MCCE 3.0 to work: https://anaconda.org/SalahSalah/mcce/files
"""
import asyncio
import glob
import gzip
import locale
import logging
import math
import os
import re
import shutil
import subprocess
import sys
import time
from multiprocessing import Pool
from urllib.request import urlopen
import aioftp
import pandas as pd
import uvloop
# Sapelo Locale is broken, quick fix
locale.setlocale(locale.LC_ALL, "en_US.UTF-8")
# Set working directory
ROOTPATH = os.path.dirname(os.path.realpath(sys.argv[0]))
os.chdir(ROOTPATH)
# Log settings
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
handler = logging.FileHandler(f"./pKa_calculation_{__file__}.log")
handler.setLevel(logging.INFO)
formatter = logging.Formatter(
"%(asctime)s\t%(levelname)s\t"
"[%(filename)s:%(lineno)s -%(funcName)12s()]\t%(message)s"
)
handler.setFormatter(formatter)
logger.addHandler(handler)
if __name__ == "__main__":
x = pdb()
x.load_id()
urls = x.get_link(x.download_ids)
x.make_dirs(x.all_ids)
x.download_queue(urls)
x.check_mcce()
for id in x.unzip_ids:
x.unzip(id)
for id in x.preprocess_ids:
try:
x.preprocess(id)
x.set_params(id)
except Exception as e:
x.error_ids.append(id)
logger.warning(f"Preprocess of {id}: {e}")
# subprocess.run(["find", ".", "-type", "d", "-empty", "-delete"])
x.split_ready_ids(0) # 0 - 9, run 0 first to generate other lists
with Pool(os.cpu_count()) as p:
p.map(x.calc_pka, x.working_ids)
with open("./results/finished_ids.list", "a") as f:
f.write("\n".join(x.working_ids))
with open("./results/error_ids.list", "a") as f:
f.write("\n".join(x.error_ids))
| [
2,
0,
29412,
18,
201,
198,
37811,
201,
198,
47,
17407,
7532,
279,
37281,
1912,
319,
337,
4093,
36,
2446,
13,
201,
198,
4023,
1378,
79,
4914,
13,
1516,
81,
13,
535,
3281,
13,
66,
403,
88,
13,
15532,
14,
201,
198,
201,
198,
16844,... | 2.152466 | 892 |
# -*- coding: utf-8 -*-
"""
meraki
This file was automatically generated for meraki by APIMATIC v2.0 ( https://apimatic.io ).
"""
import meraki.models.custom_pie_chart_item_model
class UpdateNetworkTrafficAnalysisSettingsModel(object):
    """Implementation of the 'updateNetworkTrafficAnalysisSettings' model.

    Attributes:
        mode (Mode1Enum): The traffic analysis mode for the network. Can be
            one of 'disabled' (do not collect traffic types), 'basic'
            (collect generic traffic categories), or 'detailed' (collect
            destination hostnames).
        custom_pie_chart_items (list of CustomPieChartItemModel): The list of
            items that make up the custom pie chart for traffic reporting.

    """

    # Create a mapping from Model property names to API property names
    _names = {
        "mode": 'mode',
        "custom_pie_chart_items": 'customPieChartItems'
    }

    def __init__(self,
                 mode=None,
                 custom_pie_chart_items=None):
        """Constructor for the UpdateNetworkTrafficAnalysisSettingsModel class"""

        # Initialize members of the class
        self.mode = mode
        self.custom_pie_chart_items = custom_pie_chart_items

    @classmethod
    def from_dictionary(cls,
                        dictionary):
        """Creates an instance of this model from a dictionary

        Args:
            dictionary (dict): A dictionary representation of the object as
                obtained from the deserialization of the server's response.
                The keys MUST match property names in the API description.

        Returns:
            UpdateNetworkTrafficAnalysisSettingsModel: An instance of this
            class, or None when `dictionary` is None.

        """
        if dictionary is None:
            return None

        # Extract variables from the dictionary
        mode = dictionary.get('mode')
        # Fix: compare against None with `is not` (PEP 8 / E711) rather than
        # `!=`, and build the list with a comprehension instead of a
        # manual append loop.
        custom_pie_chart_items = None
        if dictionary.get('customPieChartItems') is not None:
            custom_pie_chart_items = [
                meraki.models.custom_pie_chart_item_model.CustomPieChartItemModel.from_dictionary(item)
                for item in dictionary.get('customPieChartItems')
            ]

        # Return an object of this model
        return cls(mode,
                   custom_pie_chart_items)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
201,
198,
201,
198,
37811,
201,
198,
220,
220,
220,
4017,
8182,
201,
198,
201,
198,
220,
220,
220,
770,
2393,
373,
6338,
7560,
329,
4017,
8182,
416,
3486,
3955,
1404,
2149,... | 2.423307 | 1,004 |
from sample_players import DataPlayer
import random
class CustomPlayer(DataPlayer):
    """ Implement your own agent to play knight's Isolation
    The get_action() method is the only *required* method. You can modify
    the interface for get_action by adding named parameters with default
    values, but the function MUST remain compatible with the default
    interface.
    **********************************************************************
    NOTES:
    - You should **ONLY** call methods defined on your agent class during
      search; do **NOT** add or call functions outside the player class.
      The isolation library wraps each method of this class to interrupt
      search when the time limit expires, but the wrapper only affects
      methods defined on this class.
    - The test cases will NOT be run on a machine with GPU access, nor be
      suitable for using any other machine learning techniques.
    **********************************************************************
    """
    def get_action(self, state):
        """ Employ an adversarial search technique to choose an action
        available in the current state calls self.queue.put(ACTION) at least
        This method must call self.queue.put(ACTION) at least once, and may
        call it as many times as you want; the caller is responsible for
        cutting off the function after the search time limit has expired.
        See RandomPlayer and GreedyPlayer in sample_players for more examples.
        **********************************************************************
        NOTE:
        - The caller is responsible for cutting off search, so calling
          get_action() from your own code will create an infinite loop!
          Refer to (and use!) the Isolation.play() function to run games.
        **********************************************************************
        """
        # TODO: Replace the example implementation below with your own search
        #       method by combining techniques from lecture
        #
        # EXAMPLE: choose a random move without any search--this function MUST
        #          call self.queue.put(ACTION) at least once before time expires
        #          (the timer is automatically managed for you)
        # if state.board in self.data:
        #     self.queue.put(self.data[state.board])
        #     return
        # Fail-safe: enqueue a random legal action immediately so a result
        # exists even if the deepening loop below is interrupted right away.
        self.queue.put(random.choice(state.actions()))
        # Iterative deepening: keep submitting results from deeper searches.
        # The loop is intentionally endless -- the isolation framework cuts
        # it off when the per-move time limit expires (see class docstring).
        self.depth = 1
        while True:
            # NOTE(review): self.decision() is not defined in this chunk; it
            # must be implemented elsewhere on this class -- confirm.
            self.queue.put(self.decision(state, self.depth))
            self.depth += 1
        # Ideas for the heuristic function (unreachable notes, kept from the
        # original author):
        # Number of open cells available within a 5*5 square
        # Number of available next moves
        # Number of open cells available within a 3*3 square
| [
198,
6738,
6291,
62,
32399,
1330,
6060,
14140,
198,
11748,
4738,
198,
198,
4871,
8562,
14140,
7,
6601,
14140,
2599,
198,
220,
220,
220,
37227,
48282,
534,
898,
5797,
284,
711,
22062,
338,
1148,
21417,
628,
220,
220,
220,
383,
651,
62,... | 3.247944 | 851 |
from .base_metric import BaseMetric
from .accuracy import Accuracy
from .exact_matches import ExactMatches
from .pr_rec_f1 import PrRecF1
from .rouge_multi import RougeMulti
from .multi_ref_rouge import MultiRefRouge
from .soft_pr_rec_f1 import SoftPrRecF1
| [
6738,
764,
8692,
62,
4164,
1173,
1330,
7308,
9171,
1173,
198,
6738,
764,
4134,
23843,
1330,
33222,
198,
6738,
764,
1069,
529,
62,
6759,
2052,
1330,
1475,
529,
19044,
2052,
198,
6738,
764,
1050,
62,
8344,
62,
69,
16,
1330,
1736,
6690,
... | 2.954023 | 87 |
# Return the size of the largest sub-tree which is also a BST
import sys
sys.setrecursionlimit(1000000)
from collections import deque
# Tree Node
# Function to Build Tree
if __name__ == "__main__":
    # Reads t test cases; each case is one serialized tree per line.
    t = int(input())
    for _ in range(0, t):
        s = input()
        # NOTE(review): buildTree() and largestBst() are not defined in this
        # chunk (the placeholder comments above suggest they were stripped);
        # they must exist elsewhere for this driver to run.
        root = buildTree(s)
        print(largestBst(root))
| [
628,
198,
2,
8229,
262,
2546,
286,
262,
4387,
850,
12,
21048,
543,
318,
635,
257,
44992,
628,
628,
198,
11748,
25064,
198,
17597,
13,
2617,
8344,
24197,
32374,
7,
16,
10535,
8,
198,
198,
6738,
17268,
1330,
390,
4188,
198,
198,
2,
... | 2.575758 | 132 |
import abc
from collections import OrderedDict
import time
import gtimer as gt
import numpy as np
from rlkit.core import logger, eval_util
from rlkit.data_management.env_replay_buffer import MultiTaskReplayBuffer,EnvReplayBuffer
from rlkit.data_management.path_builder import PathBuilder
from rlkit.samplers.in_place import SMMInPlacePathSampler, InPlacePathSampler,SeedInPlacePathSampler, ExpInPlacePathSampler,ExpInPlacePathSamplerSimple
from rlkit.torch import pytorch_util as ptu
from rlkit.smm.smm_policy import hard_smm_point
from rlkit.smm.smm_sampler import SMMSampler
from rlkit.policies.base import ExplorationPolicy
import pickle
import torch
| [
11748,
450,
66,
198,
6738,
17268,
1330,
14230,
1068,
35,
713,
198,
11748,
640,
198,
198,
11748,
308,
45016,
355,
308,
83,
198,
11748,
299,
32152,
355,
45941,
198,
198,
6738,
374,
75,
15813,
13,
7295,
1330,
49706,
11,
5418,
62,
22602,
... | 3.065116 | 215 |
import os
import time
import csh_ldap as ldap
import login
if __name__ == "__main__":
    # NOTE(review): main() is not defined in this chunk; it must be provided
    # elsewhere in the file for this entry point to work -- confirm.
    main()
| [
11748,
28686,
198,
11748,
640,
198,
11748,
269,
1477,
62,
335,
499,
355,
300,
67,
499,
198,
11748,
17594,
628,
628,
628,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
1388,
3419,
198
] | 2.575 | 40 |
"""
PostGIS to GDAL conversion constant definitions
"""
# Lookup to convert pixel type values from GDAL to PostGIS.
# Index = GDAL band data-type code (0-11); value = PostGIS pixel type,
# or None where no PostGIS equivalent exists.
GDAL_TO_POSTGIS = [None, 4, 6, 5, 8, 7, 10, 11, None, None, None, None]

# Lookup to convert pixel type values from PostGIS to GDAL (inverse of the
# table above; index = PostGIS pixel type code).
POSTGIS_TO_GDAL = [1, 1, 1, 3, 1, 3, 2, 5, 4, None, 6, 7, None, None]

# Struct pack structure for raster header, the raster header has the
# following structure:
#
# Endianness, PostGIS raster version, number of bands, scale, origin,
# skew, srid, width, and height.
#
# Scale, origin, and skew have x and y values. PostGIS currently uses
# a fixed endianness (1) and there is only one version (0).
POSTGIS_HEADER_STRUCTURE = 'B H H d d d d d d i H H'

# Lookup values to convert GDAL pixel types to struct characters. This is
# used to pack and unpack the pixel values of PostGIS raster bands.
GDAL_TO_STRUCT = [
    None, 'B', 'H', 'h', 'L', 'l', 'f', 'd',
    None, None, None, None,
]

# Size of the packed value in bytes for different numerical types.
# This is needed to cut chunks of band data out of PostGIS raster strings
# when decomposing them into GDALRasters.
# See https://docs.python.org/library/struct.html#format-characters
STRUCT_SIZE = {
    'b': 1,  # Signed char
    'B': 1,  # Unsigned char
    '?': 1,  # _Bool
    'h': 2,  # Short
    'H': 2,  # Unsigned short
    'i': 4,  # Integer
    'I': 4,  # Unsigned Integer
    'l': 4,  # Long
    'L': 4,  # Unsigned Long
    'f': 4,  # Float
    'd': 8,  # Double
}

# Flag bit set in a band-type byte when the band carries a nodata value.
# See https://trac.osgeo.org/postgis/wiki/WKTRaster/RFC/RFC1_V0SerialFormat#Pixeltypeandstorageflag
BANDTYPE_FLAG_HASNODATA = 1 << 6
| [
37811,
198,
6307,
38,
1797,
284,
27044,
1847,
11315,
6937,
17336,
198,
37811,
198,
2,
6803,
929,
284,
10385,
17465,
2099,
3815,
422,
27044,
1847,
284,
2947,
38,
1797,
198,
45113,
1847,
62,
10468,
62,
32782,
38,
1797,
796,
685,
14202,
... | 2.671074 | 605 |
# coding: utf8
"""
题目链接: https://leetcode.com/problems/search-in-rotated-sorted-array-ii/description.
题目描述:
Follow up for "Search in Rotated Sorted Array":
What if duplicates are allowed?
Would this affect the run-time complexity? How and why?
Suppose an array sorted in ascending order is rotated at some pivot unknown to you beforehand.
(i.e., 0 1 2 4 5 6 7 might become 4 5 6 7 0 1 2).
Write a function to determine if a given target is in the array.
The array may contain duplicates.
"""
| [
2,
19617,
25,
3384,
69,
23,
628,
198,
37811,
198,
220,
220,
220,
16268,
95,
246,
33566,
106,
165,
241,
122,
162,
236,
98,
25,
3740,
1378,
293,
316,
8189,
13,
785,
14,
1676,
22143,
14,
12947,
12,
259,
12,
10599,
515,
12,
82,
9741... | 2.816754 | 191 |
from rest_framework import serializers
from items.models import Item
| [
6738,
1334,
62,
30604,
1330,
11389,
11341,
198,
6738,
3709,
13,
27530,
1330,
9097,
198
] | 4.6 | 15 |
"""
Unit tests for the Drycc api app.
Run the tests with "./manage.py test api"
"""
from unittest import mock
from django.contrib.auth import get_user_model
from django.core.cache import cache
from django.conf import settings
from rest_framework.authtoken.models import Token
from api.models import Domain
from api.tests import DryccTestCase
import idna
User = get_user_model()
class DomainTest(DryccTestCase):
    """Tests creation of domains via the /v2/apps/<app>/domains endpoints."""
    # Fixture supplies the 'autotest' admin and 'autotest2' regular user.
    fixtures = ['tests.json']
    def test_response_data(self):
        """Test that the serialized response contains only relevant data."""
        app_id = self.create_app()
        response = self.client.post(
            '/v2/apps/{}/domains'.format(app_id),
            {'domain': 'test-domain.example.com'}
        )
        self.assertEqual(response.status_code, 201, response.data)
        # The serializer must not leak any fields beyond this whitelist.
        for key in response.data:
            self.assertIn(key, ['uuid', 'owner', 'created', 'updated', 'app', 'domain'])
        expected = {
            'owner': self.user.username,
            'app': app_id,
            'domain': 'test-domain.example.com'
        }
        self.assertDictContainsSubset(expected, response.data)
    def test_strip_dot(self):
        """Test that a dot on the right side of the domain gets stripped"""
        domain = 'autotest.127.0.0.1.xip.io.'
        msg = "failed on '{}'".format(domain)
        url = '/v2/apps/{app_id}/domains'.format(app_id=self.app_id)
        # Create
        response = self.client.post(url, {'domain': domain})
        self.assertEqual(response.status_code, 201, msg)
        # Fetch
        domain = 'autotest.127.0.0.1.xip.io'  # stripped version
        url = '/v2/apps/{app_id}/domains'.format(app_id=self.app_id)
        response = self.client.get(url)
        expected = [data['domain'] for data in response.data['results']]
        # The app's platform domain is always listed alongside the new one.
        self.assertEqual(
            sorted(["%s.%s" % (self.app_id, settings.PLATFORM_DOMAIN), domain]),
            expected, msg)
    def test_delete_domain_does_not_exist(self):
        """Remove a domain that does not exist"""
        url = '/v2/apps/{app_id}/domains/{domain}'.format(domain='test-domain.example.com',
                                                          app_id=self.app_id)
        response = self.client.delete(url)
        self.assertEqual(response.status_code, 404)
    def test_delete_domain_does_not_remove_latest(self):
        """https://github.com/drycc/drycc/issues/3239"""
        url = '/v2/apps/{app_id}/domains'.format(app_id=self.app_id)
        test_domains = [
            'test-domain.example.com',
            'django.paas-sandbox',
        ]
        for domain in test_domains:
            response = self.client.post(url, {'domain': domain})
            self.assertEqual(response.status_code, 201, response.data)
        # Deleting the first domain must only remove that row.
        url = '/v2/apps/{app_id}/domains/{domain}'.format(domain=test_domains[0],
                                                          app_id=self.app_id)
        response = self.client.delete(url)
        self.assertEqual(response.status_code, 204, response.data)
        with self.assertRaises(Domain.DoesNotExist):
            Domain.objects.get(domain=test_domains[0])
    def test_delete_domain_does_not_remove_others(self):
        """https://github.com/drycc/drycc/issues/3475"""
        # Reuses the scenario above: after one delete, the remaining
        # domain rows must still be present.
        self.test_delete_domain_does_not_remove_latest()
        self.assertEqual(Domain.objects.all().count(), 2)
    def test_admin_can_add_domains_to_other_apps(self):
        """If a non-admin user creates an app, an administrator should be able to add
        domains to it.
        """
        user = User.objects.get(username='autotest2')
        token = Token.objects.get(user=user).key
        self.client.credentials(HTTP_AUTHORIZATION='Token ' + token)
        app_id = self.create_app()
        # Switch back to the admin's credentials before adding the domain.
        self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.token)
        url = '/v2/apps/{}/domains'.format(app_id)
        response = self.client.post(url, {'domain': 'example.drycc.example.com'})
        self.assertEqual(response.status_code, 201, response.data)
    def test_unauthorized_user_cannot_modify_domain(self):
        """
        An unauthorized user should not be able to modify other domains.
        Since an unauthorized user should not know about the application at all, these
        requests should return a 404.
        """
        # NOTE(review): the docstring says 404 but the view actually returns
        # 403 here -- confirm which is the intended contract.
        app_id = self.create_app()
        unauthorized_user = User.objects.get(username='autotest2')
        unauthorized_token = Token.objects.get(user=unauthorized_user).key
        self.client.credentials(HTTP_AUTHORIZATION='Token ' + unauthorized_token)
        url = '/v2/apps/{}/domains'.format(app_id)
        response = self.client.post(url, {'domain': 'example.com'})
        self.assertEqual(response.status_code, 403)
    def test_kubernetes_service_failure(self):
        """
        Cause an Exception in kubernetes services
        """
        app_id = self.create_app()
        # scheduler.svc.update exception: patching Service.update means the
        # k8s call is swallowed; domain creation must still succeed.
        with mock.patch('scheduler.resources.service.Service.update'):
            domain = 'foo.com'
            url = '/v2/apps/{}/domains'.format(app_id)
            response = self.client.post(url, {'domain': domain})
            self.assertEqual(response.status_code, 201, response.data)
| [
37811,
198,
26453,
5254,
329,
262,
22408,
535,
40391,
598,
13,
198,
198,
10987,
262,
5254,
351,
366,
19571,
805,
496,
13,
9078,
1332,
40391,
1,
198,
37811,
198,
6738,
555,
715,
395,
1330,
15290,
198,
198,
6738,
42625,
14208,
13,
3642,... | 2.271245 | 2,330 |
import sys

# Fast stdin reader; deliberately rebinds the builtin name `input`.
input = sys.stdin.readline

# w, h: two space-separated integers on one line.
w,h = list(map(int,input().split(" ")))

# If either dimension is even, print 0; otherwise print the smaller of the
# two odd dimensions (the final else branch printing h follows this block).
if h%2 ==0 or w%2==0:
    print("0")
elif h>=w:
    print(w)
else:
print(h) | [
11748,
25064,
198,
198,
15414,
796,
25064,
13,
19282,
259,
13,
961,
1370,
198,
86,
11,
71,
796,
1351,
7,
8899,
7,
600,
11,
15414,
22446,
35312,
7203,
366,
22305,
198,
198,
361,
289,
4,
17,
6624,
15,
393,
266,
4,
17,
855,
15,
25,... | 1.9875 | 80 |
"""Module for searching the database based on user-supplied queries, and
display the result to the user.
"""
from todone import backend
from todone import printers
from todone.parser import factory
def list_items(args):
    """
    Print a list of todos matching given search terms.
    usage: todone list [.file] [folder/] [tags and keywords]
    Search criteria can be any string expression.
    Allowed folder keywords are any valid folder name, followed by
    a slash. Examples: today/, next/, inbox/, someday/, done/. Shortened
    versions accepted when unambiguous, so, for example "done/", "don/",
    "do/", and "d/" all indicate the done folder.
    If folder is not specified, the search is over all active
    folders (default is: inbox/, next/, today/).
    If folder is today/, then, in addition to items in the today
    folder, items with a reminder or due date prior to or equal to
    today's date are also included. This behavior may change in future
    versions.
    Allowed tags are:
        due[+N{d|w|m|y}],
        remind[+N{d|w|m|y}],
        [project name]
    The remainder of the search string provides keywords that must
    appear in the todo title. However, searches are always case
    insensitive.
    If .file is specified, then search results are saved to .file.
    If no search criteria is provided, then the todos in the given file
    are listed. If no search criteria and no file is specified, then
    the most recently run search is listed.
    E.g.,
        > todone list .my_search today/ @Work
        Lists all today items containing tag @Work, and saves to .my_search
        > todone list n/due+1w [My Project]
        Lists all next items from project [My Project] due in
        the next week
        > todone list
        Repeats most recent search
        > todone list .my_search
        Repeats list from first search
        > todone list
        Repeats list from first search
    """
    # NOTE: the docstring above doubles as the CLI long-help text, so it is
    # user-facing; keep its wording in sync with the parser behavior.
    parsed_args = parse_args(args)
    # A bare .file argument replays a previously saved search instead of
    # querying the database again.
    if is_loading_saved_search(parsed_args):
        query = backend.SavedList.get_todos_in_list(parsed_args['file'])
    else:
        query = backend.Todo.query(**parsed_args)
        backend.SavedList.save_search(parsed_args['file'], query)
    # Always record the result so a plain `todone list` can repeat it.
    backend.SavedList.save_most_recent_search(query)
    printers.print_todo_list(query)
# Short usage line shown by the CLI help system.
list_items.short_help = """
usage: todone list [.file] [folder/] [tags and keywords]
"""
| [
37811,
26796,
329,
10342,
262,
6831,
1912,
319,
2836,
12,
18608,
18511,
20743,
11,
290,
198,
13812,
262,
1255,
284,
262,
2836,
13,
198,
37811,
198,
6738,
284,
28060,
1330,
30203,
198,
6738,
284,
28060,
1330,
34654,
198,
6738,
284,
28060... | 2.869976 | 846 |
import pytest
from csp_tool.csp_analyser import get_domains_per_directive, rollup_data_by_header, \
check_unsafe_inline_used_without_domains, extract_policies_without_domains, \
check_keyword_used_without_domains, extract_policies_using_localhost
@pytest.mark.parametrize(
"header,expected_num_rows,biggest_count_row,expected_count",
[
('full_csp', 4, 3, '2'),
('has_unsafe_inline', 1, 0, '5'),
('has_unsafe_eval', 2, 1, '1'),
('refs_localhost', 2, 1, '1'),
],
)
| [
11748,
12972,
9288,
198,
6738,
269,
2777,
62,
25981,
13,
66,
2777,
62,
272,
26266,
263,
1330,
651,
62,
3438,
1299,
62,
525,
62,
12942,
425,
11,
4836,
929,
62,
7890,
62,
1525,
62,
25677,
11,
3467,
198,
220,
220,
220,
2198,
62,
1327... | 2.240506 | 237 |
'''
This module contains the Neuronaletwork class and its methods.
Created on 29.09.2019
@author: D.Ramonat
'''
import numpy as np | [
7061,
6,
198,
1212,
8265,
4909,
262,
3169,
333,
20996,
316,
1818,
1398,
290,
663,
5050,
13,
198,
198,
41972,
319,
2808,
13,
2931,
13,
23344,
198,
198,
31,
9800,
25,
360,
13,
49,
16487,
265,
198,
7061,
6,
198,
11748,
299,
32152,
35... | 2.933333 | 45 |
import logging
import pytest
from piqe_ocp_lib import __loggername__
from piqe_ocp_lib.api.monitoring.ocp_cluster_stats_prometheus import OcpClusterStatsPrometheus
logger = logging.getLogger(__loggername__)
@pytest.fixture(scope="session")
| [
11748,
18931,
198,
198,
11748,
12972,
9288,
198,
198,
6738,
279,
25011,
68,
62,
420,
79,
62,
8019,
1330,
11593,
6404,
70,
13292,
834,
198,
6738,
279,
25011,
68,
62,
420,
79,
62,
8019,
13,
15042,
13,
41143,
278,
13,
420,
79,
62,
56... | 2.764045 | 89 |
# coding: utf-8
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from .base_model_ import Model
from .. import util
class AuditLogEntry(Model):
    """NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    def __init__(self, status: str=None, timestamp: str=None, data: object=None):  # noqa: E501
        """AuditLogEntry - a model defined in Swagger
        :param status: The status of this AuditLogEntry.  # noqa: E501
        :type status: str
        :param timestamp: The timestamp of this AuditLogEntry.  # noqa: E501
        :type timestamp: str
        :param data: The data of this AuditLogEntry.  # noqa: E501
        :type data: object
        """
        # Maps attribute names to their declared Swagger types (used by the
        # generic deserializer).
        self.swagger_types = {
            'status': str,
            'timestamp': str,
            'data': object
        }
        # Maps attribute names to the JSON keys used on the wire.
        self.attribute_map = {
            'status': 'status',
            'timestamp': 'timestamp',
            'data': 'data'
        }
        self._status = status
        self._timestamp = timestamp
        self._data = data
    @classmethod
    def from_dict(cls, dikt) -> 'AuditLogEntry':
        """Returns the dict as a model
        :param dikt: A dict.
        :type: dict
        :return: The AuditLogEntry of this AuditLogEntry.  # noqa: E501
        :rtype: AuditLogEntry
        """
        return util.deserialize_model(dikt, cls)
    @property
    def status(self) -> str:
        """Gets the status of this AuditLogEntry.
        The type of the last action that was carried out on the document.  # noqa: E501
        :return: The status of this AuditLogEntry.
        :rtype: str
        """
        return self._status
    @status.setter
    def status(self, status: str):
        """Sets the status of this AuditLogEntry.
        The type of the last action that was carried out on the document.  # noqa: E501
        :param status: The status of this AuditLogEntry.
        :type status: str
        """
        # Closed set of statuses accepted by the API; anything else is a
        # caller error.
        allowed_values = ["created", "viewed", "stamp_failed", "stamp_success", "updated"]  # noqa: E501
        if status not in allowed_values:
            raise ValueError(
                "Invalid value for `status` ({0}), must be one of {1}"
                .format(status, allowed_values)
            )
        self._status = status
    @property
    def timestamp(self) -> str:
        """Gets the timestamp of this AuditLogEntry.
        An ISO format timestamp indicating the date and time that the event occurred at.  # noqa: E501
        :return: The timestamp of this AuditLogEntry.
        :rtype: str
        """
        return self._timestamp
    @timestamp.setter
    def timestamp(self, timestamp: str):
        """Sets the timestamp of this AuditLogEntry.
        An ISO format timestamp indicating the date and time that the event occurred at.  # noqa: E501
        :param timestamp: The timestamp of this AuditLogEntry.
        :type timestamp: str
        """
        # timestamp is a required field; status and data are optional.
        if timestamp is None:
            raise ValueError("Invalid value for `timestamp`, must not be `None`")  # noqa: E501
        self._timestamp = timestamp
    @property
    def data(self) -> object:
        """Gets the data of this AuditLogEntry.
        An object containing extra details about the status.  # noqa: E501
        :return: The data of this AuditLogEntry.
        :rtype: object
        """
        return self._data
    @data.setter
    def data(self, data: object):
        """Sets the data of this AuditLogEntry.
        An object containing extra details about the status.  # noqa: E501
        :param data: The data of this AuditLogEntry.
        :type data: object
        """
        self._data = data
| [
2,
19617,
25,
3384,
69,
12,
23,
198,
198,
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
198,
6738,
4818,
8079,
1330,
3128,
11,
4818,
8079,
220,
1303,
645,
20402,
25,
376,
21844,
198,
198,
6738,
19720,
1330,
7343,
11,
360,
713,
220... | 2.43634 | 1,563 |
import rospy
from rospy import ServiceProxy
from std_srvs.srv import (Empty, EmptyRequest, EmptyResponse)
from ExperimentOrchestrator.Architecture.Singleton import Singleton | [
11748,
686,
2777,
88,
198,
6738,
686,
2777,
88,
1330,
4809,
44148,
198,
6738,
14367,
62,
27891,
14259,
13,
27891,
85,
1330,
357,
40613,
11,
33523,
18453,
11,
33523,
31077,
8,
198,
198,
6738,
29544,
5574,
2395,
2536,
1352,
13,
19895,
5... | 3.55102 | 49 |
# To add a new cell, type '# %%'
# To add a new markdown cell, type '# %% [markdown]'
# %%
import numpy as np
import matplotlib.pyplot as plt
# %%
# Build the list of result files. Assumes Ps-Pb0.csv .. Ps-Pb49.csv exist in
# the working directory, each holding a 50x50 grid of integers -- TODO confirm.
file_list=[]
for i in range(50):
    file_list.append('Ps-Pb{}.csv'.format(i))
# rdata[j, k, i] holds cell (k, j) of file i (note the transpose on read);
# rd_mean averages over the file axis.
rdata=np.ndarray((50,50,50))
rd_mean=np.ndarray((50,50))
for i in range(50):
    file=np.loadtxt(open(file_list[i],'r'), dtype='str', delimiter=",")
    for j in range(50):
        for k in range(50):
            rdata[j,k,i]=int(file[k,j])
for i in range(50):
    for j in range(50):
        rd_mean[j,i]=np.mean(rdata[i,j])
# %%
# Subtract a baseline that grows linearly with the column index
# (j * 0.02 * 10000) -- NOTE(review): magic constants; their meaning is not
# derivable from this chunk.
for i in range(50):
    for j in range(50):
        rd_mean[i,j]-=(j)*0.02*10000
# %%
# Full heat map of rd_mean over the (Ps, Pb) parameter grid.
plt.figure(dpi=150,figsize=(12,10))
Pslist=np.linspace(0,0.99,50)
Pblist=np.linspace(0,1,50)
c=plt.pcolormesh(Pslist,Pblist,rd_mean, cmap="plasma")
plt.title('density of Quarantine - density of Prevention (Local)')
plt.colorbar(c)
plt.xlabel('density of Prevention (Ps)')
plt.ylabel('density of Quarantine (Pb)')
plt.savefig('Ps-Pb')
# %%
# Same heat map, zoomed to the lower-left quadrant.
plt.figure(dpi=150,figsize=(12,10))
plt.xlim((0,0.5))
plt.ylim((0,0.5))
Pslist=np.linspace(0,0.99,50)
Pblist=np.linspace(0,1,50)
c=plt.pcolormesh(Pslist,Pblist,rd_mean, cmap="plasma")
plt.title('density of Quarantine - density of Prevention (Local)')
plt.colorbar(c)
plt.xlabel('density of Prevention (Ps)')
plt.ylabel('density of Quarantine (Pb)')
plt.savefig('Ps-Pb(limited)')
# %%
| [
2,
1675,
751,
257,
649,
2685,
11,
2099,
705,
2,
43313,
6,
198,
2,
1675,
751,
257,
649,
1317,
2902,
2685,
11,
2099,
705,
2,
43313,
685,
4102,
2902,
49946,
198,
2,
43313,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
2603,
29487,
... | 2.121166 | 652 |
# Copyright (c) 2013 Rackspace, Inc.
# Copyright (c) 2015 Catalyst IT Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import re
from oslo_config import cfg
from oslo_utils import timeutils
import six
from zaqar.i18n import _
# Minimum accepted TTL / grace values, in seconds (per the option help
# strings below -- NOTE(review): units inferred from those help texts).
MIN_MESSAGE_TTL = 60
MIN_CLAIM_TTL = 60
MIN_CLAIM_GRACE = 60
MIN_SUBSCRIPTION_TTL = 60
# Transport-level limit options registered under the 'transport' group;
# deprecated_* entries keep old option names/groups working.
_TRANSPORT_LIMITS_OPTIONS = (
    cfg.IntOpt('max_queues_per_page', default=20,
               deprecated_name='queue_paging_uplimit',
               deprecated_group='limits:transport',
               help='Defines the maximum number of queues per page.'),
    cfg.IntOpt('max_messages_per_page', default=20,
               deprecated_name='message_paging_uplimit',
               deprecated_group='limits:transport',
               help='Defines the maximum number of messages per page.'),
    cfg.IntOpt('max_subscriptions_per_page', default=20,
               deprecated_name='subscription_paging_uplimit',
               deprecated_group='limits:transport',
               help='Defines the maximum number of subscriptions per page.'),
    cfg.IntOpt('max_messages_per_claim_or_pop', default=20,
               deprecated_name='max_messages_per_claim',
               help='The maximum number of messages that can be claimed (OR) '
                    'popped in a single request'),
    cfg.IntOpt('max_queue_metadata', default=64 * 1024,
               deprecated_name='metadata_size_uplimit',
               deprecated_group='limits:transport',
               help='Defines the maximum amount of metadata in a queue.'),
    cfg.IntOpt('max_messages_post_size', default=256 * 1024,
               deprecated_name='message_size_uplimit',
               deprecated_group='limits:transport',
               deprecated_opts=[cfg.DeprecatedOpt('max_message_size')],
               help='Defines the maximum size of message posts.'),
    cfg.IntOpt('max_message_ttl', default=1209600,
               deprecated_name='message_ttl_max',
               deprecated_group='limits:transport',
               help='Maximum amount of time a message will be available.'),
    cfg.IntOpt('max_claim_ttl', default=43200,
               deprecated_name='claim_ttl_max',
               deprecated_group='limits:transport',
               help='Maximum length of a message in claimed state.'),
    cfg.IntOpt('max_claim_grace', default=43200,
               deprecated_name='claim_grace_max',
               deprecated_group='limits:transport',
               help='Defines the maximum message grace period in seconds.'),
    cfg.ListOpt('subscriber_types', default=['http', 'https', 'mailto'],
                help='Defines supported subscriber types.'),
)
_TRANSPORT_LIMITS_GROUP = 'transport'
# NOTE(kgriffs): Don't use \w because it isn't guaranteed to match
# only ASCII characters.
QUEUE_NAME_REGEX = re.compile('^[a-zA-Z0-9_\-]+$')
# Hard caps on identifier lengths enforced by the validators.
QUEUE_NAME_MAX_LEN = 64
PROJECT_ID_MAX_LEN = 256
class ValidationFailed(ValueError):
    """User input did not follow API restrictions."""
| [
2,
15069,
357,
66,
8,
2211,
37927,
13200,
11,
3457,
13,
198,
2,
15069,
357,
66,
8,
1853,
48238,
7283,
12052,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
7... | 2.481299 | 1,417 |
def solve_tilt(B, t):
    '''
    Input: B | Starting board configuration
           t | Tuple t = (x, y) representing the target square
    Output: M | List of moves that solves B (or None if B not solvable)
    '''
    # Placeholder implementation: always returns an empty move list.
    # NOTE(review): the actual search (e.g. BFS over configurations
    # produced by move() below) still needs to be written here.
    M = []
    ##################
    # YOUR CODE HERE #
    ##################
    return M
####################################
# USE BUT DO NOT MODIFY CODE BELOW #
####################################
def move(B, d):
    '''
    Input:  B  | Board configuration (sequence of rows of single chars)
            d  | Direction: one of 'up', 'down', 'left', or 'right'
    Output: B_ | New configuration made by tilting B in direction d
    '''
    n = len(B)
    grid = [list(row) for row in B]

    def slide(cells):
        # cells lists one row/column in the direction of travel; `free`
        # indexes the next slot a sliding 'o' piece may land on.
        free = 0
        for idx, (y, x) in enumerate(cells):
            fy, fx = cells[free]
            if grid[y][x] == 'o' and grid[fy][fx] == '.':
                grid[fy][fx], grid[y][x] = grid[y][x], grid[fy][fx]
                free += 1
            fy, fx = cells[free]
            # Any occupied square blocks later pieces from sliding past it.
            if grid[y][x] != '.' or grid[fy][fx] != '.':
                free = idx

    if d == 'up':
        lanes = [[(y, x) for y in range(n)] for x in range(n)]
    elif d == 'down':
        lanes = [[(y, x) for y in range(n - 1, -1, -1)] for x in range(n)]
    elif d == 'left':
        lanes = [[(y, x) for x in range(n)] for y in range(n)]
    elif d == 'right':
        lanes = [[(y, x) for x in range(n - 1, -1, -1)] for y in range(n)]
    else:
        # Unknown direction: board is returned unchanged (as in a no-op).
        lanes = []

    for lane in lanes:
        slide(lane)
    return tuple(tuple(row) for row in grid)
def board_str(B):
    '''
    Input:  B | Board configuration
    Output: S | ASCII string representing configuration B, framed by a
                +---+ border
    '''
    border = '+' + '-' * len(B) + '+'
    middle = ('|' + ''.join(row) + '|' for row in B)
    return '\n'.join([border, *middle, border])
| [
4299,
8494,
62,
83,
2326,
7,
33,
11,
256,
2599,
198,
220,
220,
220,
705,
7061,
198,
220,
220,
220,
23412,
25,
220,
347,
930,
17962,
3096,
8398,
198,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
256,
930,
309,
29291,
25... | 1.688366 | 1,444 |
# Adjacency list: bag color -> list of (count, contained-bag-color) pairs.
ad_list = {}

for line in open('input.txt', 'r').readlines():
	# Normalize a rule line: drop periods and the word 'contain', turn
	# commas into separators, then split into chunks.
	rules = line.strip().replace('.', '').replace('contain', '').replace(',', ' ').split(' ')
	master_key = None
	for i in range(len(rules)):
		if i == 0:
			# First chunk names the outer bag ("shiny gold" -> "shinygold").
			split_rule = rules[i].split(' ')
			key = split_rule[0] + split_rule[1]
			if key not in ad_list:
				ad_list[key] = []
			master_key = key
		else:
			# Remaining chunks are "<count> <adjective> <color>"; a leading
			# 'no' means the bag holds nothing.
			split_rule = rules[i].split(' ')
			number_key = 0 if split_rule[0] == 'no' else int(split_rule[0])
			ad_list[master_key].append((number_key, split_rule[1] + split_rule[2]))

# NOTE(review): recurse() is not defined in this chunk; it presumably walks
# ad_list from 'shinygold' -- confirm it exists elsewhere in the file.
print(recurse('shinygold'))
| [
324,
62,
4868,
796,
23884,
198,
198,
1640,
1627,
287,
1280,
10786,
15414,
13,
14116,
3256,
705,
81,
27691,
961,
6615,
33529,
198,
197,
38785,
796,
1627,
13,
36311,
22446,
33491,
10786,
2637,
11,
10148,
737,
33491,
10786,
3642,
391,
3256... | 2.327935 | 247 |
from airflow import DAG
from airflow.operators.bash_operator import BashOperator
from airflow.utils.dates import days_ago
with DAG(
    dag_id="airflow_variables_atruntime",
    schedule_interval=None,
    catchup=False,
    start_date=days_ago(1)) as dag:

    # Each task echoes which variable file the run resolved, preferring a
    # value passed via dag_run.conf over the stored Airflow Variable
    # (var.value.*). Fix: the original bound all three operators to the
    # same local name `get_var_filename` (two dead stores); each operator
    # now has its own name. Task ids and commands are unchanged, so the
    # DAG itself is identical.
    get_var_filename = BashOperator(
        task_id="get_var_filename",
        bash_command="""echo 'You are running this DAG with the following variable file: "{{ dag_run.conf["cli_test"] if dag_run.conf else var.value.cli_test }}"'""",
    )

    get_var_filename2 = BashOperator(
        task_id="get_var_filename2",
        bash_command='echo "You are running this DAG with the following variable file: \'{{ dag_run.conf["cli_test2"] if dag_run.conf else var.value.cli_test2 }}\'"',
    )

    get_var_filename3 = BashOperator(
        task_id="get_var_filename3",
        bash_command='echo "You are running this DAG with the following variable file: \'{{ dag_run.conf["cli_test3"] if dag_run.conf else var.value.cli_test3 }}\'"',
    )

#var.value.cli_test
| [
6738,
45771,
1330,
360,
4760,
198,
6738,
45771,
13,
3575,
2024,
13,
41757,
62,
46616,
1330,
15743,
18843,
1352,
198,
6738,
45771,
13,
26791,
13,
19581,
1330,
1528,
62,
3839,
198,
198,
4480,
360,
4760,
7,
198,
220,
220,
220,
48924,
62,... | 2.639896 | 386 |
import re
import datetime
import logging
import pathlib
import datetime
import jinja2
import importlib.resources as pkg_resources
from collections import Counter
from . import templates, format_logger
from .program import sasProgram
class sasProject(object):
    """
    Abstracted SAS project class.
    A SAS project is a collection of individual SAS programs that combine,
    use the same library, include each other, or generally create datasets used by
    each other in such a way that they can be considered largely part of the same piece
    of work.
    ...
    Attributes
    ----------
    path : pathlib.Path
        File path to the root directory of the project
    programs : [sasProgram]
        List of parsed .sas programs found in the project root or subfolders
    macroVariables : [macroVariableDefinition]
        List of all macro variable definitions found in all programs in the project

    NOTE(review): methods below read `self.logger`, `self.programs` and
    `self.documentation`, which are presumably initialised in an `__init__`
    not visible in this chunk — verify before reuse.
    """

    def load_project(self, path):
        """
        load_project(path)
        Search the given path recursively to find all .sas files, then generate sasProgram objects
        from any valid sas programs found.
        Sets values of path and programs.
        Parameters
        ----------
        path : str
            The root file path of the project.

        Returns
        -------
        bool or None
            False if the path could not be resolved, searched or parsed;
            None (implicitly) on success.
        """
        try:
            # strict=True makes resolve() raise if the path does not exist.
            self.path = pathlib.Path(path).resolve(strict=True)
        except Exception as e:
            self.logger.exception("Unable to resolve path: {}".format(e))
            return False
        try:
            # Recursive glob: picks up .sas files in the root and all subfolders.
            programPaths = self.path.rglob('*.sas')
        except Exception as e:
            self.logger.exception("Unable to search folder: {}".format(e))
            return False
        try:
            self.add_programs_to_project(programPaths)
        except Exception as e:
            self.logger.exception("Unable to add programs to project: {}".format(e))
            return False
        # self.macroVariables = {d.variable:d.value for d in self.get_objects(objectType='macroVariableDefinition')}

    def add_programs_to_project(self, programPaths):
        """
        add_programs_to_project(programPaths)
        For a list of found paths to .sas files in the project directory, generate sasProgram objects. If any sasProgram
        objects contain an include object, where possible follow the path in the %include statement, parse file and add to
        the project's programs list.
        Does not parse the program if the path has already been visited.
        Parameters
        ----------
        programPaths : list
            List of discovered program paths in the project's directories.
        """
        # Parse every discovered program once, skipping already-visited paths.
        for path in programPaths:
            if path not in [program.path for program in self.programs]:
                self.programs.append(sasProgram(path))
        # Keep following %include chains: each pass may surface new include
        # targets, so loop until no include path remains unparsed.
        includePaths = set(include.path for include in self.get_objects(objectType='include'))
        while includePaths.difference(set([program.path for program in self.programs])):
            for path in includePaths:
                self.programs.append(sasProgram(path))
            includePaths = set(include.path for include in self.get_objects(objectType='include'))
        # Drop any program that failed to load/parse.
        self.programs = [program for program in self.programs if program.failedLoad != 1]

    def add_addtional_documentation_to_project(self):
        """
        Add any documentation found in the project as an attribute.
        Creates readme and documentation attributes.

        (Method name typo "addtional" retained — renaming would break callers.)
        """
        mdPaths = self.path.glob('*.md')
        # Test for README in root directory
        readMe = self.path.joinpath('readme.md')
        if readMe.is_file():
            with self.path.joinpath('readme.md').open() as f:
                self.readme = f.read()
            # Demote every markdown heading by one level (e.g. "# " -> "## ")
            # so the readme nests under the generated documentation.
            self.readme = re.sub(r'(^#+\s)',r'#\1',self.readme,flags=re.M)
        else:
            self.readme = ''
        docs = {}
        for path in mdPaths:
            with path.open() as f:
                # Key additional docs by their path relative to the project root.
                docs[path.relative_to(self.path)] = f.read()
        self.documentation['additional'] = docs

    def summarise_project(self):
        """
        summarise_project()
        Recursively loop through parsed objects in the project's programs, counting each object by object type.
        This function will count macros and the contents of said macros.
        Returns
        -------
        objectCounter : Counter
            Collections Counter object for all sasdoc.object types found in all programs in the project.
        programCounter : dict
            Dictionary containing an object Counter for each program found in the project
        """
        objectCounter = Counter()
        programCounter = dict()
        for program in self.programs:
            cnt = program.summarise_objects()
            # Accumulate the project-wide total and keep a per-program breakdown.
            objectCounter += cnt
            programCounter[program] = dict(cnt)
        return objectCounter, programCounter

    def get_objects(self, objectType=None):
        """
        get_objects(objectType=None)
        Recursively loop through parsed programs in the project, yielding each sasdocs object. If the object
        is a macro object, enter and yield sas objects found in the macro's contents.
        This function will never return a macro object.
        If passed with optional objectType, this function will only yield objects of type equal to objectType.
        Parameters
        ----------
        objectType : str
            If not none, only yield objects where the object is of type objectType.
        Yields
        ------
        sasdocs.object
        """
        # Delegate filtering/recursion to each program's own get_objects.
        for program in self.programs:
            yield from program.get_objects(objectType=objectType)

    def get_extended_info(self):
        """
        get_extended_info
        Creates class attributes for information about the SAS project.
        .. code-block:: rst
            name : Filename of the SAS code,
            path : Full path to the SAS code,
            nPrograms : Number of programs found in the project,
            summary : Counter object returned by summarise_project,
            objects : Dictionary of Counter objects indexed by program,
            buildTime : Timestamp ("%Y-%m-%d %H:%M") of when this info was built
        """
        objSum, prgSum = self.summarise_project()
        self.name = self.path.name
        self.nPrograms = len(self.programs)
        self.summary = dict(objSum)
        self.objects = dict(prgSum)
        self.buildTime = "{:%Y-%m-%d %H:%M}".format(datetime.datetime.now())

    def generate_documentation(self, macroOnly=False):
        """
        generate_documentation(macroOnly=False)
        Generate documentation for the project using the jinja2 templates.

        Parameters
        ----------
        macroOnly : bool
            If True, only the rendered 'macros' page is produced; per-program
            documentation is skipped.

        Returns
        -------
        dict
            Mapping of program name -> rendered documentation, plus a
            'macros' entry rendered from the packaged macro.md template.
        """
        documentation = {}
        if not macroOnly:
            for program in self.programs:
                documentation[program.name] = program.generate_documentation()
        # The macro overview template is shipped inside the package's
        # templates module and rendered with the whole project in scope.
        template = jinja2.Template(pkg_resources.read_text(templates, 'macro.md'))
        documentation['macros']=template.render(program=self)
        return documentation
| [
11748,
302,
201,
198,
11748,
4818,
8079,
220,
201,
198,
11748,
18931,
201,
198,
11748,
3108,
8019,
201,
198,
11748,
4818,
8079,
201,
198,
11748,
474,
259,
6592,
17,
201,
198,
201,
198,
11748,
1330,
8019,
13,
37540,
355,
279,
10025,
62... | 2.330073 | 3,169 |
# ## PART 2 - read the power plant database, select different type power plant, and reference plant emission for both ccs and baseline
#
# Flat analysis script: screens the world power-plant fleet for CCS retrofit
# potential, aggregates 2015-2050 emissions, and renders a stacked-area chart.
# Depends on project modules (screen_ccs, emission_ccs, post_process_ccs,
# world_powerplants_sparql, powerplant_sparql_sync) and on CSV inputs under
# rootPath; execution order of the statements below matters.
import pandas as pd
import sys
import json
# import time
from screen_ccs import ccs_screen
from emission_ccs import emission_aggregation_ccs
from post_process_ccs import bau_ccs_post, ccs_results
# rootPath = 'C:/Users/WE/WebstormProjects/JPS_EMISSIONS/'
# First CLI argument is a JSON-encoded string holding the project root path.
rootPath = json.loads(sys.argv[1])
# load the powerplant database
# df = pd.read_csv(rootPath + 'data/input/powerplant_database.csv', header='infer', sep=',')
# df = pd.read_csv(rootPath + 'data/output/result.csv', header='infer', sep=',')
from world_powerplants_sparql import WorldPowerPlantsSPARQL
from powerplant_sparql_sync import PowerplantSPARQLSync
wPSPARQL = WorldPowerPlantsSPARQL()
powerplants = wPSPARQL.getPowerplants()
# start_time = time.time()
# Build one record per power plant by querying each plant's knowledge graph.
listDict = []
for powerplant in powerplants:
    pSPARQL = PowerplantSPARQLSync(powerplant)
    powerplantInfo = pSPARQL.getPowerplantInfo()
    listDict.append(powerplantInfo)
df = pd.DataFrame(listDict,
                  columns=['country', 'capacity_MW', 'primary_fuel',
                           'generation_technology', 'age', 'output_MWh', 'fuel_used'])
# print(df.dtypes)
# df.to_csv(rootPath + 'data/output/result.csv', index=False)
# print("{} seconds".format(time.time() - start_time))
# with pd.option_context('display.max_rows', None, 'display.max_columns', None):
#     print(df)
### 2.1 coal
### ultrasupercritical
# choose ultrasupercritical PC from the database
df_01 = df[df.generation_technology == 'ultrasupercritical']
# load the emission inventory table for baseline scenario
dfb_1 = pd.read_csv(rootPath + 'data/input/baseplant/base_ultrasupercritical_PC_coal.csv', header='infer', sep=',')
# load the emission inventory table for ccs scenario
df_1 = pd.read_csv(rootPath + 'data/input/ccs/capture_ultrasupercritical_PC_coal.csv', header='infer', sep=',')
# add age column to the dataframe
df_tem = pd.read_csv(rootPath + 'data/input/ccs/ages.csv', header='infer', sep=',')
# Shared age vector: reused for every coal inventory table below.
ages = df_tem.loc[:,('age')].values
df_1['age'] = ages
# ### supercritical anthracite
# choose anthracite and supercritical PC from the database
df_m = df[df.generation_technology == 'supercritical']
df_02 = df_m[df_m.primary_fuel == 'anthracite']
# load the emission inventory table for baseline scenario
dfb_2 = pd.read_csv(rootPath + 'data/input/baseplant/base_supercritical_PC_anthracite.csv', header='infer', sep=',')
# load the emission inventory table for ccs scenario
df_2 = pd.read_csv(rootPath + 'data/input/ccs/capture_supercritical_PC_anthracite.csv', header='infer', sep=',')
# add age column to the dataframe
df_2['age'] = ages
# ### supercritical bituminous and coal
# choose anthracite and supercritical PC from the database
df_03 = df_m[(df_m.primary_fuel == 'bituminous') | (df_m.primary_fuel == 'coal')]
# load the emission inventory table for baseline scenario
dfb_3 = pd.read_csv(rootPath + 'data/input/baseplant/base_supercritical_PC_bituminous.csv', header='infer', sep=',')
# load the emission inventory table for ccs scenario
df_3 = pd.read_csv(rootPath + 'data/input/ccs/capture_supercritical_PC_bituminous.csv', header='infer', sep=',')
# add age column to the dataframe
df_3['age'] = ages
# ### supercritical subbituminous
# choose anthracite and supercritical PC from the database
df_04 = df_m[df_m.primary_fuel == 'subbituminous']
# load the emission inventory table for baseline scenario
dfb_4 = pd.read_csv(rootPath + 'data/input/baseplant/base_supercritical_PC_subbituminous.csv', header='infer', sep=',')
# load the emission inventory table for ccs scenario
df_4 = pd.read_csv(rootPath + 'data/input/ccs/capture_supercritical_PC_subbituminous.csv', header='infer', sep=',')
# add age column to the dataframe
df_4['age'] = ages
# ### supercritical lignite
# choose anthracite and supercritical PC from the database
df_05 = df_m[df_m.primary_fuel == 'lignite']
# load the emission table
dfb_5 = pd.read_csv(rootPath + 'data/input/baseplant/base_supercritical_PC_lignite.csv', header='infer', sep=',')
# load the emission inventory table for ccs scenario
df_5 = pd.read_csv(rootPath + 'data/input/ccs/capture_supercritical_PC_lignite.csv', header='infer', sep=',')
# add age column to the dataframe
df_5['age'] = ages
# ### subcritical anthracite
# choose anthracite and subcritical PC from the database
df_n = df[df.generation_technology == 'subcritical']
df_06 = df_n[df_n.primary_fuel == 'anthracite']
# load the emission table
dfb_6 = pd.read_csv(rootPath + 'data/input/baseplant/base_subcritical_PC_anthracite.csv', header='infer', sep=',')
# load the emission inventory table for ccs scenario
df_6 = pd.read_csv(rootPath + 'data/input/ccs/capture_subcritical_PC_anthracite.csv', header='infer', sep=',')
# add age column to the dataframe
df_6['age'] = ages
# ### subcritical bituminous and coal
# choose anthracite and supercritical PC from the database
df_coal = df[df.fuel_used == 'coal']
df_07 = df_coal[(df_coal.primary_fuel == 'bituminous') | (df_coal.primary_fuel == 'coal') | (df_coal.generation_technology == 'cogeneration')]
# load the emission table
dfb_7 = pd.read_csv(rootPath + 'data/input/baseplant/base_subcritical_PC_bituminous.csv', header='infer', sep=',')
# load the emission inventory table for ccs scenario
df_7 = pd.read_csv(rootPath + 'data/input/ccs/capture_subcritical_PC_bituminous.csv', header='infer', sep=',')
# add age column to the dataframe
df_7['age'] = ages
# ### subcritical subbituminous
# choose anthracite and supercritical PC from the database
df_08 = df_n[df_n.primary_fuel == 'subbituminous']
# load the emission table
dfb_8 = pd.read_csv(rootPath + 'data/input/baseplant/base_subcritical_PC_subbituminous.csv', header='infer', sep=',')
# load the emission inventory table for ccs scenario
df_8 = pd.read_csv(rootPath + 'data/input/ccs/capture_subcritical_PC_subbituminous.csv', header='infer', sep=',')
# add age column to the dataframe
df_8['age'] = ages
# ### subcritical lignite
# choose anthracite and supercritical PC from the database
df_09 = df_n[df_n.primary_fuel == 'lignite']
# load the emission table
dfb_9 = pd.read_csv(rootPath + 'data/input/baseplant/base_subcritical_PC_lignite.csv', header='infer', sep=',')
# load the emission inventory table for ccs scenario
df_9 = pd.read_csv(rootPath + 'data/input/ccs/capture_subcritical_PC_lignite.csv', header='infer', sep=',')
# add age column to the dataframe
df_9['age'] = ages
# ### subcritical coal_biomass
# choose anthracite and supercritical PC from the database
df_010 = df_n[df_n.primary_fuel == 'coal_biomass']
# load the emission table
dfb_10 = pd.read_csv(rootPath + 'data/input/baseplant/base_subcritical_PC_coal_biomass.csv', header='infer', sep=',')
# load the emission inventory table for ccs scenario
df_10 = pd.read_csv(rootPath + 'data/input/ccs/capture_subcritical_PC_coal_biomass.csv', header='infer', sep=',')
# add age column to the dataframe
df_10['age'] = ages
# ### 2.2 natural gas plant
# choose natural gas plant from the database
df_011 = df[df.primary_fuel == 'natural_gas']
# load the emission table
dfb_11 = pd.read_csv(rootPath + 'data/input/baseplant/base_NGCC.csv', header='infer', sep=',')
dfb_11 = dfb_11.sort_values(by=['age','capacity_MW'], ascending=[True,True])
# load the emission inventory table for ccs scenario
df_11 = pd.read_csv(rootPath + 'data/input/ccs/NGCC_capture.csv', header='infer', sep=',')
df_11 = df_11.sort_values(by=['age','capacity_MW'], ascending=[True,True])
# ### 2.3 oil
# choose oil plant from the database
df_012 = df[df.primary_fuel == 'oil']
# load the emission table
dfb_12 = pd.read_csv(rootPath + 'data/input/baseplant/base_NGCC_oil.csv', header='infer', sep=',')
dfb_12 = dfb_12.sort_values(by=['age','capacity_MW'], ascending=[True,True])
# load the emission inventory table for ccs scenario
df_12 = pd.read_csv(rootPath + 'data/input/ccs/NGCC_capture_oil.csv', header='infer', sep=',')
df_12 = df_12.sort_values(by=['age','capacity_MW'], ascending=[True,True])
# ## PART 3 - emission and economics analysis
# ### 3.1 get important parameter input from user interface (e.g. co2 tax and technology learning rate)
# get carbonprice from user interface, choose from 0, 10, 20, 50, 100
# carbonprice = 0
# Second CLI argument is a JSON-encoded carbon price in $/ton.
carbonprice = int(json.loads(sys.argv[2]))
# get technology learning rate from user interface, choose from high, middle, low
# coalccslearningrate = 'high'
# gasccslearningrate = 'high'
# NOTE(review): both learning rates read sys.argv[3] — presumably a single
# shared rate, but confirm whether gas was meant to use sys.argv[4].
coalccslearningrate = sys.argv[3] # not needed for string
gasccslearningrate = sys.argv[3]
# ### 3.2 assume the newly added capacity share same decomposition as plant fleet between age 0 and 5
# get the plant list of age between 0 and 5
df_01 = df_01[(df_01.age >0) & (df_01.age <= 5)]
df_02 = df_02[(df_02.age >0) & (df_02.age <= 5)]
df_03 = df_03[(df_03.age >0) & (df_03.age <= 5)]
df_04 = df_04[(df_04.age >0) & (df_04.age <= 5)]
df_05 = df_05[(df_05.age >0) & (df_05.age <= 5)]
df_06 = df_06[(df_06.age >0) & (df_06.age <= 5)]
df_07 = df_07[(df_07.age >0) & (df_07.age <= 5)]
df_08 = df_08[(df_08.age >0) & (df_08.age <= 5)]
df_09 = df_09[(df_09.age >0) & (df_09.age <= 5)]
df_010 = df_010[(df_010.age >0) & (df_010.age <= 5)]
df_011 = df_011[(df_011.age >0) & (df_011.age <= 5)]
df_012 = df_012[(df_012.age >0) & (df_012.age <= 5)]
# Parallel lists indexed 0-11: entries 0-9 are coal technologies,
# entry 10 is natural gas (NGCC), entry 11 is oil.
plant_list = [df_01, df_02, df_03, df_04, df_05, df_06, df_07, df_08, df_09, df_010, df_011, df_012]
emission_list_b = [dfb_1, dfb_2, dfb_3, dfb_4, dfb_5, dfb_6, dfb_7, dfb_8, dfb_9, dfb_10, dfb_11, dfb_12]
emission_list_ccs = [df_1, df_2, df_3, df_4, df_5, df_6, df_7, df_8, df_9, df_10, df_11, df_12]
# ### 3.3 screen the newly added plant whether ccs is possible or not
# load the CCS learning rate for coal and gas
# ues the test learning rate at constant 0.8
# ccs_learning_coal = pd.read_csv(rootPath + 'data/input/ccs/ccs_technology_learning_test.csv', header='infer', sep=',')
ccs_learning_coal = pd.read_csv(rootPath + 'data/input/ccs/coal_ccs_technology_learning.csv', header='infer', sep=',')
ccs_learning_gas = pd.read_csv(rootPath + 'data/input/ccs/gas_ccs_technology_learning.csv', header='infer', sep=',')
# set the learning rate to high
ccs_learning_coal = ccs_learning_coal[ccs_learning_coal.learning_rate == coalccslearningrate]
ccs_learning_gas = ccs_learning_gas[ccs_learning_gas.learning_rate == gasccslearningrate]
# select year and plant LCOE decrease
# reset_index so .at[i, ...] below can use the 0-35 loop counter directly.
ccs_learning_coal = ccs_learning_coal[['year','plant_LCOE_decrease']]
ccs_learning_coal = ccs_learning_coal.reset_index(drop=True)
ccs_learning_gas = ccs_learning_gas[['year','plant_LCOE_decrease']]
ccs_learning_gas = ccs_learning_gas.reset_index(drop=True)
# set the key parameters
# set the refernce cost for coal, gas, and oil plant without carbon price respectively
coal_reference_cost_base = 30
gas_reference_cost_base = 60
oil_reference_cost_base = 60
# set the carbon price
carbon_price = carbonprice
# set the capacity factor
capacity_factor = 0.75
# load the CCS table
ccs = pd.read_csv(rootPath + 'data/output/BAU_CCS/input_bau_ccs_learning_high.csv', header='infer', sep=',')
# set the refernce cost for coal, gas, and oil plant with carbon price respectively
# Reference LCOE = base cost + carbon cost, where carbon cost spreads the
# annual emission charge over generated MWh (emission ton/h * price over
# capacity MW * capacity factor).
coal_reference_cost = coal_reference_cost_base + (carbon_price * 1144) / (1500 * capacity_factor)
gas_reference_cost = gas_reference_cost_base + (carbon_price * 214) / (600 * capacity_factor)
oil_reference_cost = oil_reference_cost_base + (carbon_price * 214) / (600 * capacity_factor)
# calculate the vintage change of powerplant database during 35 years: from 2015 to 2050
import copy
w = {} # emission inventory of existing plant type j at year i
k = {} # ccs possible plant from newly retired plant type j at year i
plant = {} # newly built plant database for type j
plant_m = {} # exisitng plant database for type j
plant_n = {} # ccs plant database for type j
coal_emission_w = {} # overall coal emission inventory at year i
gas_emission_w = {} # overall gas emission inventory at year i
oil_emission_w = {} # overall oil emission inventory at year i
coal_emission_k = {} # overall coal emission inventory at year i
gas_emission_k = {} # overall gas emission inventory at year i
oil_emission_k = {} # overall oil emission inventory at year i
em_li_ccs = {} #emission list
# load the CCS table
# NOTE(review): duplicate of the identical pd.read_csv a few lines above —
# harmless but redundant; one of the two loads can likely be removed.
ccs = pd.read_csv(rootPath + 'data/output/BAU_CCS/input_bau_ccs_learning_high.csv', header='infer', sep=',')
# open('bau_ccs_learn.txt', 'w').close()
# i indexes the simulation year offset: year = i + 2015, covering 2015-2050.
for i in range(36):
    w[i] = {}
    k[i] = {}
    #emission_list_ccs[i] = emission_list_ccs
    em_li_ccs[i] = {}
    # Deep-copy the CCS inventories so this year's in-place LCOE updates do
    # not leak into subsequent years.
    M = copy.deepcopy(emission_list_ccs)
    for j in range(12):
        # get the newly built plant database
        plant[j] = plant_list[j]
        # set the reference cost to screen ccs possible plants based on fuel type
        if j >= 0 and j <= 9:
            reference_cost = coal_reference_cost
            learning_rate = ccs_learning_coal.at[i,'plant_LCOE_decrease']
        elif j == 10:
            reference_cost = gas_reference_cost
            learning_rate = ccs_learning_gas.at[i,'plant_LCOE_decrease']
        else:
            reference_cost = oil_reference_cost
            learning_rate = ccs_learning_gas.at[i,'plant_LCOE_decrease']
        em_li_ccs[i][j] = M[j]
        # set the carbon price and capacity factor
        em_li_ccs[i][j]['carbon_price_ton'] = carbon_price
        em_li_ccs[i][j]['capacity_factor'] = capacity_factor
        # set the capture plant LCOE at year i according to learning rate
        em_li_ccs[i][j]['capture_plant_LCOE_MWh'] = em_li_ccs[i][j]['capture_plant_LCOE_MWh'] * (1-learning_rate)
        em_li_ccs[i][j]['carbon_LCOE_MWh'] = (emission_list_ccs[j]['emission_rate_ton_h'] * em_li_ccs[i][j]['carbon_price_ton']) / (emission_list_ccs[j]['capacity_MW'] *em_li_ccs[i][j]['capacity_factor'])
        em_li_ccs[i][j]['capture_carbon_LCOE_MWh'] = (emission_list_ccs[j]['capture_emission_rate_ton_h'] * em_li_ccs[i][j]['carbon_price_ton']) / (emission_list_ccs[j]['capacity_MW'] *em_li_ccs[i][j]['capacity_factor'])
        em_li_ccs[i][j]['LCOE_MWh'] =em_li_ccs[i][j]['carbon_LCOE_MWh'] +em_li_ccs[i][j]['plant_LCOE_MWh']
        em_li_ccs[i][j]['capture_LCOE_MWh'] =em_li_ccs[i][j]['capture_carbon_LCOE_MWh'] + em_li_ccs[i][j]['capture_plant_LCOE_MWh']
        # screen ccs possible plant from newly built plant
        # NOTE(review): ccs_screen is called twice with identical arguments to
        # take [0] and [1]; if it is pure/deterministic a single call with
        # tuple unpacking would halve the work — confirm before changing.
        w[i][j] = ccs_screen (plant[j], em_li_ccs[i][j], reference_cost, i)[0]
        k[i][j] = ccs_screen (plant[j], em_li_ccs[i][j], reference_cost, i)[1]
        # print("plant df: %s" %(j+1))
        # text_file = open("bau_ccs_learn.txt", "a")
        # text_file.write("plant type: %s, " %(j+1))
        # text_file.write("newly built plant number: %s, capacity: %s  " %(plant[j].shape[0], plant[j]['capacity_MW'].sum()))
        # text_file.write("newly built ccs possible plant number: %s, capacity: %s  " %(w[i][j].shape[0], w[i][j]['capacity_MW'].sum()))
        # text_file.write('\n')
    # aggregated ccs possible new coal power plant
    coal_emission_w[i+2015] = pd.concat([w[i][0],w[i][1],w[i][2],w[i][3],w[i][4],w[i][5],w[i][6],w[i][7],w[i][8],w[i][9]],
                                        ignore_index=True, sort=False)
    coal_emission_w[i+2015]['fuel_used'] = 'coal'
    # aggregated ccs not possible new coal power plant
    coal_emission_k[i+2015] = pd.concat([k[i][0],k[i][1],k[i][2],k[i][3],k[i][4],k[i][5],k[i][6],k[i][7],k[i][8],k[i][9]],
                                        ignore_index=True, sort=False)
    coal_emission_k[i+2015]['fuel_used'] = 'coal'
    # aggregated ccs possible new gas power plant
    gas_emission_w[i+2015] = w[i][10]
    gas_emission_w[i+2015]['fuel_used'] = 'gas'
    # aggregated ccs not possible new gas power plant
    gas_emission_k[i+2015] = k[i][10]
    gas_emission_k[i+2015]['fuel_used'] = 'gas'
    # aggregated ccs possible new oil power plant
    oil_emission_w[i+2015] = w[i][11]
    oil_emission_w[i+2015]['fuel_used'] = 'oil'
    # aggregated ccs not possible new gas power plant
    oil_emission_k[i+2015] = k[i][11]
    oil_emission_k[i+2015]['fuel_used'] = 'oil'
    # aggregate the emission of year i for different plant types
    ccs = emission_aggregation_ccs(coal_emission_w[i+2015], coal_emission_k[i+2015], ccs, i)
    ccs = emission_aggregation_ccs(gas_emission_w[i+2015], gas_emission_k[i+2015], ccs, i)
    ccs = emission_aggregation_ccs(oil_emission_w[i+2015], oil_emission_k[i+2015], ccs, i)
    # print (i+2015)
    # text_file.write("year: %s" %(i+2015))
    # text_file.write("###################### \n")
# post process the ccs table to calculate overall emission and select useful columns
ccs = bau_ccs_post(ccs)
ccs = ccs_results(ccs)
ccs.to_csv(rootPath + 'data/output/BAU_CCS/results_bau_ccs_learning_high.csv', index=False)
# ## 4 visualize the results
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
x = ccs.loc[:,('year')].values
y = ccs.loc[:,['coal_power_annual_emission_existing','gas_power_annual_emission_existing','oil_power_annual_emission_existing']].values
# stackplot expects one row per series, so transpose (years, fuels) -> (fuels, years).
y = np.transpose(y)
sns.set_style("white")
sns.set_context("paper",font_scale=1)
plt.clf()
f, ax = plt.subplots(1, 1, figsize=(3,2.5))
# create your palette using html codes
pal = ["#F44027", "#CAF91E", "#2B8EF1", "#CAF91E"]
plt.stackplot(x,y, labels=['Coal','Natural Gas','Oil'], colors=pal)
# Shrink the axes so the legend fits above the plot.
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width, box.height*0.85])
ax.legend(loc='center left', bbox_to_anchor=(-0.03,1.05), ncol=3)
ax.set(xlabel='Year', ylabel='CO2 annual emission (Gt/year)')
ax.set_xlim([2015, 2050])
xticks = [2015,2020,2030,2040,2050]
yticks = np.arange(0, 22, 2.5)
ax.set_xticks(xticks)
ax.set_yticks(yticks)
plt.savefig(rootPath + 'public/images/annual_baseline.png', bbox_inches='tight', dpi=500)
# Signal completion to the calling process via stdout.
print(json.dumps("COMPLETE"))
2,
22492,
16652,
362,
532,
1100,
262,
1176,
4618,
6831,
11,
2922,
1180,
2099,
1176,
4618,
11,
290,
4941,
4618,
25592,
329,
1111,
269,
6359,
290,
14805,
198,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
25064,
198,
11748,
33918,
1... | 2.483712 | 7,306 |
from pygame import *
from GameSprite import GameSprite
# Subclass of GameSprite for the player sprite (controlled with the arrow keys)
| [
6738,
12972,
6057,
1330,
1635,
201,
198,
6738,
3776,
38454,
578,
1330,
3776,
38454,
578,
201,
198,
2,
31583,
30143,
16142,
21727,
21727,
12,
22177,
16142,
21727,
30143,
16843,
43666,
22177,
18849,
31583,
12466,
112,
30143,
40623,
220,
21727... | 1.45679 | 81 |
import pytest
from mock import patch
from time import sleep
from schedule import run_pending
@pytest.mark.parametrize("no_city_label_yet", [True, False])
@pytest.mark.parametrize(
"exec_sql_result, expected_post_called, expected_fct_result", [("Berlin", True, True), (None, False, False)]
)
| [
11748,
12972,
9288,
198,
6738,
15290,
1330,
8529,
198,
6738,
640,
1330,
3993,
198,
6738,
7269,
1330,
1057,
62,
79,
1571,
628,
198,
198,
31,
9078,
9288,
13,
4102,
13,
17143,
316,
380,
2736,
7203,
3919,
62,
19205,
62,
18242,
62,
25907,
... | 2.866667 | 105 |
import inspect
from datetime import datetime
from ipaddress import IPv4Address
import pytest
from CybORG import CybORG
from CybORG.Shared.Actions import MSFAutoroute
from CybORG.Shared.Actions import SSHLoginExploit
from CybORG.Shared.Enums import SessionType, ProcessState, ProcessType, AppProtocol, Architecture, \
OperatingSystemDistribution, OperatingSystemKernelVersion, OperatingSystemType
from CybORG.Tests.EphemeralPort import LinuxEphemeralPort
| [
11748,
10104,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
6738,
20966,
21975,
1330,
25961,
19,
20231,
198,
198,
11748,
12972,
9288,
198,
198,
6738,
5934,
65,
1581,
38,
1330,
5934,
65,
1581,
38,
198,
6738,
5934,
65,
1581,
38,
13,
248... | 3.34058 | 138 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 30 22:57:06 2020
@author: philipp
"""
# =============================================================================
# Data Prep
# =============================================================================
#with open('data/start.txt', 'r') as myfile:
# data = myfile.read()
#
#line_list = data.splitlines()
#Common Crawl Jsonn
import json
import os
import gzip
import pdb
from multiprocessing import Pool
import subprocess
import socket
from bs4 import BeautifulSoup
from tqdm import tqdm
# =============================================================================
# 2nd Approch nearly keep the raw data
# =============================================================================
from somajo import SoMaJo
from tqdm import tqdm
import ray
import pathlib
@ray.remote
def split(list_of_text, thread_number, TMP_DIR):
    """
    Splits text in sentences
    Writes line for line with leading space (for BPE)
    Every document is separated by a free line

    Parameters
    ----------
    list_of_text : list of str
        Raw documents to sentence-split.
    thread_number : int
        Index used to name the output file ``Splitted_XXXXX.txt``.
    TMP_DIR : str
        Folder the split sentence file is written to.

    Returns
    -------
    int
        The ``thread_number`` back, so callers can track completed ray tasks.
    """
    print(os.path.join(TMP_DIR, "Splitted_{:05d}.txt".format(thread_number)))
    # NOTE(review): handle is never explicitly closed/flushed — relies on
    # worker-process teardown; a with-block would be safer.
    outF = open(os.path.join(TMP_DIR, "Splitted_{:05d}.txt".format(thread_number)), "w")
    # Tokenizer is constructed inside the remote task (once per ray worker
    # invocation), presumably because it cannot be shipped to workers — confirm.
    tokenizer = SoMaJo("de_CMC", split_camel_case=True)
    for part in list_of_text:
        sentences = tokenizer.tokenize_text([part])
        for sentence in sentences:
            output = ""
            for token in sentence:
                #word_list = [token.text for token in sentence]
                # Mid-sentence token followed by whitespace: keep trailing space.
                if (token.space_after and not token.last_in_sentence and not token.first_in_sentence):
                    output += (token.text + ' ')
                # Sentence-initial token gets a leading space (for BPE).
                elif token.first_in_sentence:
                    output += (' ' + token.text + ' ')
                else:
                    #output = " ".join(word_list[:-1])
                    output += token.text
                    #output += word_list[-1]
            #sen_out.append(output)
            # One sentence per line ...
            outF.write(output)
            outF.write("\n")
        # ... and a free line between documents.
        outF.write("\n")
    return thread_number
def chunks(lst, n):
    """Break *lst* into successive chunks of at most *n* items.

    Yields slices of ``lst`` in order; every chunk has length ``n`` except
    possibly the final one, which holds the remainder.
    """
    for offset in range(0, len(lst), n):
        yield lst[offset:offset + n]
def read_file(path, TRANSFER_DIR=None, skip_files=None):
    """
    Read and extract all files found in *path*, yielding one list of raw
    text documents per input file.

    Parameters
    ----------
    path : str
        Directory to scan. A path starting with ``gs`` is treated as a
        Google Cloud Storage location and listed via the storage client.
    TRANSFER_DIR : str, optional
        Local destination folder used when files are downloaded from GCS.
    skip_files : list, optional
        File names to skip (e.g. already-processed files taken from a log).
        NOTE(review): a ``None`` default makes ``file not in skip_files``
        raise TypeError — callers currently always pass a list; confirm.

    Yields
    ------
    list of str
        The extracted text documents of one input file.
    """
    # Select the json keyword holding the document text for this source type
    # (module-level TYPE is set in __main__).
    global TYPE
    if TYPE == 'CC':
        json_key = 'raw_content'
    elif TYPE == 'LEGALDUMP':
        json_key = 'content'
    if path.startswith('gs'):
        from google.cloud import storage
        p = pathlib.Path(path)
        storage_client = storage.Client()
        file_list = storage_client.list_blobs(p.parts[1], prefix="/".join(p.parts[2:]))
        #print(file_list))
    else:
        file_list = os.listdir(path)
    for file in file_list:
        print(file)
        if file not in skip_files:
            if path.startswith('gs'): #Copy files from storage to local HDD first
                #bucket = storage_client.bucket(p.parts[1])
                #blob = bucket.blob("/".join(p.parts[2:]))
                # NOTE(review): Blob.download_to_file expects a file object,
                # not a directory path — likely should be
                # download_to_filename(os.path.join(TRANSFER_DIR, file.name));
                # confirm against the storage client API before running.
                file.download_to_file(TRANSFER_DIR)
                print("Blob {} downloaded to {}.".format(p.parts[2:],TRANSFER_DIR))
                file_path = os.path.join(TRANSFER_DIR, file)
            else:
                file_path = os.path.join(path, file)
            if file.endswith('.gz'):
                data = []
                with gzip.open(file_path, 'rt', encoding='utf-8', errors='ignore') as zipfile:
                    if FIX_EXPORT_ERROR:
                        # The export accidentally concatenated python-repr dicts
                        # on one line; re-split on the "{'url'" record marker.
                        a = zipfile.readline()
                        a = a.split("{'url'")
                        a = [("{'url'" + item) for item in a]
                        for line in tqdm(a[1:]):
                            # SECURITY: eval() executes arbitrary code from the
                            # input file — the records are python reprs, not
                            # JSON. Prefer ast.literal_eval for untrusted data.
                            try:
                                scraped = eval(line)
                            except ValueError:
                                scraped = eval(line.replace(chr(0), ""))
                            except SyntaxError:
                                # Skip unparseable records entirely; falling
                                # through here would re-append the previous
                                # record via the stale `scraped` binding.
                                continue
                            #Only take really good parts
                            if scraped["language_score"] > 0.98:
                                data.append(scraped[json_key])
                            #else: print(scraped[json_key])
            elif file.endswith('.txt'):
                with open(file_path) as f:
                    raw_text = f.readlines()
                data = [x.strip() for x in raw_text]
            elif file.endswith('.json'):
                data = []
                # with-block so the handle is closed (was a leaked open()).
                with open(file_path, 'r') as f:
                    for line in f:
                        scraped = json.loads(line)
                        data.append(scraped[json_key])
                if TYPE == 'LEGALDUMP': #HTML to Text Conversion
                    data = [BeautifulSoup(line).text for line in data]
            yield data
        else:
            print('Skipping file', file)
if __name__ == '__main__':
    # GCP credentials/project are intentionally blanked here; fill in before
    # using gs:// input paths.
    os.environ["GOOGLE_APPLICATION_CREDENTIALS"]=""
    os.environ["GCLOUD_PROJECT"]=""
    #Define source type here
    TYPE = 'CC'
    FIX_EXPORT_ERROR = True #Fix weird format which was accidently saved as dedup output
    #For local Debugging Purposes
    if socket.gethostname() == "philipp-desktop":
        VOCAB_FILE = "/media/data/48_BERT/german-transformer-training/src/vocab.txt"
        THREADS = 8
        #Path from where the txt files are read in
        IN_DIR = "/media/data/48_BERT/german-transformer-training/data/head"
        #When downloaded from gcp this is the destination folder
        #TRANSFER_DIR = "/media/data/48_BERT/german-transformer-training/data/download"
        #TMP_DIR: Folder where cleaned and splitted texts are places
        TMP_DIR = "/media/data/48_BERT/german-transformer-training/data/tmp"
        #TF_OUT_DIR: tf_records file for training BERT
        TF_OUT_DIR = "/media/data/48_BERT/german-transformer-training/data/tf_rec"
        #PATH TO BERT SRC Code
        BERT_PATH = "../../01_BERT_Code/bert/"
    else: #For large scale execution on server
        VOCAB_FILE = "/mnt/disks/data/mBERT/vocab.txt"
        THREADS = os.cpu_count()
        IN_DIR = "/mnt/disks/data/data_head_url"
        TMP_DIR = "/mnt/disks/data/data_head_url_cleaned"
        TF_OUT_DIR = "/home/philipp_reissel/data/data_head_url_mbert_tfrec"
        BERT_PATH = "/home/philipp_reissel/bert"
    #a = list(files)
    global_index = 0
    READ_LOG = True
    # NOTE(review): if READ_LOG is False, skip_files stays undefined and the
    # read_file() call below raises NameError — initialise skip_files = []
    # before the if when disabling log replay.
    if READ_LOG:
        # Resume from a previous run: recover the last written chunk index
        # and the list of already-processed archives from the log.
        with open('Log_Sentence_Extraction.txt', 'r') as log_file:
            logs = log_file.readlines()
            global_index = int(logs[-2].split('Splitted_')[1][0:5])
            skip_files = [line.strip() for line in logs[:-1] if 'tar.gz' in line]
    files = read_file(IN_DIR, skip_files=skip_files)
    # (removed leftover pdb.set_trace() breakpoint that halted the pipeline)
    for file in files:
        ray.init(num_cpus=THREADS)
        result_ids = []
        #1e4 for json,gz else 1e5
        chunksize = int(1e3) #Needs XX GB RAM per Core
        for local_index, chunk in enumerate(chunks(file, chunksize)):
            index = global_index + local_index
            result_ids.append(split.remote(chunk, index, TMP_DIR))
        # Block until every split task of this file has finished.
        results = ray.get(result_ids)
        global_index += local_index + 1
        ray.shutdown()
    # (removed leftover pdb.set_trace() breakpoint that halted the pipeline)
    # Build one create-pretraining command per cleaned text file and fan them
    # out over a process pool.
    cmd_var = []
    for index, file in enumerate(os.listdir(TMP_DIR)):
        file_path = os.path.join(TMP_DIR, file)
        #os.system(bert_pretraining_cmd.format(file_path, TF_OUT_DIR, str(index)))
        cmd_var.append([BERT_PATH, file_path, TF_OUT_DIR, index])
    pool = Pool(processes=THREADS) #22 * 14 mb *dupe=5 (1540mb) equals 30 GB of RAM
    # NOTE(review): run_command is not defined anywhere in this file —
    # presumably a helper that shells out to BERT's create_pretraining_data;
    # must be defined/imported before this script can finish. TODO confirm.
    pool.map(run_command, cmd_var) #Output: 22*270 Mb = 5940 mb Output Files
    pool.close()
    pool.join()
    """
    filenames = [f"data/tmp/Splitted_{thread_number}.txt" for thread_number in range(0,THREADS)]
    with open('data/merged.txt', 'w') as outfile:
        for fname in tqdm(filenames):
            with open(fname) as infile:
                for line in infile:
                    outfile.write(line)
    """
# Takes 4 Minutes on 22 Cores for 1e6 Wiki lines (300 mb)
# =============================================================================
# DEPRECATED !!!!
# =============================================================================
"""
sen_out = []
#outF = open("data/Splitted.txt", "w")
outF = open("/media/data/47_KISS/11_tika/Sentences.txt", "w")
tokenizer = SoMaJo("de_CMC", split_camel_case=True)
#for part in tqdm(raw_text):
if True:
part = data
sentences = tokenizer.tokenize_text([part])
for sentence in sentences:
output = ""
for token in sentence:
#word_list = [token.text for token in sentence]
if (token.space_after and not token.last_in_sentence and not token.first_in_sentence):
output += (token.text + ' ')
elif token.first_in_sentence:
output += (' ' + token.text + ' ')
else:
#output = " ".join(word_list[:-1])
output += token.text
#output += word_list[-1]
sen_out.append(output)
if len(output) > 3 and len(output) < 300 and not 'ID' in output:
outF.write(output.strip())
outF.write("\n")
#outF.write("\n")data = json.loads(json_str)
outF.close()
"""
# =============================================================================
# Copy pasted from BERT tokenization.py
# =============================================================================
import unicodedata
def _run_split_on_punc(text):
    """Split *text* into pieces, isolating each punctuation character.

    Every punctuation character (per ``_is_punctuation``) becomes its own
    piece; maximal runs of non-punctuation characters stay together.
    Copied behaviour from BERT's tokenization.py, e.g. "a,b" -> ["a", ",", "b"].
    """
    pieces = []
    begin_new_piece = True
    for ch in text:
        if _is_punctuation(ch):
            # A punctuation char is always a standalone piece and forces the
            # next regular character to open a fresh piece.
            pieces.append([ch])
            begin_new_piece = True
        else:
            if begin_new_piece:
                pieces.append([])
                begin_new_piece = False
            pieces[-1].append(ch)
    return ["".join(piece) for piece in pieces]
def _is_punctuation(char):
"""Checks whether `chars` is a punctuation character."""
cp = ord(char)
# We treat all non-letter/number ASCII as punctuation.
# Characters such as "^", "$", and "`" are not in the Unicode
# Punctuation class but we treat them as punctuation anyways, for
# consistency.
if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or
(cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)):
return True
cat = unicodedata.category(char)
if cat.startswith("P"):
return True
return False
# =============================================================================
# SoMaJo taken from https://github.com/tsproisl/SoMaJo
# =============================================================================
# NOTE(review): disabled experiment. It references SoMaJo and raw_text, which
# are not defined in this part of the file -- presumably set up above.
if False:
    from tqdm import tqdm
    sen_out = []
    tokenizer = SoMaJo("de_CMC", split_camel_case=True)
    for part in tqdm(raw_text):
        sentences = tokenizer.tokenize_text([part])
        for sentence in sentences:
            word_list = [token.text for token in sentence]
            output = " ".join(word_list[:-1])
            output += word_list[-1]
            sen_out.append(output)
# NOTE(review): the statements below run unconditionally but depend on
# raw_text / sen_out created only inside the disabled branch above -- they
# raise NameError as written. Looks like leftovers from interactive use.
_is_punctuation(raw_text[-1][-1])
stripped = []
# Truncate each sentence at its first punctuation character. Both branches
# append the char; the else merely breaks afterwards, so the punctuation
# character itself is kept.
for index, part in tqdm(enumerate(sen_out)):
    reordered = ""
    for char in part:
        if not _is_punctuation(char):
            reordered += char
        else:
            reordered += char
            break
    reordered = reordered.strip()
    stripped.append(reordered)
outF = open("data/Splitted.txt", "w")
for line in stripped:
    # write line to output file
    outF.write(line)
    outF.write("\n\n")
outF.close()
# =============================================================================
# Spacy
# =============================================================================
# NOTE(review): disabled experiment -- spaCy sentence segmentation demo.
if False:
    import spacy
    data = "Trinken soll nur, wer's verträgt Jetzt ist's aus mit euch"
    nlp = spacy.load("de_core_news_sm")
    doc = nlp(data)
    for sent in doc.sents:
        print(sent.text)
# =============================================================================
# Moses: Used in cc_net https://github.com/luismsgomes/mosestokenizer
# =============================================================================
# NOTE(review): disabled experiment -- Moses sentence splitter.
if False:
    from mosestokenizer import *
    splitsents = MosesSentenceSplitter('de')
    splitsents([data])
# =============================================================================
# https://github.com/bminixhofer/nnsplit
# =============================================================================
# NOTE(review): disabled experiment -- NNSplit splitter.
if False:
    from nnsplit import NNSplit
    splitter = NNSplit("de")
    res = splitter.split([data])
# =============================================================================
# More advanced: Deepsegment: Does not support German
# =============================================================================
# NOTE(review): disabled experiment -- 'de' is passed although the banner
# above says German is unsupported; TODO confirm which is true.
if False:
    from deepsegment import DeepSegment
    # The default language is 'en'
    segmenter = DeepSegment('de')
    with open('data/start.txt', 'r') as myfile:
        data = myfile.read()
    segmenter.segment('I am Batman i live in gotham')
# =============================================================================
# Huggingface tokenizer
# =============================================================================
# NOTE(review): disabled experiment -- byte-level BPE tokenization of the
# split sentences. The tokenizer is constructed twice with identical
# arguments; only the second binding is used.
if False:
    from tokenizers.implementations import ByteLevelBPETokenizer
    from tokenizers.processors import BertProcessing
    from pathlib import Path
    tokenizer = ByteLevelBPETokenizer(
        "data/german_old.json",
        "data/german_old.txt",)
    tokenizer = ByteLevelBPETokenizer(
        "data/german_old.json",
        "data/german_old.txt",)
    tokenizer._tokenizer.post_processor = BertProcessing(
        ("</s>", tokenizer.token_to_id("</s>")),
        ("<s>", tokenizer.token_to_id("<s>")),)
    tokenizer.enable_truncation(max_length=512)
    #print(tokenizer.encode(sen_out[0]))
    examples = []
    lines = Path('data/Splitted.txt').read_text(encoding="utf-8").splitlines()
    examples += [x.ids for x in tokenizer.encode_batch(lines)]
    a = tokenizer.encode(sen_out[0])
    a.tokens
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
319,
30030,
7653,
1542,
2534,
25,
3553,
25,
3312,
12131,
198,
198,
31,
9800,
25,
872,
8908,
19... | 2.232499 | 6,671 |
__author__ = "R Devon Hjelm, Bogdan Mazoure, Florian Golemo"
__copyright__ = "Copyright (c) Microsoft Corporation and Mila - Quebec AI " \
"Institute"
__license__ = "MIT"
from flax.training.train_state import TrainState
from flax.training import checkpoints
import jax.numpy as jnp
import jax
import optax
import numpy as np
from models import TwinHeadModel
class NumpyRepresentation:
    """
    A simple interface between pre-trained PPO policies (in Jax) and any other
    framework, e.g. PyTorch, Tensorflow, etc. Converts all vectors to NumPy.

    NOTE(review): relies on ``self.train_state`` and ``self.model_ppo`` being
    set by an ``__init__`` that is not shown here -- confirm it exists.
    """

    def __call__(self,
                 x_: np.ndarray) -> tuple[np.ndarray, np.ndarray, np.ndarray]:
        """
        Input:
            x_: n_batch x resolution x resolution x 3, float32 array [0.,1.]
        Output:
            representation: n_batch x n_d
            pi_logits: n_batch x n_actions
            v: n_batch
        """
        batch = jnp.array(x_)
        # Full forward pass: value head and policy head.
        value, policy = self.train_state.apply_fn(self.train_state.params,
                                                  batch)
        # Encoder-only pass for the latent representation.
        latent = self.train_state.apply_fn(self.train_state.params,
                                           batch,
                                           method=self.model_ppo.encode)
        return (np.array(latent),
                np.array(policy.distribution.loc),
                np.array(value))
if __name__ == '__main__':
    # Smoke test: embed one dummy observation and print the latent shape.
    # NOTE(review): NumpyRepresentation defines no __init__ in this file, so
    # `NumpyRepresentation(n_action=2)` raises TypeError as written -- the
    # constructor (presumably restoring a checkpoint via flax `checkpoints`)
    # appears to be missing. Confirm before running.
    np_model = NumpyRepresentation(n_action=2)
    obs = jnp.ones(shape=(64, 64, 3))
    z, _, _ = np_model(obs)
    print(z.shape)
| [
834,
9800,
834,
796,
366,
49,
36889,
367,
73,
417,
76,
11,
21555,
25604,
21625,
280,
260,
11,
4432,
666,
1514,
293,
5908,
1,
198,
834,
22163,
4766,
834,
796,
366,
15269,
357,
66,
8,
5413,
10501,
290,
4460,
64,
532,
14778,
9552,
36... | 2.151057 | 662 |
import json
import time
import pdb
import re
import traceback
from selenium import webdriver
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support.ui import Select
from selenium.common import exceptions
# NOTE(review): shadow_click, get_locations, get_clusters and scrape are not
# defined in this part of the file -- confirm they are defined above.
driver = webdriver.Chrome('../chromedriver.exe')
driver.maximize_window()
driver.get("https://loyalty.maverik.com/locations/map")
# Scrape accumulator; immediately replaced by the saved JSON below.
chain = {"name": "Maverik", "stores": []}
default_delay = 2
time.sleep(3)
# Resume from a previous run's output.
with open("../Outputs/maverik.json", "r") as f:
    chain = json.load(f)
# Close popup
driver.find_element_by_class_name("alert-button-inner").click()
# Select 'Search by Store #'
store_number_type = driver.find_element_by_xpath(
    "/html/body/app-root/ion-app/ion-router-outlet/app-locations/div/div[2]/div/ion-radio-group/div/div[4]/ion-radio")
shadow_click(store_number_type)
time.sleep(3)
# Search store numbers 694..998 and scrape every location the map shows.
for i in range(694, 999):
    search_bar = driver.find_element_by_xpath(
        "/html/body/app-root/ion-app/ion-router-outlet/app-locations/div/div[2]/div/div[2]/ion-searchbar/div/input")
    # Clear the previous (up to 3-digit) store number before typing the next.
    search_bar.send_keys(Keys.BACKSPACE * 3)
    search_bar.send_keys(str(i))
    time.sleep(default_delay)
    # Re-query the DOM on every pass: scraping may refresh the result list.
    for location_index in range(len(get_locations())):
        location = get_locations()[location_index]
        scrape(location)
    # Expand map clusters until none remain, scraping what they reveal.
    while len(get_clusters()) > 0:
        cluster = get_clusters()[0]
        try:
            ActionChains(driver).move_to_element(
                cluster).click().click().perform()
        except:
            # Best-effort: stale/obstructed clusters are skipped on purpose.
            pass
        time.sleep(default_delay)
        for location_index in range(len(get_locations())):
            location = get_locations()[location_index]
            scrape(location)
print("Done")
| [
11748,
33918,
198,
11748,
640,
198,
11748,
279,
9945,
198,
11748,
302,
198,
11748,
12854,
1891,
198,
198,
6738,
384,
11925,
1505,
1330,
3992,
26230,
198,
198,
6738,
384,
11925,
1505,
13,
12384,
26230,
13,
11321,
13,
2673,
62,
38861,
133... | 2.531044 | 757 |
"""
Monitor Global.health Kubernetes pods to see if any
are stuck in Pending status for a while. If such pods are
present, notify on Slack.
IMPORTANT:
The kubernetes Python package needs to be kept in sync
with the EKS Kubernetes version. The compatibility matrix
for the client Python library is at
https://github.com/kubernetes-client/python#compatibility
NOTE:
This will need to be updated once we move to separate
clusters for different environments.
"""
import os
import sys
import base64
import logging
import datetime
from typing import Any
from pathlib import Path
import requests
import kubernetes
SERVICES = ["curator", "data", "location"]
SLACK_WEBHOOK_URL = os.getenv("SLACK_WEBHOOK_URL")
def last_status(pod: dict[str, Any]) -> tuple[str, datetime.datetime]:
    """Return (condition type, transition time) of the pod's most recent condition.

    BUG FIX: the original annotated the return as ``(str, datetime.datetime)``,
    which is a runtime tuple, not a valid type expression (PEP 484); use
    ``tuple[str, datetime.datetime]``. Also replaced copy+sort with ``max``
    (O(n) instead of O(n log n), same result including tie-breaking: the first
    condition with the maximal transition time wins in both versions).
    """
    latest = max(pod.status.conditions, key=lambda c: c.last_transition_time)
    return latest.type, latest.last_transition_time
def is_service(pod: dict[str, Any]) -> bool:
    "Returns whether pod is curator, location, or data pod"
    # A service pod's name is "<service>-<suffix>" for one of SERVICES.
    pod_name = pod.metadata.name
    return any(pod_name.startswith("{}-".format(svc)) for svc in SERVICES)
def get_pods() -> list[dict[str, Any]]:
    "Returns list of pods in the cluster"
    # NOTE: despite the annotation, this yields a lazy `filter` iterator over
    # the service pods (curator/data/location) across all namespaces.
    api = kubernetes.client.CoreV1Api()
    all_pods = api.list_pod_for_all_namespaces(watch=False).items
    return filter(is_service, all_pods)
def is_not_ready(pod: dict[str, Any]) -> bool:
    "Returns whether a pod is not ready"
    # "Not ready" means the most recent condition is anything but "Ready".
    condition_type, _ = last_status(pod)
    return condition_type != "Ready"
def summary(pod: dict[str, Any]) -> str:
    "Returns readable one-line summary of pod status"
    condition_type, condition_time = last_status(pod)
    # Slack-flavoured markdown: bold pod name, condition details in parens.
    return "- *{}* ({}, {})".format(pod.metadata.name,
                                    condition_type,
                                    condition_time)
def notify(text: str):
    "Notifies Slack with message"
    # Empty text (after stripping) means nothing to report -- no post is made.
    text = text.strip()
    if text:
        text = "⚠ Some pods are stuck in pending!\n" + text
        logging.info(text)
    if not (SLACK_WEBHOOK_URL and text):
        return
    response = requests.post(SLACK_WEBHOOK_URL, json={"text": text})
    if response.status_code == 200:
        return
    logging.error(
        "Slack notification failed with {}: {}".format(
            response.status_code, response.text
        )
    )
    sys.exit(1)
if __name__ == "__main__":
    # NOTE(review): ensure_kubeconfig_exists() is neither defined nor imported
    # in this part of the file -- confirm it exists elsewhere before running.
    ensure_kubeconfig_exists()
    config = kubernetes.config.load_config()
    # Summarize every service pod whose latest condition is not "Ready" and
    # send the joined report to Slack (empty report => no notification).
    notify("\n".join(map(summary, filter(is_not_ready, get_pods()))))
| [
37811,
198,
35479,
8060,
13,
13948,
12554,
527,
3262,
274,
37185,
284,
766,
611,
597,
198,
533,
7819,
287,
350,
1571,
3722,
329,
257,
981,
13,
1002,
884,
37185,
389,
198,
25579,
11,
19361,
319,
36256,
13,
198,
198,
3955,
15490,
8643,
... | 2.732651 | 879 |
# -*- coding:utf8 -*-
#
# Copyright (c) 2014 Xavier Lesa <xavierlesa@gmail.com>.
# All rights reserved.
# Distributed under the BSD license, see LICENSE
from setuptools import setup, find_packages
import sys, os
# Package metadata for the djblog WordPress importer (migrates WordPress
# content into djblog via the wordpress-json API).
setup(name='djblog_wordpress_importer',
      version='0.1',
      description="Herramienta para migrar wordpress a djblog, a través de wordpress-json",
      packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),
      include_package_data=True,
      install_requires=[
          'wordpress-json',
      ],
      # NOTE(review): dependency_links is deprecated and ignored by modern
      # pip -- djblog must be installed separately from its git URL.
      dependency_links=[
          'git+https://github.com/ninjaotoko/djblog.git',
      ],
      zip_safe=False,
      author='Xavier Lesa',
      author_email='xavierlesa@gmail.com',
      url='http://github.com/ninjaotoko/djblog_wordpress_importer'
      )
| [
2,
532,
9,
12,
19617,
25,
40477,
23,
532,
9,
12,
198,
2,
198,
2,
15069,
357,
66,
8,
1946,
30825,
11732,
64,
1279,
87,
19492,
829,
64,
31,
14816,
13,
785,
28401,
198,
2,
1849,
3237,
2489,
10395,
13,
198,
2,
4307,
6169,
739,
262... | 2.313725 | 357 |
import shlex
| [
11748,
427,
2588,
628,
628
] | 3.2 | 5 |
import re
import torch
import numpy as np
import scipy.sparse as sparse
from torch_geometric.data import Data as pyg_data
class Data(object):
    r"""A plain old python object modeling a single graph with various
    (optional) attributes:
    Args:
        x (Tensor, optional): Node feature matrix with shape :obj:`[num_nodes,
            num_node_features]`. (default: :obj:`None`)
        edge_index (LongTensor, optional): Graph connectivity in COO format
            with shape :obj:`[2, num_edges]`. (default: :obj:`None`)
        edge_attr (Tensor, optional): Edge feature matrix with shape
            :obj:`[num_edges, num_edge_features]`. (default: :obj:`None`)
        y (Tensor, optional): Graph or node targets with arbitrary shape.
            (default: :obj:`None`)
        pos (Tensor, optional): Node position matrix with shape
            :obj:`[num_nodes, num_dimensions]`. (default: :obj:`None`)
    The data object is not restricted to these attributes and can be extended
    by any other additional data.
    """
@staticmethod
def from_dict(dictionary):
r"""Creates a data object from a python dictionary."""
data = Data()
for key, item in dictionary.items():
data[key] = item
return data
    def __getitem__(self, key):
        r"""Gets the data of the attribute :obj:`key`.

        Delegates to :func:`getattr`, so class-level properties (e.g.
        :obj:`num_edges`) are reachable as well as instance attributes.
        """
        return getattr(self, key)
    def __setitem__(self, key, value):
        """Sets the attribute :obj:`key` to :obj:`value` (plain setattr)."""
        setattr(self, key, value)
@property
def keys(self):
r"""Returns all names of graph attributes."""
keys = [key for key in self.__dict__.keys() if self[key] is not None]
keys = [key for key in keys if key[:2] != '__' and key[-2:] != '__']
return keys
    def __len__(self):
        r"""Returns the number of all present attributes (delegates to the
        :obj:`keys` property: non-None, non-dunder entries)."""
        return len(self.keys)
    def __contains__(self, key):
        r"""Returns :obj:`True`, if the attribute :obj:`key` is present in the
        data (i.e. set and not None)."""
        return key in self.keys
def __iter__(self):
r"""Iterates over all present attributes in the data, yielding their
attribute names and content."""
for key in sorted(self.keys):
yield key, self[key]
def __call__(self, *keys):
r"""Iterates over all attributes :obj:`*keys` in the data, yielding
their attribute names and content.
If :obj:`*keys` is not given this method will iterative over all
present attributes."""
for key in sorted(self.keys) if not keys else keys:
if self[key] is not None:
yield key, self[key]
def cat_dim(self, key, value):
r"""Returns the dimension in which the attribute :obj:`key` with
content :obj:`value` gets concatenated when creating batches.
.. note::
This method is for internal use only, and should only be overridden
if the batch concatenation process is corrupted for a specific data
attribute.
"""
# `*index*` and `*face*` should be concatenated in the last dimension,
# everything else in the first dimension.
return -1 if bool(re.search("(index|face)", key)) else 0
def __inc__(self, key, value):
r""""Returns the incremental count to cumulatively increase the value
of the next attribute of :obj:`key` when creating batches.
.. note::
This method is for internal use only, and should only be overridden
if the batch concatenation process is corrupted for a specific data
attribute.
"""
# Only `*index*` and `*face*` should be cumulatively summed up when
# creating batches.
return self.num_nodes if bool(re.search('(index|face)', key)) else 0
@property
def num_edges(self):
r"""Returns the number of edges in the graph."""
for key, item in self("edge_index", "edge_attr"):
return item.size(self.cat_dim(key, item))
return None
@property
def num_features(self):
r"""Returns the number of features per node in the graph."""
return 1 if self.x.dim() == 1 else self.x.size(1)
@property
@num_nodes.setter
def is_coalesced(self):
r"""Returns :obj:`True`, if edge indices are ordered and do not contain
duplicate entries."""
row, col = self.edge_index
index = self.num_nodes * row + col
return row.size(0) == torch.unique(index).size(0)
def apply(self, func, *keys):
r"""Applies the function :obj:`func` to all attributes :obj:`*keys`.
If :obj:`*keys` is not given, :obj:`func` is applied to all present
attributes.
"""
for key, item in self(*keys):
self[key] = func(item)
return self
    def contiguous(self, *keys):
        r"""Ensures a contiguous memory layout for all attributes :obj:`*keys`.
        If :obj:`*keys` is not given, all present attributes are ensured to
        have a contiguous memory layout."""
        # Delegates to apply(); assumes every selected value is a torch.Tensor
        # (or anything exposing .contiguous()).
        return self.apply(lambda x: x.contiguous(), *keys)
    def to(self, device, *keys):
        r"""Performs tensor dtype and/or device conversion to all attributes
        :obj:`*keys`.
        If :obj:`*keys` is not given, the conversion is applied to all present
        attributes."""
        # Delegates to apply(); assumes every selected value supports .to().
        return self.apply(lambda x: x.to(device), *keys)
    def subgraph(self, node_idx):
        """Return the induced node subgraph."""
        # Lazily build the sparse adjacency cache. _build_adj_ is not visible
        # in this part of the file -- TODO(review) confirm it sets _Data__adj.
        if self.__adj is None:
            self._build_adj_()
        if isinstance(node_idx, torch.Tensor):
            node_idx = node_idx.cpu().numpy()
        # Deduplicate (and sort) the requested node ids.
        node_idx = np.unique(node_idx)
        # Slice rows, then columns, of the adjacency matrix.
        adj = self.__adj[node_idx, :][:, node_idx]
        adj_coo = sparse.coo_matrix(adj)
        row, col = adj_coo.row, adj_coo.col
        edge_attr = torch.from_numpy(adj_coo.data).to(self.x.device)
        # NOTE(review): concatenating two 1-D arrays along axis=0 yields a
        # flat vector of length 2E, not a (2, E) edge_index -- verify this is
        # the intended layout (edge_subgraph below builds (2, E) instead).
        edge_index = torch.from_numpy(np.concatenate([row, col], axis=0)).to(self.x.device)
        # Restrict every non-edge attribute to the kept nodes.
        keys = self.keys
        attrs = {key: self[key][node_idx] for key in keys if "edge" not in key}
        attrs["edge_attr"] = edge_attr
        attrs["edge_index"] = edge_index
        return Data(**attrs)
    def edge_subgraph(self, edge_idx):
        """Return the induced edge subgraph."""
        if isinstance(edge_idx, torch.Tensor):
            edge_idx = edge_idx.cpu().numpy()
        # Selected edges as (E, 2) pairs of original node ids.
        edge_index = self.edge_index.T[edge_idx].cpu().numpy()
        # Nodes touched by the selected edges, sorted and deduplicated.
        node_idx = np.unique(edge_index)
        # Map original node id -> compact position in node_idx.
        idx_dict = {val: key for key, val in enumerate(node_idx)}
        func = lambda x: [idx_dict[x[0]], idx_dict[x[1]]]
        # Re-index the edges into the compact node numbering, back to (2, E).
        edge_index = np.array([func(x) for x in edge_index]).transpose()
        edge_index = torch.from_numpy(edge_index).to(self.x.device)
        edge_attr = self.edge_attr[edge_idx]
        # Restrict every non-edge attribute to the touched nodes.
        keys = self.keys
        attrs = {key: self[key][node_idx] for key in keys if "edge" not in key}
        attrs["edge_attr"] = edge_attr
        attrs["edge_index"] = edge_index
        return Data(**attrs)
@staticmethod
| [
11748,
302,
198,
198,
11748,
28034,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
629,
541,
88,
13,
82,
29572,
355,
29877,
198,
6738,
28034,
62,
469,
16996,
13,
7890,
1330,
6060,
355,
12972,
70,
62,
7890,
628,
198,
4871,
6060,
7,
... | 2.385705 | 2,966 |
#!/usr/bin/env python2.7
# -*- coding: utf-8 -*-
# Copyright (C) 2015 Canux CHENG <canuxcheng@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
# OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import logging
import traceback
from monitoring.nagios.plugin import NagiosPluginSSH
logger = logging.getLogger('plugin.nrftfm')
# define new args
# Init plugin
# NOTE(review): PluginNRFTFM is neither defined nor imported in this part of
# the file (only NagiosPluginSSH is imported) -- the subclass that should add
# the custom --oracle-home argument appears to be missing. Confirm.
plugin = PluginNRFTFM(version="1.0",
                      description="check Oracle table")
plugin.shortoutput = "check lines from frmentprop table"
# Final status exit for the plugin
status = None
# ORACLE_HOME="/opt/oracle/ora11"
# Remote command: COUNT(*) over nrftfmfi.fmentprop via sqlplus as the oracle
# user; sed keeps only the line after the '--' ruler (the numeric result).
cmd = """echo ". /usr/local/bin/nrft.env ;echo \\"select count(*) from \
nrftfmfi.fmentprop;\\" \
|{0}/bin/sqlplus -s nagios/nagios_nrft" \
|sudo -u oracle -i \
|sed -n '/--/{{n; p;}}""".format(plugin.options.oracle_home)
logger.debug("cmd : {0}".format(cmd))
try:
    command = plugin.ssh.execute(cmd)
    output = command.output
    errors = command.errors
    logger.debug("Received output: %s", output)
    logger.debug("Received errors: %s", errors)
except:
    # Bare except is deliberate: any failure exits the plugin as UNKNOWN with
    # the traceback in the long output (plugin.unknown() does not return).
    plugin.shortoutput = "Something unexpected happened ! " \
        "Please investigate..."
    plugin.longoutput = traceback.format_exc().splitlines()
    plugin.unknown(plugin.output())
if errors:
    plugin.unknown("Errors found:\n{}".format("\n".join(errors)))
# Parse the single count line(s); the last one wins for the final status.
for line in output:
    result = int(line)
    status = plugin.ok
    logger.debug("Result: %d", result)
    # Check threshold
    if plugin.options.warning:
        if result >= plugin.options.warning:
            status = plugin.warning
            plugin.shortoutput = "The number of lines in the fmentprop " \
                "table is {}" .format(result)
    if plugin.options.critical:
        if result >= plugin.options.critical:
            status = plugin.critical
            plugin.shortoutput = "The number of lines in the fmentprop " \
                "table is {}" .format(result)
# Return status with message to Nagios
logger.debug("Return status and exit to Nagios.")
if status:
    plugin.shortoutput = "The number of lines in the fmentprop " \
        "table is {}" .format(result)
    status(plugin.output())
else:
    plugin.unknown('Unexpected error during plugin execution, '
                   'please investigate with debug mode on.')
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
17,
13,
22,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
15069,
357,
34,
8,
1853,
1680,
2821,
5870,
26808,
1279,
5171,
2821,
2395,
782,
31,
14816,
13,
785,
2... | 2.833476 | 1,171 |
import random
Quit = False
# Outer loop: one full game per iteration until the player declines a replay.
while not Quit:
    #Start up message to the user
    print("Hello user, this program allows you to play a guessing game"\
          " with the computer. The computer will select a number between 1 and"\
          " your max number and you will have to guess which number the computer has"\
          " selected.")
    print("The program will also keep a running total on how many times you guessed")
    #Have the program select a number between 1 and the players number
    Max = int(input("Please enter the maximum number you want:"))
    Computer = random.randint(1, Max)
    #Setup the counter
    # NOTE(review): Counter counts only WRONG guesses, so the closing message
    # ("you guessed a total of N times") under-reports by one -- confirm
    # whether that is intended before changing the user-visible text.
    Counter = 0
    #Tell the user that the computer has choosen and it is time to guess
    print("The computer has choosen a number")
    User = int(input("Please guess a number:"))
    #Set up the loop
    while User != Computer :
        Counter += 1
        print("You guessed wrong")
        if User > Computer:
            print("It was to high")
            User = int(input("Please try a diffrent number:"))
        elif User < Computer:
            print("It was to low")
            User = int(input("Please try a diffrent number:"))
    #Once they guessed right
    print("Congradulations user, you guessed the right number which was", \
          User ,"you guessed a total of", Counter ,"times!")
    PlayAgain = input("Would you like to play again (yes or no)?")
    PlayAgain = PlayAgain.lower()
    if PlayAgain == "yes" or PlayAgain == "y":
        Quit = False
    else:
        Quit = True
print("Thanks for playing, hope you come back again!")
| [
11748,
4738,
198,
198,
4507,
270,
796,
10352,
198,
198,
4514,
407,
48887,
25,
198,
220,
220,
220,
1303,
10434,
510,
3275,
284,
262,
2836,
198,
220,
220,
220,
3601,
7203,
15496,
2836,
11,
428,
1430,
3578,
345,
284,
711,
257,
25260,
9... | 2.882246 | 552 |
from .db import db
from .entity import *
from .relation import *
| [
6738,
764,
9945,
1330,
20613,
198,
6738,
764,
26858,
1330,
1635,
198,
6738,
764,
49501,
1330,
1635,
628
] | 3.666667 | 18 |
"""A package to differentiate between the "standard" `users`
module and the version with online registration
.. autosummary::
:toctree:
users
"""
| [
37811,
32,
5301,
284,
28754,
1022,
262,
366,
20307,
1,
4600,
18417,
63,
198,
21412,
290,
262,
2196,
351,
2691,
9352,
198,
198,
492,
44619,
388,
6874,
3712,
198,
220,
220,
1058,
1462,
310,
631,
25,
628,
220,
220,
220,
2985,
198,
198,... | 3.466667 | 45 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-10-06 14:29
from __future__ import unicode_literals
from django.db import migrations, models
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
2980,
515,
416,
37770,
352,
13,
1157,
13,
20,
319,
2177,
12,
940,
12,
3312,
1478,
25,
1959,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
... | 2.736842 | 57 |
from open_alchemy import init_yaml
# Construct the SQLAlchemy models declared in the OpenAPI spec file
# (OpenAlchemy allOf example).
init_yaml("all-of-model-example-spec.yml")
| [
6738,
1280,
62,
282,
26599,
1330,
2315,
62,
88,
43695,
198,
198,
15003,
62,
88,
43695,
7203,
439,
12,
1659,
12,
19849,
12,
20688,
12,
16684,
13,
88,
4029,
4943,
198
] | 2.548387 | 31 |
from random import randint
import utils
objs = ["Rock", "Paper", "Scissor"]
# Initial draw (immediately re-drawn inside the loop; kept for compatibility
# with the original random sequence).
computer = objs[randint(0, 2)]

playing = True

# One round per iteration: read the player's move, draw the computer's move,
# print the winner, then offer to quit.
while playing:
    player = input("Rock, Paper or Scissor ? ")
    computer = objs[randint(0, 2)]
    print(utils.getWinner(player, computer))
    key = input(
        """
    1. To keep Playing Press Enter
    2. To Quit Press input Q
    """
    )
    # BUG FIX: the prompt advertises "Q" but the input was only compared
    # against lowercase "q", so typing "Q" (or "q " with a space) did not
    # quit. Normalise before comparing.
    if key.strip().lower() == "q":
        playing = False

print("Thank You For Playing!")
| [
6738,
4738,
1330,
43720,
600,
198,
11748,
3384,
4487,
198,
198,
672,
8457,
796,
14631,
19665,
1600,
366,
42950,
1600,
366,
3351,
747,
273,
8973,
198,
198,
33215,
796,
909,
8457,
58,
25192,
600,
7,
15,
11,
362,
15437,
198,
198,
17916,
... | 2.427083 | 192 |
from .Player import Player
import os
| [
6738,
764,
14140,
1330,
7853,
198,
11748,
28686,
628
] | 4.222222 | 9 |
from flask_wtf import FlaskForm
from wtforms import TextField
from wtforms.validators import Required, NumberRange, Optional
from mod_tags.models import *
| [
6738,
42903,
62,
86,
27110,
1330,
46947,
8479,
198,
6738,
266,
83,
23914,
1330,
8255,
15878,
198,
6738,
266,
83,
23914,
13,
12102,
2024,
1330,
20906,
11,
7913,
17257,
11,
32233,
198,
6738,
953,
62,
31499,
13,
27530,
1330,
1635,
628
] | 3.804878 | 41 |
"""
A deployment script that accomplishes the following:
Sets up a handler for the endpoint /deploy, which when triggered on a production port:
1) Navigates into a "debug" directory, where it pulls the latest copy of Brainspell from GitHub (or clones a copy, if one doesn't already exist)
2) Starts the "debug" Brainspell server at port 5858.
3) Triggers the /deploy endpoint on the debug server locally, which:
a) Navigates out of the "debug" directory, and pulls a fresh copy of the GitHub repo for the production server.
This process ensures that a GitHub push is deployed to production only if:
i) The server can successfully run and
ii) The deploy endpoint on the server still exists.
There is no guarantee that the deploy endpoint is functioning properly.
"""
# TODO: set up SupervisorD
import argparse
import os
import subprocess
from time import sleep
import tornado.ioloop
import brainspell
from base_handler import *
def subprocess_cmd_sync(command):
    """ Synchronously run a bash command and return its stripped stdout
    (bytes).

    BUG FIX: the original captured stdout into a local and then discarded it;
    returning it is backwards compatible (callers that ignored the implicit
    None are unaffected).
    NOTE: the command runs with shell=True -- pass trusted strings only. """
    process = subprocess.Popen(command, stdout=subprocess.PIPE, shell=True)
    proc_stdout = process.communicate()[0].strip()
    return proc_stdout
def subprocess_cmd_async(command):
    """ Asynchronously run a bash command and return the Popen handle so
    callers may wait()/poll()/terminate() it (the original discarded it;
    returning it is backwards compatible).
    NOTE: the command runs with shell=True -- pass trusted strings only. """
    return subprocess.Popen(command, stdout=subprocess.PIPE, shell=True)
class DeployHandler(BaseHandler):
""" Implement the protocol described above. """
| [
37811,
198,
32,
14833,
4226,
326,
6424,
5614,
262,
1708,
25,
198,
198,
50,
1039,
510,
257,
21360,
329,
262,
36123,
1220,
2934,
1420,
11,
543,
618,
13973,
319,
257,
3227,
2493,
25,
198,
198,
16,
8,
13244,
328,
689,
656,
257,
366,
2... | 3.590674 | 386 |
from NIENV import *
# API METHODS
# self.main_widget <- access to main widget
# Ports
# self.input(index) <- access to input data
# set_output_val(index, val) <- set output data port value
# self.exec_output(index) <- executes an execution output
# self.create_new_input(type_, label, widget_name=None, widget_pos='under', pos=-1)
# self.delete_input(index or input)
# self.create_new_output(type_, label, pos=-1)
# self.delete_output(index or output)
# Logging
# mylog = self.new_log('Example Log')
# mylog.log('I\'m alive!!')
# self.log_message('hello global!', 'global')
# self.log_message('that\'s not good', 'error')
# ------------------------------------------------------------------------------
import cv2
| [
6738,
24947,
1677,
53,
1330,
1635,
198,
198,
2,
7824,
337,
36252,
50,
198,
198,
2,
2116,
13,
12417,
62,
42655,
220,
220,
220,
220,
220,
220,
220,
24293,
1895,
284,
1388,
26295,
628,
198,
2,
30824,
198,
2,
2116,
13,
15414,
7,
9630,... | 2.865169 | 267 |
from pathlib import Path
from hat.doit import common
from hat.doit.js import (build_npm,
run_eslint)
__all__ = ['task_js_build',
'task_js_check',
'task_js_deps']
build_js_dir = Path('build/js')
src_js_dir = Path('src_js')
readme_path = Path('README.rst')
def task_js_build():
    """JavaScript - build"""
    # NOTE(review): `build` is not defined in this file (only build_npm is
    # imported from hat.doit.js) -- this action raises NameError when doit
    # resolves it. It probably should be a local wrapper around build_npm
    # using build_js_dir / src_js_dir / readme_path. TODO confirm.
    return {'actions': [build],
            'task_dep': ['js_deps']}
def task_js_check():
    """JavaScript - check sources with eslint"""
    return {
        'actions': [(run_eslint, [src_js_dir])],
        'task_dep': ['js_deps'],
    }
def task_js_deps():
    """JavaScript - install npm dependencies via yarn"""
    install_cmd = 'yarn install --silent'
    return {'actions': [install_cmd]}
| [
6738,
3108,
8019,
1330,
10644,
198,
198,
6738,
6877,
13,
4598,
270,
1330,
2219,
198,
6738,
6877,
13,
4598,
270,
13,
8457,
1330,
357,
11249,
62,
77,
4426,
11,
198,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
22... | 2.205047 | 317 |
import logging
import re
from .connection_manager.errors import CLIError
log = logging.getLogger(__name__)
| [
11748,
18931,
198,
11748,
302,
198,
198,
6738,
764,
38659,
62,
37153,
13,
48277,
1330,
43749,
12331,
198,
198,
6404,
796,
18931,
13,
1136,
11187,
1362,
7,
834,
3672,
834,
8,
628
] | 3.4375 | 32 |
import pysolr
import json
import requests
class SolrConnection(object):
'''
Connection to Solr database
'''
# The number of documents held in a core's insert queue before
# the documents in the core are automatically inserted.
QUEUE_THRESHOLD = 100
def __init__(self, url):
'''
Creates a SolrConnection form the given base Solr url of the form
'http://<solrhostname>:<port>/solr'.
'''
self.url = url
self.solr = pysolr.Solr(url, timeout=10)
self.solr_admin = pysolr.SolrCoreAdmin(url + '/admin/cores')
self.cores = {}
self.queues = {}
for core_name in self.fetch_core_names():
self.cores[core_name] = pysolr.Solr(self.url + '/' + core_name)
self.queues[core_name] = list()
def fetch_core_names(self):
'''
Makes a request to Solr and returns an array of strings where each
string is the name of a core in the response from Solr.
'''
status_response = self.solr_admin.status()
status = json.loads(status_response)
return [core_name for core_name in status['status']]
def core_names(self):
'''
Returns a list of known valid cores in the Solr instance without
making a request to Solr - this request excludes cores used for testing.
'''
valid_cores = list(self.cores.keys())
if 'test' in valid_cores:
valid_cores.remove('test')
return valid_cores
def fetch_core_schema(self, name):
'''
Returns the schema of the core with the given name as a dictionary.
'''
response = self._get_url("{}/{}/schema".format(self.url, name), {})
if 'schema' not in response:
raise ValueError('Solr did not return a schema. Are you sure ' + \
'the core named "{}" is an existing core?'.format(name))
return response['schema']
def queue_document(self, core, doc):
'''
Queues a document for insertion into the specified core and returns None.
If the number of documents in the queue exceeds a certain threshold,
this function will insert them all the documents held in the queue of the
specified core and return the response from Solr.
All values in 'doc' must be strings.
'''
if core not in self.cores:
raise ValueError("A core for the document type {} was not found".format(core))
self.queues[core].append(doc)
if len(self.queues[core]) >= self.QUEUE_THRESHOLD:
docs = list(self.queues[core].copy())
del self.queues[core][:]
return self.insert_documents(core, docs)
return None
def insert_documents(self, core_name, docs):
'''
Inserts given list of documents into specified core. Returns Solr response.
'''
if core_name not in self.cores:
raise ValueError('No Solr core with the name "{}" was found'.format(core_name))
print('Inserting {} items into core {}'.format(str(len(docs)), core_name))
return self.cores[core_name].add(docs)
def insert_queued(self):
'''
Inserts all queued documents across all cores. Returns an object
containing the Solr response from each core.
'''
response = {}
for core in self.cores:
docs = list(self.queues[core].copy())
del self.queues[core][:]
response[core] = self.insert_documents(core, docs)
return response
def query(self, core, query, sort="", start="", rows="", default_field="",
search_fields="", return_fields="", highlight_fields="", omit_header=True):
'''
Returns the response body from Solr corresponding to the given query.
See https://lucene.apache.org/solr/guide/6_6/common-query-parameters.html
and https://lucene.apache.org/solr/guide/6_6/highlighting.html
for common query parameters and parameter formatting.
Params (See Solr docs link above for details):
core (str): The name of the Solr core to search in.
query (str): The string to search the core for.
sort (str): The field to sort results on, and the sort order (see
Solr docs for details).
start (int): Specifies an offset into a query’s result set and instructs
Solr to begin displaying results from this offset.
rows (int): The maximum number of documents from the complete result
set that Solr should return.
default_field (str): The default field to search in.
search_fields (str): Defines a query that can be used to restrict
the superset of documents that can be returned, without
influencing score.
return_fields (str): Limits the information included in a query
response to a specified list of fields.
highlight_fields (str): Specifies a list of fields to highlight.
omit_header (bool): Whether or not Solr should include a header with
metadata about the query in its response.
'''
params = {
"q": query,
"wt": "json",
"df": default_field,
"omitHeader": "true" if omit_header else "false",
"hl.fragsize": 200
}
if sort is not "":
params["sort"] = sort
if start is not "":
params["start"] = start
if rows is not "":
params["rows"] = rows
if search_fields is not "":
params["fq"] = search_fields
if return_fields is not "":
params["fl"] = return_fields
if highlight_fields is not "":
params["hl"] = "on"
params["hl.fl"] = highlight_fields
return self._get_url("{}/{}/select".format(self.url, core), params)
def optimize(self, core_name=None):
    '''
    Perform defragmentation of the specified core in the Solr database.

    If no core is specified, defragment all cores.

    Raises:
        ValueError: if `core_name` is given but no such core exists.
    '''
    if core_name:
        if core_name not in self.cores:
            raise ValueError('No Solr core with the name "{}" was found'.format(core_name))
        self.cores[core_name].optimize()
    else:
        # Plain loop instead of a list comprehension: the comprehension
        # was used purely for side effects and built a throwaway list.
        for core in self.cores:
            self.cores[core].optimize()
def _get_url(self, url, params):
    '''
    Makes a request to the given url relative to the base url with the given
    parameters and returns the response as a JSON string.
    '''
    # pysolr.safe_urlencode encodes the query parameters (handles
    # non-ASCII values) before the HTTP GET.
    response = requests.get(url, params=pysolr.safe_urlencode(params))
    # NOTE(review): despite the docstring, .json() returns the *parsed*
    # response (dict/list), not a string -- confirm intended contract.
    return response.json()
| [
11748,
279,
893,
349,
81,
198,
11748,
33918,
198,
11748,
7007,
198,
198,
4871,
4294,
81,
32048,
7,
15252,
2599,
198,
220,
220,
220,
705,
7061,
198,
220,
220,
220,
26923,
284,
4294,
81,
6831,
198,
220,
220,
220,
705,
7061,
628,
220,
... | 2.351156 | 2,899 |
from enum import Enum
import cv2
import numpy as np
def convertScale(img, alpha, beta):
"""Add bias and gain to an image with saturation arithmetics. Unlike
cv2.convertScaleAbs, it does not take an absolute value, which would lead to
nonsensical results (e.g., a pixel at 44 with alpha = 3 and beta = -210
becomes 78 with OpenCV, when in fact it should become 0).
"""
new_img = img * alpha + beta
new_img[new_img < 0] = 0
new_img[new_img > 255] = 255
return new_img.astype(np.uint8)
# Automatic brightness and contrast optimization with optional histogram clipping
| [
6738,
33829,
1330,
2039,
388,
198,
11748,
269,
85,
17,
198,
11748,
299,
32152,
355,
45941,
628,
628,
198,
4299,
10385,
29990,
7,
9600,
11,
17130,
11,
12159,
2599,
198,
220,
220,
220,
37227,
4550,
10690,
290,
4461,
284,
281,
2939,
351,... | 3.102041 | 196 |
# Should test if the langevin actually produces adversarial against a pretrained classifier
from adv_train.launcher import Launcher
from adv_train.model import (
DatasetType,
MnistModel,
CifarModel,
load_classifier,
load_dataset,
)
from adv_train.dynamic import Attacker
from adv_train.dataset import AdversarialDataset
import argparse
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
import tqdm
if __name__ == "__main__":
parser = Attacker.add_arguments()
parser = LangevinAttack.add_arguments(parser)
args = parser.parse_args()
torch.manual_seed(1234)
attack = LangevinAttack(args)
attack.launch()
| [
2,
10358,
1332,
611,
262,
300,
858,
7114,
1682,
11073,
16907,
36098,
1028,
257,
2181,
13363,
1398,
7483,
198,
6738,
1354,
62,
27432,
13,
38722,
2044,
1330,
26385,
198,
6738,
1354,
62,
27432,
13,
19849,
1330,
357,
198,
220,
220,
220,
1... | 2.913793 | 232 |
import sqlite3
if __name__ == '__main__':
    # Path to the Northwind sample database (relative to the CWD).
    dbname = "myDatabase/Sqlite_Northwind.sqlite3"
    # Prompt (Thai): "Enter the minimum number of times an item was sold".
    # The literal is user-facing runtime text and must stay in Thai.
    sales = [int(input("กรอกจำนวนที่สินค้าขายได้มากกว่ากี่ครั้ง : "))]
    # NOTE(review): sqlQuery is not defined in this chunk -- presumably
    # defined earlier in the file; confirm.
    sqlQuery(dbname, sales)
| [
11748,
44161,
578,
18,
201,
198,
201,
198,
201,
198,
201,
198,
201,
198,
201,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
201,
198,
220,
220,
220,
20613,
3672,
796,
366,
1820,
38105,
14,
50,
13976,
578,
62,
14157,... | 1.302469 | 162 |
import re
import subprocess
from IPython.utils.text import SList
__all__ = (
'ish_out',
'ish_run',
)
# Matches a shell-escape expression: '!' followed by text whose first
# character is neither '(' nor '['; group 1 captures the command text.
expression_str = r'\!([^\(\[].*)'
expression = re.compile(expression_str)
# Matches `name = !command`: group 1 is a Python identifier, group 2 is
# the shell command captured by expression_str.
assignment = re.compile(r'([a-zA-Z_][a-zA-Z0-9_]*)\s*=\s*' + expression_str)
@events.on_transform_command
| [
11748,
302,
198,
11748,
850,
14681,
198,
198,
6738,
6101,
7535,
13,
26791,
13,
5239,
1330,
311,
8053,
628,
198,
834,
439,
834,
796,
357,
198,
220,
220,
220,
705,
680,
62,
448,
3256,
198,
220,
220,
220,
705,
680,
62,
5143,
3256,
19... | 2.244275 | 131 |
"""Tests for handling setup action blocks in ModuleManager."""
from pathlib import Path
from astrality.module import ModuleManager
def test_that_setup_block_is_only_executed_once(tmpdir):
    """Setup blocks in modules should only be performed once."""
    marker = Path(tmpdir, 'touched.tmp')
    config = {
        'A': {
            'on_setup': {
                'run': {
                    'shell': f'touch {marker}',
                },
            },
        },
    }
    manager = ModuleManager(modules=config)

    # No setup has run yet, so the marker file must be absent.
    assert not marker.exists()

    # Finishing tasks executes the setup block, creating the marker.
    manager.finish_tasks()
    assert marker.exists()

    # Simulate a fresh application run: drop the manager, clear the marker.
    del manager
    marker.unlink()

    # The setup block is remembered as done and must *not* run again.
    manager = ModuleManager(modules=config)
    manager.finish_tasks()
    assert not marker.exists()
| [
37811,
51,
3558,
329,
9041,
9058,
2223,
7021,
287,
19937,
13511,
526,
15931,
198,
198,
6738,
3108,
8019,
1330,
10644,
198,
198,
6738,
6468,
1373,
414,
13,
21412,
1330,
19937,
13511,
628,
198,
4299,
1332,
62,
5562,
62,
40406,
62,
9967,
... | 2.632813 | 384 |
from torch.nn import LSTM
from torch.autograd import Variable
import torch
import torch.nn.functional as F
from spodernet.frontend import AbstractModel
from spodernet.utils.global_config import Config
| [
6738,
28034,
13,
20471,
1330,
406,
2257,
44,
198,
6738,
28034,
13,
2306,
519,
6335,
1330,
35748,
198,
198,
11748,
28034,
198,
11748,
28034,
13,
20471,
13,
45124,
355,
376,
198,
198,
6738,
599,
375,
1142,
316,
13,
8534,
437,
1330,
2774... | 3.491525 | 59 |
import unittest
from unittest import mock
from groupy import pagers
from groupy.api import chats
| [
11748,
555,
715,
395,
198,
6738,
555,
715,
395,
1330,
15290,
198,
198,
6738,
1448,
88,
1330,
279,
10321,
198,
6738,
1448,
88,
13,
15042,
1330,
40815,
628,
628
] | 3.482759 | 29 |
import sys
import string
from copy import deepcopy
D_OUT_FILE = 'runtime/layout.d'
JS_OUT_FILE = 'runtime/layout.js'
JS_DEF_PREFIX = '$rt_'
# Type sizes in bytes
typeSize = {
'uint8':1,
'uint16':2,
'uint32':4,
'uint64':8,
'int8':1,
'int16':2,
'int32':4,
'int64':8,
'float64':8,
'rawptr':8,
'refptr':8,
'funptr':8,
'shapeptr':8,
}
typeShortName = {
'uint8':'u8',
'uint16':'u16',
'uint32':'u32',
'uint64':'u64',
'int8':'i8',
'int16':'i16',
'int32':'i32',
'int64':'i64',
'float64':'f64',
'rawptr':'rawptr',
'refptr':'refptr',
'funptr':'funptr',
'shapeptr':'shapeptr',
}
# Layout declarations
layouts = [
# String layout
{
'name':'str',
'tag':'string',
'fields':
[
# String length
{ 'name': "len" , 'tag':'uint32' },
# Hash code
{ 'name': 'hash', 'tag':'uint32' },
# UTF-16 character data
{ 'name': 'data', 'tag':'uint16', 'szField':'len' }
]
},
# String table layout (for hash consing)
{
'name':'strtbl',
'tag':'refptr',
'fields':
[
# Capacity, total number of slots
{ 'name':'cap', 'tag':'uint32' },
# Number of strings
{ 'name':'num_strs', 'tag':'uint32', 'init':"0" },
# Array of strings
{ 'name':'str', 'tag':'refptr', 'szField':'cap', 'init':'null' },
]
},
# Rope
{
'name':'rope',
'tag':'rope',
'fields':
[
# Total length
{ 'name':'len', 'tag':'uint32' },
# Left string (rope or string)
{ 'name':'left', 'tag':'refptr' },
# Right string (always a string)
{ 'name':'right', 'tag':'refptr' },
]
},
# Object layout
{
'name':'obj',
'tag':'object',
'fields':
[
# Capacity, number of property slots
{ 'name':"cap" , 'tag':"uint32" },
# Object shape index
{ 'name':"shape_idx", 'tag':"uint32" },
# Property words
{ 'name':"word", 'tag':"uint64", 'szField':"cap", 'tpField':'tag' },
# Property types
{ 'name':"tag", 'tag':"uint8", 'szField':"cap" }
]
},
# Function/closure layout (extends object)
{
'name':'clos',
'tag':'closure',
'extends':'obj',
'fields':
[
# Note: the function pointer is stored in the first object slot
# Number of closure cells
{ 'name':"num_cells" , 'tag':"uint32" },
# Closure cell pointers
{ 'name':"cell", 'tag':"refptr", 'szField':"num_cells", 'init':"null" },
]
},
# Closure cell
{
'name':'cell',
'tag':'refptr',
'fields':
[
# Value word
{ 'name':"word", 'tag':"uint64", 'init':'undef_word', 'tpField':'tag' },
# Value type
{ 'name':"tag", 'tag':"uint8", 'init':'undef_type' },
]
},
# Array layout (extends object)
{
'name':'arr',
'tag':'array',
'extends':'obj',
'fields':
[
]
},
# Array table layout (contains array elements)
{
'name':'arrtbl',
'tag':'refptr',
'fields':
[
# Array capacity
{ 'name':"cap" , 'tag':"uint32" },
# Element words
{ 'name':"word", 'tag':"uint64", 'szField':"cap", 'tpField':'tag' },
# Element types
{ 'name':"tag", 'tag':"uint8", 'szField':"cap" },
]
},
]
# Indent a text string
# Perform basic validation
for layout in layouts:
    # Check for duplicate field names: each (earlier, later) field pair
    # within one layout must have distinct names.
    for fieldIdx, field in enumerate(layout['fields']):
        for prev in layout['fields'][:fieldIdx]:
            if prev['name'] == field['name']:
                raise Exception('duplicate field name ' + field['name'])
# Perform layout extensions: a layout with an 'extends' key inherits the
# fields of a previously declared parent layout (prepended to its own).
for layoutIdx, layout in enumerate(layouts):
    # If this layout does not extend another, skip it
    if 'extends' not in layout:
        continue
    # Find the parent layout (parents must be declared before children)
    parent = None
    for prev in layouts[:layoutIdx]:
        if prev['name'] == layout['extends']:
            parent = prev
            break
    if parent == None:
        raise Exception("parent not found")
    # Add the parent fields (except type) to this layout.
    # deepcopy so later mutation of the child's fields (size/type field
    # resolution, alignment) cannot corrupt the parent's field dicts.
    fieldCopies = []
    for field in parent['fields']:
        fieldCopies += [deepcopy(field)]
    layout['fields'] = fieldCopies + layout['fields']
# Assign layout ids, add the next and header fields
nextLayoutId = 0
for layout in layouts:
    # Sequential numeric id, also baked into the 'header' field's init value
    layoutId = nextLayoutId
    layout['typeId'] = layoutId
    nextLayoutId += 1
    # Every heap object starts with a 'next' pointer (GC free list /
    # chaining) followed by a 'header' word holding its layout id.
    nextField = [{ 'name':'next', 'tag':'refptr', 'init':"null" }]
    typeField = [{ 'name':'header', 'tag':'uint32', 'init':str(layoutId) }]
    layout['fields'] = nextField + typeField + layout['fields']
# Find/resolve size fields
for layout in layouts:
# List of size fields for this layout
layout['szFields'] = []
for fieldIdx, field in enumerate(layout['fields']):
# If this field has no size field, skip it
if 'szField' not in field:
continue
# Find the size field and add it to the size field list
szName = field['szField']
field['szField'] = None
for prev in layout['fields'][:fieldIdx]:
if prev['name'] == szName:
field['szField'] = prev
# Add the field to the size field list
if prev not in layout['szFields']:
layout['szFields'] += [prev]
break
# If the size field was not found, raise an exception
if field['szField'] == None:
raise Exception('size field "%s" of "%s" not found' % (szName, field['name']))
# Find/resolve word type fields
for layout in layouts:
for field in layout['fields']:
# If this field has no type field, skip it
if 'tpField' not in field:
continue
# Find the type field
tpName = field['tpField']
field['tpField'] = None
for prev in layout['fields']:
if prev['name'] == tpName:
field['tpField'] = prev
# If the type field was not found, raise an exception
if field['tpField'] == None:
raise Exception('type field "%s" of "%s" not found' % (tpName, field['name']))
# Compute field alignment requirements
for layout in layouts:
#print('');
#print(layout['name'])
# Current offset since last dynamic alignment
curOfs = 0
# For each field of this layout
for fieldIdx, field in enumerate(layout['fields']):
# Field type size
fSize = typeSize[field['tag']]
# If the previous field was dynamically sized and of smaller type size
if fieldIdx > 0 and 'szField' in layout['fields'][fieldIdx-1] and \
typeSize[layout['fields'][fieldIdx-1]['tag']] < fSize:
# This field will be dynamically aligned
field['dynAlign'] = True
field['alignPad'] = 0
# Reset the current offset
curOfs = 0
else:
# Compute the padding required for alignment
alignRem = curOfs % fSize
if alignRem != 0:
alignPad = fSize - alignRem
else:
alignPad = 0
field['dynAlign'] = False
field['alignPad'] = alignPad
# Update the current offset
curOfs += alignPad + fSize
#print(field['name'])
#print(' fSize: ' + str(fSize))
#print(' align: ' + str(field['alignPad']))
# List of generated functions and declarations
decls = []
# For each layout
for layout in layouts:
ofsPref = layout['name'] + '_ofs_';
setPref = layout['name'] + '_set_';
getPref = layout['name'] + '_get_';
# Define the layout type constant
decls += [ConstDef(
'uint32',
'LAYOUT_' + layout['name'].upper(),
layout['typeId']
)]
# Generate offset computation functions
for fieldIdx, field in enumerate(layout['fields']):
fun = Function('uint32', ofsPref + field['name'], [Var('refptr', 'o')])
if 'szField' in field:
fun.params += [Var('uint32', 'i')]
sumExpr = Cst(0)
for prev in layout['fields'][:fieldIdx]:
# If this field must be dymamically aligned
if prev['dynAlign']:
ptrSize = typeSize['rawptr']
sumExpr = AndExpr(AddExpr(sumExpr, Cst(ptrSize - 1)), Cst(-ptrSize))
elif prev['alignPad'] > 0:
sumExpr = AddExpr(sumExpr, Cst(prev['alignPad']))
# Compute the previous field size
termExpr = Cst(typeSize[prev['tag']])
if 'szField' in prev:
szCall = CallExpr(getPref + prev['szField']['name'], [fun.params[0]])
termExpr = MulExpr(termExpr, szCall)
sumExpr = AddExpr(sumExpr, termExpr)
# If this field must be dymamically aligned
if field['dynAlign']:
ptrSize = typeSize['rawptr']
sumExpr = AndExpr(AddExpr(sumExpr, Cst(ptrSize - 1)), Cst(-ptrSize))
elif field['alignPad'] > 0:
sumExpr = AddExpr(sumExpr, Cst(field['alignPad']))
# Compute the index into the last field
if 'szField' in field:
fieldSize = Cst(typeSize[field['tag']])
sumExpr = AddExpr(sumExpr, MulExpr(fieldSize , fun.params[1]))
fun.stmts += [RetStmt(sumExpr)]
decls += [fun]
# Generate getter methods
for fieldIdx, field in enumerate(layout['fields']):
fun = Function(field['tag'], getPref + field['name'], [Var('refptr', 'o')])
if 'szField' in field:
fun.params += [Var('uint32', 'i')]
ofsCall = CallExpr(ofsPref + field['name'], [fun.params[0]])
if 'szField' in field:
ofsCall.args += [fun.params[1]]
fun.stmts += [RetStmt(LoadExpr(field['tag'], fun.params[0], ofsCall))]
decls += [fun]
# Generate setter methods
for fieldIdx, field in enumerate(layout['fields']):
fun = Function('void', setPref + field['name'], [Var('refptr', 'o')])
if 'szField' in field:
fun.params += [Var('uint32', 'i')]
fun.params += [Var(field['tag'], 'v')]
ofsCall = CallExpr(ofsPref + field['name'], [fun.params[0]])
if 'szField' in field:
ofsCall.args += [fun.params[1]]
fun.stmts += [ExprStmt(StoreExpr(field['tag'], fun.params[0], ofsCall, fun.params[-1]))]
decls += [fun]
# Generate the layout size computation function
fun = Function('uint32', layout['name'] + '_comp_size', [])
szVars = {}
for szField in layout['szFields']:
szVar = Var(szField['tag'], szField['name'])
szVars[szVar.name] = szVar
fun.params += [szVar]
szSum = Cst(0)
for field in layout['fields']:
# If this field must be dymamically aligned
if field['dynAlign']:
ptrSize = typeSize['rawptr']
szSum = AndExpr(AddExpr(szSum, Cst(ptrSize - 1)), Cst(-ptrSize))
elif field['alignPad'] > 0:
szSum = AddExpr(szSum, Cst(field['alignPad']))
szTerm = Cst(typeSize[field['tag']])
if 'szField' in field:
szTerm = MulExpr(szTerm, szVars[field['szField']['name']])
szSum = AddExpr(szSum, szTerm)
fun.stmts += [RetStmt(szSum)]
decls += [fun]
# Generate the sizeof method
fun = Function('uint32', layout['name'] + '_sizeof', [Var('refptr', 'o')])
callExpr = CallExpr(layout['name'] + '_comp_size', [])
for szField in layout['szFields']:
getCall = CallExpr(getPref + szField['name'], [fun.params[0]])
callExpr.args += [getCall]
fun.stmts += [RetStmt(callExpr)]
decls += [fun]
# Generate the allocation function
fun = Function('refptr', layout['name'] + '_alloc', [Var('VM', 'vm')])
szVars = {}
for szField in layout['szFields']:
szVar = Var(szField['tag'], szField['name'])
szVars[szVar.name] = szVar
fun.params += [szVar]
szCall = CallExpr(layout['name'] + '_comp_size', [])
for szField in layout['szFields']:
szCall.args += [szVars[szField['name']]]
objVar = Var('refptr', 'o')
fun.stmts += [DeclStmt(objVar, AllocExpr(szCall, layout['tag']))]
for szField in layout['szFields']:
setCall = CallExpr(setPref + szField['name'], [objVar, szVars[szField['name']]])
fun.stmts += [ExprStmt(setCall)]
for field in layout['fields']:
if 'init' not in field:
continue
initVal = field['init']
# Some init values map to zero and do not need to be written
if initVal == '0':
continue
if initVal == 'null':
continue
if initVal == 'undef_type':
continue
if 'szField' in field:
loopVar = Var('uint32', 'i')
szVar = szVars[field['szField']['name']]
setCall = CallExpr(setPref + field['name'], [objVar, loopVar, Cst(field['init'])])
fun.stmts += [ForLoop(loopVar, szVar, [ExprStmt(setCall)])]
else:
setCall = CallExpr(setPref + field['name'], [objVar, Cst(field['init'])])
fun.stmts += [ExprStmt(setCall)]
fun.stmts += [RetStmt(objVar)]
decls += [fun]
# Generate the GC visit function
fun = Function('void', layout['name'] + '_visit_gc', [Var('VM', 'vm'), Var('refptr', 'o')])
vmVar = fun.params[0]
objVar = fun.params[1]
for field in layout['fields']:
# If this is not a heap reference field, skip it
if field['tag'] != 'refptr' and (not 'tpField' in field):
continue
# If this is a variable-size field
if 'szField' in field:
szVar = Var('uint32', field['szField']['name'])
szStmt = DeclStmt(szVar, CallExpr(getPref + field['szField']['name'], [objVar]))
fun.stmts += [szStmt]
loopVar = Var('uint32', 'i')
# If this is a word/type pair
if 'tpField' in field:
getWCall = CallExpr(getPref + field['name'], [objVar, loopVar])
getTCall = CallExpr(getPref + field['tpField']['name'], [objVar, loopVar])
fwdCall = CallExpr('gcForward', [vmVar, getWCall, getTCall])
else:
getCall = CallExpr(getPref + field['name'], [objVar, loopVar])
fwdCall = CallExpr('gcForward', [vmVar, getCall])
setCall = CallExpr(setPref + field['name'], [objVar, loopVar, fwdCall])
fun.stmts += [ForLoop(loopVar, szVar, [ExprStmt(setCall)])]
else:
# If this is a word/type pair
if 'tpField' in field:
getWCall = CallExpr(getPref + field['name'], [objVar])
getTCall = CallExpr(getPref + field['tpField']['name'], [objVar])
fwdCall = CallExpr('gcForward', [vmVar, getWCall, getTCall])
else:
getCall = CallExpr(getPref + field['name'], [objVar])
fwdCall = CallExpr('gcForward', [vmVar, getCall])
setCall = CallExpr(setPref + field['name'], [objVar, fwdCall])
fun.stmts += [ExprStmt(setCall)]
decls += [fun]
# Generate the sizeof dispatch method
fun = Function('uint32', 'layout_sizeof', [Var('refptr', 'o')])
typeVar = Var('uint32', 't')
fun.stmts += [DeclStmt(typeVar, CallExpr('obj_get_header', [fun.params[0]]))]
for layout in layouts:
cmpExpr = EqExpr(typeVar, Var('uint32', 'LAYOUT_' + layout['name'].upper()))
retStmt = RetStmt(CallExpr(layout['name'] + '_sizeof', [fun.params[0]]))
fun.stmts += [IfStmt(cmpExpr, [retStmt])]
fun.stmts += [ExprStmt(CallExpr('assert', [Cst('false'), Cst('"invalid layout in layout_sizeof"')]))]
decls += [fun]
# Generate the GC visit dispatch method
fun = Function('void', 'layout_visit_gc', [Var('VM', 'vm'), Var('refptr', 'o')])
typeVar = Var('uint32', 't')
fun.stmts += [DeclStmt(typeVar, CallExpr('obj_get_header', [fun.params[1]]))]
for layout in layouts:
cmpExpr = EqExpr(typeVar, Var('uint32', 'LAYOUT_' + layout['name'].upper()))
callStmt = ExprStmt(CallExpr(layout['name'] + '_visit_gc', [fun.params[0], fun.params[1]]))
retStmt = RetStmt()
fun.stmts += [IfStmt(cmpExpr, [callStmt, retStmt])]
fun.stmts += [ExprStmt(CallExpr('assert', [Cst('false'), Cst('"invalid layout in layout_visit_gc"')]))]
decls += [fun]
# Open the output files for writing
DFile = open(D_OUT_FILE, 'w')
JSFile = open(JS_OUT_FILE, 'w')
comment = \
'//\n' + \
'// Code auto-generated from "' + sys.argv[0] + '". Do not modify.\n' + \
'//\n\n'
DFile.write(comment)
JSFile.write(comment)
DFile.write('module runtime.layout;\n')
DFile.write('\n');
DFile.write('import runtime.vm;\n')
DFile.write('import runtime.gc;\n')
DFile.write('\n');
DFile.write('alias ubyte* funptr;\n');
DFile.write('alias ubyte* shapeptr;\n');
DFile.write('alias ubyte* rawptr;\n');
DFile.write('alias ubyte* refptr;\n');
DFile.write('alias byte int8;\n');
DFile.write('alias short int16;\n');
DFile.write('alias int int32;\n');
DFile.write('alias long int64;\n');
DFile.write('alias ubyte uint8;\n');
DFile.write('alias ushort uint16;\n');
DFile.write('alias uint uint32;\n');
DFile.write('alias ulong uint64;\n');
DFile.write('alias double float64;\n');
DFile.write('\n');
# Output D and JS code, write to file
for decl in decls:
JSFile.write(decl.genJS() + '\n\n')
DFile.write(decl.genD() + '\n\n')
DFile.close()
JSFile.close()
| [
11748,
25064,
198,
11748,
4731,
198,
6738,
4866,
1330,
2769,
30073,
198,
198,
35,
62,
12425,
62,
25664,
796,
705,
43282,
14,
39786,
13,
67,
6,
198,
20120,
62,
12425,
62,
25664,
796,
705,
43282,
14,
39786,
13,
8457,
6,
198,
198,
2012... | 2.091478 | 8,625 |
import os
import sys
from os.path import abspath, join
from seisflows.tools import unix
from seisflows.tools.code import call, findpath, saveobj
from seisflows.tools.config import ParameterError, custom_import, \
SeisflowsObjects, SeisflowsParameters, SeisflowsPaths
PAR = SeisflowsParameters()
PATH = SeisflowsPaths()
class tiger_md_gpu(custom_import('system', 'tiger_md')):
    """ An interface through which to submit workflows, run tasks in serial or
      parallel, and perform other system functions.

      By hiding environment details behind a python interface layer, these
      classes provide a consistent command set across different computing
      environments.

      For important additional information, please see
      http://seisflows.readthedocs.org/en/latest/manual/manual.html#system-configuration
    """

    def check(self):
        """ Checks parameters and paths
        """
        # Deliberately disabled: contributed code that has not been
        # re-tested. Everything below the raise is unreachable until the
        # raise is removed.
        raise NotImplementedError('Provided by Etienne Bachmann. Not recently testested and not likely to work out of the box.')
        # why does Etienne have it this way?
        if 'NGPU' not in PAR:
            setattr(PAR, 'NGPU', 4)

        super(tiger_md_gpu, self).check()

    def submit(self, workflow):
        """ Submits workflow to the SLURM scheduler via sbatch.
        """
        unix.mkdir(PATH.OUTPUT)
        unix.cd(PATH.OUTPUT)
        self.checkpoint()

        # Link the scratch directory next to the submission directory.
        # Bug fix: `exists` was never imported; use os.path.exists.
        if not os.path.exists(PATH.SUBMIT + '/' + 'scratch'):
            unix.ln(PATH.SCRATCH, PATH.SUBMIT + '/' + 'scratch')

        call('sbatch '
             + '--job-name=%s ' % PAR.SUBTITLE
             + '--output=%s ' % (PATH.SUBMIT + '/' + 'output.log')
             + '--nodes 1 '
             # Bug fix: '--ntasks=% ' was an invalid printf format spec
             # and raised ValueError at runtime; '%d' was intended.
             + '--ntasks=%d ' % PAR.NGPU
             + '--ntasks-per-socket=%d ' % PAR.NGPU
             + '--gres=gpu:%d ' % PAR.NGPU
             + '--time=%d ' % PAR.WALLTIME
             + findpath('seisflows.system') + '/' + 'wrappers/submit '
             + PATH.OUTPUT)

    def run(self, classname, funcname, hosts='all', **kwargs):
        """ Runs tasks in serial or parallel on specified hosts
        """
        self.checkpoint()
        self.save_kwargs(classname, funcname, kwargs)

        if hosts == 'all':
            call('srun '
                 + '--wait=0 '
                 + join(findpath('seisflows.system'), 'wrappers/run ')
                 + PATH.OUTPUT + ' '
                 + classname + ' '
                 + funcname)
        elif hosts == 'head':
            # run on head node
            call('srun '
                 + '--wait=0 '
                 + join(findpath('seisflows.system'), 'wrappers/run_head ')
                 + PATH.OUTPUT + ' '
                 + classname + ' '
                 + funcname)

    def getnode(self):
        """ Gets id of the currently running task from SLURM environment
            variables.
        """
        gid = os.getenv('SLURM_GTIDS').split(',')
        lid = int(os.getenv('SLURM_LOCALID'))
        return int(gid[lid])
| [
198,
11748,
28686,
198,
11748,
25064,
198,
6738,
28686,
13,
6978,
1330,
2352,
6978,
11,
4654,
198,
198,
6738,
384,
271,
44041,
13,
31391,
1330,
555,
844,
198,
6738,
384,
271,
44041,
13,
31391,
13,
8189,
1330,
869,
11,
1064,
6978,
11,
... | 2.100142 | 1,408 |
#! /usr/bin/env python3
import fileinput
import random
import re
from dstruct.Mention import Mention
from dstruct.Sentence import Sentence
from dstruct.Relation import Relation
from helper.dictionaries import load_dict
from helper.easierlife import get_dict_from_TSVline, no_op, TSVstring2list
# Load the gene<->hpoterm dictionary
genehpoterms_dict = load_dict("genehpoterms")
# Supervise the candidates
if __name__ == "__main__":
# Process input
with fileinput.input() as input_files:
for line in input_files:
# Parse the TSV line
line_dict = get_dict_from_TSVline(
line, ["doc_id", "sent_id", "wordidxs", "words", "poses",
"ners", "lemmas", "dep_paths", "dep_parents",
"bounding_boxes", "gene_entities", "gene_wordidxss",
"gene_is_corrects", "gene_types",
"hpoterm_entities", "hpoterm_wordidxss",
"hpoterm_is_corrects", "hpoterm_types"],
[no_op, int, lambda x: TSVstring2list(x, int), TSVstring2list,
TSVstring2list, TSVstring2list, TSVstring2list,
TSVstring2list, lambda x: TSVstring2list(x, int),
TSVstring2list, # these are for the sentence
TSVstring2list, lambda x: TSVstring2list(x, sep="!~!"),
TSVstring2list, TSVstring2list, # these are for the genes
TSVstring2list, lambda x: TSVstring2list(x, sep="!~!"),
TSVstring2list, TSVstring2list, # these are for the HPO
])
# Remove the genes that are unsupervised copies or duplicates
supervised_idxs = set()
unsupervised_idxs = set()
for i in range(len(line_dict["gene_is_corrects"])):
if line_dict["gene_is_corrects"][i] == "n":
unsupervised_idxs.add(i)
else:
if line_dict["gene_types"][i] != "GENE_SUP_contr_2":
# The above condition is to avoid duplicates
supervised_idxs.add(i)
survived_unsuperv_idxs = set()
for i in unsupervised_idxs:
wordidxs = line_dict["gene_wordidxss"][i]
found = False
for j in supervised_idxs:
if line_dict["gene_wordidxss"][j] == wordidxs:
found = True
break
if not found:
survived_unsuperv_idxs.add(i)
to_keep = sorted(survived_unsuperv_idxs | supervised_idxs)
new_gene_entities = []
new_gene_wordidxss = []
new_gene_is_corrects = []
new_gene_types = []
for i in to_keep:
new_gene_entities.append(line_dict["gene_entities"][i])
new_gene_wordidxss.append(line_dict["gene_wordidxss"][i])
new_gene_is_corrects.append(line_dict["gene_is_corrects"][i])
new_gene_types.append(line_dict["gene_types"][i])
line_dict["gene_entities"] = new_gene_entities
line_dict["gene_wordidxss"] = new_gene_wordidxss
line_dict["gene_is_corrects"] = new_gene_is_corrects
line_dict["gene_types"] = new_gene_types
# Remove the hpoterms that are unsupervised copies
supervised_idxs = set()
unsupervised_idxs = set()
for i in range(len(line_dict["hpoterm_is_corrects"])):
if line_dict["hpoterm_is_corrects"][i] == "n":
unsupervised_idxs.add(i)
else:
supervised_idxs.add(i)
survived_unsuperv_idxs = set()
for i in unsupervised_idxs:
wordidxs = line_dict["hpoterm_wordidxss"][i]
found = False
for j in supervised_idxs:
if line_dict["hpoterm_wordidxss"][j] == wordidxs:
found = True
break
if not found:
survived_unsuperv_idxs.add(i)
to_keep = sorted(survived_unsuperv_idxs | supervised_idxs)
new_hpoterm_entities = []
new_hpoterm_wordidxss = []
new_hpoterm_is_corrects = []
new_hpoterm_types = []
for i in to_keep:
new_hpoterm_entities.append(line_dict["hpoterm_entities"][i])
new_hpoterm_wordidxss.append(line_dict["hpoterm_wordidxss"][i])
new_hpoterm_is_corrects.append(
line_dict["hpoterm_is_corrects"][i])
new_hpoterm_types.append(line_dict["hpoterm_types"][i])
line_dict["hpoterm_entities"] = new_hpoterm_entities
line_dict["hpoterm_wordidxss"] = new_hpoterm_wordidxss
line_dict["hpoterm_is_corrects"] = new_hpoterm_is_corrects
line_dict["hpoterm_types"] = new_hpoterm_types
# Create the sentence object where the two mentions appear
sentence = Sentence(
line_dict["doc_id"], line_dict["sent_id"],
line_dict["wordidxs"], line_dict["words"], line_dict["poses"],
line_dict["ners"], line_dict["lemmas"], line_dict["dep_paths"],
line_dict["dep_parents"], line_dict["bounding_boxes"])
# Skip weird sentences
if sentence.is_weird():
continue
gene_mentions = []
hpoterm_mentions = []
positive_relations = []
gene_wordidxs = set()
hpoterm_wordidxs = set()
# Iterate over each pair of (gene,phenotype) mentions
for g_idx in range(len(line_dict["gene_is_corrects"])):
g_wordidxs = TSVstring2list(
line_dict["gene_wordidxss"][g_idx], int)
for idx in g_wordidxs:
gene_wordidxs.add(idx)
gene_mention = Mention(
"GENE", line_dict["gene_entities"][g_idx],
[sentence.words[j] for j in g_wordidxs])
if line_dict["gene_is_corrects"][g_idx] == "n":
gene_mention.is_correct = None
elif line_dict["gene_is_corrects"][g_idx] == "f":
gene_mention.is_correct = False
elif line_dict["gene_is_corrects"][g_idx] == "t":
gene_mention.is_correct = True
else:
assert False
gene_mention.type = line_dict["gene_types"][g_idx]
assert not gene_mention.type.endswith("_UNSUP")
gene_mentions.append(gene_mention)
for h_idx in range(len(line_dict["hpoterm_is_corrects"])):
h_wordidxs = TSVstring2list(
line_dict["hpoterm_wordidxss"][h_idx], int)
for idx in h_wordidxs:
hpoterm_wordidxs.add(idx)
hpoterm_mention = Mention(
"hpoterm", line_dict["hpoterm_entities"][h_idx],
[sentence.words[j] for j in h_wordidxs])
if line_dict["hpoterm_is_corrects"][h_idx] == "n":
hpoterm_mention.is_correct = None
elif line_dict["hpoterm_is_corrects"][h_idx] == "f":
hpoterm_mention.is_correct = False
elif line_dict["hpoterm_is_corrects"][h_idx] == "t":
hpoterm_mention.is_correct = True
else:
assert False
hpoterm_mention.type = line_dict["hpoterm_types"][h_idx]
assert not hpoterm_mention.type.endswith("_UNSUP")
hpoterm_mentions.append(hpoterm_mention)
# Skip if the word indexes overlab
if set(g_wordidxs) & set(h_wordidxs):
continue
# Skip if the mentions are too far away
gene_start = gene_mention.wordidxs[0]
hpoterm_start = hpoterm_mention.wordidxs[0]
gene_end = gene_mention.wordidxs[-1]
hpoterm_end = hpoterm_mention.wordidxs[-1]
limits = sorted(
(gene_start, hpoterm_start, gene_end, hpoterm_end))
start = limits[0]
betw_start = limits[1]
betw_end = limits[2]
if betw_end - betw_start > 50:
continue
relation = Relation(
"GENEPHENO", gene_mention, hpoterm_mention)
# Supervise
supervise(relation, gene_mention, hpoterm_mention,
sentence)
if relation.is_correct:
positive_relations.append(
(gene_mention, hpoterm_mention))
# Print!
print(relation.tsv_dump())
# Create some artificial negative examples:
# for each (gene, phenotype) pair that is labelled as positive
# example, select one word w in the same sentence that (1) is not a
# gene mention candidate and (2) is not a phenotype mention
# candidate, add (gene, w) and (w, phenotype) as negative example
avail_wordidxs = (
set(line_dict["wordidxs"]) - set(hpoterm_wordidxs)) - \
set(gene_wordidxs)
avail_wordidxs = list(avail_wordidxs)
if len(avail_wordidxs) > 0:
fake_rels = []
for (gene_mention, hpoterm_mention) in positive_relations:
other_word = sentence.words[random.choice(avail_wordidxs)]
fake_gene_mention = Mention(
"FAKE_GENE", other_word.lemma, [other_word, ])
fake_hpo_mention = Mention(
"FAKE_HPOTERM", other_word.lemma, [other_word, ])
fake_rel_1 = Relation(
"GENEPHENO_SUP_POSFAKEGENE", fake_gene_mention,
hpoterm_mention)
fake_rel_2 = Relation(
"GENEPHENO_SUP_POSFAKEHPO", gene_mention,
fake_hpo_mention)
fake_rel_1.is_correct = False
fake_rel_2.is_correct = False
# Print!
print(fake_rel_1.tsv_dump())
print(fake_rel_2.tsv_dump())
# Create more artificial negative examples:
# for each gene candidate G in the sentence, if the pattern G
# <Verb> X appears in the same sentence and X is not a phenotype
# mention candidate, add (gene, X) as negative examples
for gene_mention in gene_mentions:
try:
next_word = sentence.words[gene_mention.wordidxs[-1] + 1]
except IndexError:
continue
if re.search('^VB[A-Z]*$', next_word.pos) and \
next_word.word not in ["{", "}", "(", ")", "[", "]"]:
try:
after_next_word = sentence.words[
next_word.in_sent_idx + 1]
except IndexError:
continue
if after_next_word.in_sent_idx in hpoterm_wordidxs:
continue
fake_hpo_mention = Mention(
"FAKE_HPOTERM", after_next_word.lemma,
[after_next_word, ])
fake_rel = Relation(
"GENEPHENO_SUP_FAKEHPO", gene_mention,
fake_hpo_mention)
fake_rel.is_correct = False
print(fake_rel.tsv_dump())
# Create more artificial negative examples:
# as before but for phenotypes
for hpo_mention in hpoterm_mentions:
try:
next_word = sentence.words[hpo_mention.wordidxs[-1] + 1]
except IndexError:
continue
if re.search('^VB[A-Z]*$', next_word.pos) and \
next_word.word not in ["{", "}", "(", ")", "[", "]"]:
try:
after_next_word = sentence.words[
next_word.in_sent_idx + 1]
except IndexError:
continue
if after_next_word.in_sent_idx in gene_wordidxs:
continue
fake_gene_mention = Mention(
"FAKE_GENE", after_next_word.lemma,
[after_next_word, ])
fake_rel = Relation(
"GENEPHENO_SUP_FAKEGENE", fake_gene_mention,
hpo_mention)
fake_rel.is_correct = False
print(fake_rel.tsv_dump())
| [
2,
0,
1220,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
11748,
2393,
15414,
198,
11748,
4738,
198,
11748,
302,
198,
198,
6738,
288,
7249,
13,
44,
1463,
1330,
337,
1463,
198,
6738,
288,
7249,
13,
31837,
594,
1330,
11352,
594,
19... | 1.731614 | 7,601 |
import numpy as np
from typing import Tuple
from IMLearn.metalearners.adaboost import AdaBoost
from IMLearn.learners.classifiers import DecisionStump
from utils import *
import plotly.graph_objects as go
from plotly.subplots import make_subplots
from IMLearn.metrics.loss_functions import accuracy
def generate_data(n: int, noise_ratio: float) -> Tuple[np.ndarray, np.ndarray]:
"""
Generate a dataset in R^2 of specified size
Parameters
----------
n: int
Number of samples to generate
noise_ratio: float
Ratio of labels to invert
Returns
-------
X: np.ndarray of shape (n_samples,2)
Design matrix of samples
y: np.ndarray of shape (n_samples,)
Labels of samples
"""
'''
generate samples X with shape: (num_samples, 2) and labels y with shape (num_samples).
num_samples: the number of samples to generate
noise_ratio: invert the label for this ratio of the samples
'''
X, y = np.random.rand(n, 2) * 2 - 1, np.ones(n)
y[np.sum(X ** 2, axis=1) < 0.5 ** 2] = -1
y[np.random.choice(n, int(noise_ratio * n))] *= -1
return X, y
if __name__ == '__main__':
np.random.seed(0)
fit_and_evaluate_adaboost(noise=0)
fit_and_evaluate_adaboost(noise=0.4)
| [
11748,
299,
32152,
355,
45941,
198,
6738,
19720,
1330,
309,
29291,
198,
6738,
314,
5805,
451,
77,
13,
28469,
451,
2741,
13,
324,
34748,
455,
1330,
47395,
45686,
198,
6738,
314,
5805,
451,
77,
13,
35720,
364,
13,
4871,
13350,
1330,
264... | 2.495108 | 511 |
"""
FitPanel class contains fields allowing to fit models and data
:note: For Fit to be performed the user should check at least one parameter
on fit Panel window.
"""
import wx
import wx.lib.newevent
from wx.aui import AuiNotebook as Notebook
import datetime
from bumps.gui.convergence_view import ConvergenceView
from bumps.gui.uncertainty_view import UncertaintyView, CorrelationView, TraceView
from bumps.dream.stats import var_stats, format_vars
from sas.sasgui.guiframe.panel_base import PanelBase
from sas.sasgui.guiframe.events import StatusEvent
(PlotResultEvent, EVT_PLOT_RESULT) = wx.lib.newevent.NewEvent()
class ResultPanel(Notebook, PanelBase):
"""
FitPanel class contains fields allowing to fit models and data
:note: For Fit to be performed the user should check at least one parameter
on fit Panel window.
"""
## Internal name for the AUI manager
window_name = "Result panel"
## Title to appear on top of the window
window_caption = "Result Panel"
CENTER_PANE = True
def __init__(self, parent, manager=None, *args, **kwargs):
"""
"""
style = ((wx.aui.AUI_NB_WINDOWLIST_BUTTON
| wx.aui.AUI_NB_DEFAULT_STYLE
| wx.CLIP_CHILDREN)
& ~wx.aui.AUI_NB_CLOSE_ON_ACTIVE_TAB)
Notebook.__init__(self, parent, wx.ID_ANY, style=style)
PanelBase.__init__(self, parent)
self.frame = parent
self.Bind(EVT_PLOT_RESULT, self.on_plot_results)
self.frame.Bind(wx.EVT_CLOSE, self.on_close)
self._manager = None
| [
37811,
198,
31805,
26639,
1398,
4909,
7032,
5086,
284,
4197,
220,
4981,
290,
220,
1366,
198,
198,
25,
11295,
25,
1114,
25048,
284,
307,
6157,
262,
2836,
815,
2198,
379,
1551,
530,
11507,
198,
220,
220,
220,
319,
4197,
18810,
4324,
13,... | 2.521327 | 633 |
from random import randint
from random_word import RandomWords
random_num_list=[]
for i in range(200):
random_num_list.append(randint(0,1000))
r = RandomWords()
random_word_list=r.get_random_words()
list_type=input("Which type of list would you like to sort and search, words or numbers?")
if list_type=="words":
print(random_word_list)
search=input("What word do you want to find?")
my_list=mysort(random_word_list)
if list_type=="numbers":
print(random_num_list)
search=input("What number do you want to find?")
search=int(search)
my_list=mysort(random_num_list)
print(my_list)
result=binary(my_list,search)
if result != -1:
print("Element is present at index "+ str(result)+". This means this is element number "+str(result+1)+" in the list.")
else:
print("Element is not present in this list")
| [
6738,
4738,
1330,
43720,
600,
201,
198,
6738,
4738,
62,
4775,
1330,
14534,
37117,
201,
198,
201,
198,
25120,
62,
22510,
62,
4868,
28,
21737,
201,
198,
1640,
1312,
287,
2837,
7,
2167,
2599,
201,
198,
220,
220,
220,
4738,
62,
22510,
6... | 2.555556 | 351 |
'''
Cleanup all VMs (find from ZStack database). It is mainly for debuging.
@author: Youyk
'''
import threading
import time
import sys
import zstackwoodpecker.operations.resource_operations as res_ops
import zstackwoodpecker.operations.config_operations as con_ops
import zstackwoodpecker.operations.net_operations as net_ops
import zstackwoodpecker.operations.vm_operations as vm_ops
import zstackwoodpecker.operations.account_operations as acc_ops
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.test_lib as test_lib
import apibinding.inventory as inventory
thread_threshold=1000
session_uuid = None
session_to = None
session_mc = None
| [
7061,
6,
198,
198,
32657,
929,
477,
569,
10128,
220,
357,
19796,
422,
1168,
25896,
6831,
737,
632,
318,
8384,
329,
14257,
278,
13,
198,
198,
31,
9800,
25,
921,
48361,
198,
7061,
6,
198,
11748,
4704,
278,
198,
11748,
640,
198,
11748,... | 3.217391 | 207 |
from __future__ import annotations
from typing import Tuple, List
from random import randint
from time import perf_counter
import numpy as np
from common.util import argmax
from common.ticktack import *
def uct_search(init_state,
util_fn: Callable[[Tuple[int, ...]], float],
selection_criteria: str = 'max_child',
exploration_bias: float = 1. / np.sqrt(2.),
max_epochs: int = 200,
max_robust_min_epochs: int = 20):
"""
Performs Monte Carlo tree search using UCT method to find optimal next state.
Args:
init_state: initial root state from which to make choice
util_fn: score calculation function
selection_criteria: choice of 'max_child', 'robust_child', 'max_rebust_child',
or 'secure_child'
exploration_bias: bias which determines propensity to expand nodes
max_epochs: maximum number of epochs to perform
max_robust_min_epochs: number of epochs before testing max-robust selection criteria;
only applies to max-robust method
Returns:
Tuple with optimal state, estimated score, and profiling statistics
"""
# This will store some profiling statistics.
stats = {'explored_count': 0,
'visit_count': 0,
'util_fn_evals': 0,
'simulation_time': 0}
# Start profiling timer.
t_start = perf_counter()
def tree_policy(v: Node) -> Node:
"""
Traverses tree by expanding unexplored nodes, and / or selects children with maximal
UCT until terminal state is encountered.
"""
# Loop while v is non-terminal.
while len(v.state) < 9:
# Expand and return v if it is unexplored.
if len(v.a_collapsed) > 0:
return expand(v)
# Otherwise, return v's most promising child.
else:
v = best_child(v, exploration_bias)
return v
def expand(v):
"""
Expands a given node with available action and adds new child to parent.
"""
# Update profiling statistics.
stats['explored_count'] += 1
assert(len(v.a_collapsed) > 0)
# Pick a unexplored action from v.
a = v.a_collapsed.pop(0)
# Create new child for v with action a.
v_new = Node(state=v.state + (a,),
parent=v,
a_incoming=a)
# Append new node to parent's list of children.
v.children.append(v_new)
return v_new
def best_child(v: Node, c):
"""Selects child node which maximises UCT function."""
best_uct_child = (float('-inf'), None)
for child in v.children:
# Calculate average expected reward for child.
q_bar = child.q / child.n
# Calculate UCT function for child.
uct = q_bar + c * np.sqrt(2.0 * np.log(v.n) / child.n)
# Update best child.
if (uct,) > best_uct_child:
best_uct_child = (uct, child)
return best_uct_child[1]
def backup(v: Node, delta):
"""Traverses tree from child to parent, propogating count and score."""
# Iterate until root node is encountered.
while v is not None:
# Increment visit count.
v.n += 1
# Update profiling statistics.
stats['visit_count'] += 1
# Propogate score.
v.q += delta
# Go to parent next.
v = v.parent
# Handle selection criteria a.1 max-child, a.2 robust-child, and a.3 secure-child.
if selection_criteria == 'max' or 'robust' or 'secure':
root = Node(state=init_state)
# Perform MCTS algorithm main loop.
for epoch in range(max_epochs):
vl = tree_policy(root)
sim_t_start = perf_counter()
delta = default_policy(vl.state)
stats['simulation_time'] += perf_counter() - sim_t_start
backup(vl, delta)
# This helper extracts appropriate value from child depending on selection criteria.
# Return state of optimal child to caller.
optim_child = root.children[
argmax(
(crit_selector(selection_criteria, c)
for c in root.children))
]
optim_score = crit_selector(selection_criteria, optim_child)
stats['t_elapsed'] = t_start - perf_counter()
return (optim_child.state, optim_score, stats)
# Handle selection criteria b max-robust-child.
elif selection_criteria == 'max_robust':
root = Node(state=init_state)
# Perform MCTS algorithm main loop; max-robust variant.
for epoch in range(max_epochs):
vl = tree_policy(root)
delta = 1.0 - default_policy(vl.state)
backup(vl, delta)
# Start testing max-robust selection criteria after minimum epochs.
if epoch > max_robust_min_epochs:
# Determine index of child with maximum reward.
q_max = argmax((c.q for c in root.children))
# Determine index of child with maximum visit count.
n_max = argmax((c.n for c in root.children))
# If above 2 indices agree, return state of optimal node to caller.
if q_max == n_max:
optim_child = root.children[q_max]
optim_score = (optim_child.q, optim_child.n)
stats['t_elapsed'] = t_start - perf_counter()
return (optim_child.state, optim_score, stats)
# Selection criteria is invalid.
else:
# Throw exception.
raise ValueError(
'selection_criteria must be one of \'max\', \'robust\', \'max_robust\', or \'secure\'.')
| [
6738,
11593,
37443,
834,
1330,
37647,
198,
6738,
19720,
1330,
309,
29291,
11,
7343,
198,
6738,
4738,
1330,
43720,
600,
198,
6738,
640,
1330,
23035,
62,
24588,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
2219,
13,
22602,
1330,
1822,
9... | 2.206067 | 2,703 |
from sqlalchemy.orm import backref
from sqlalchemy.orm import deferred
from db import db
from model.dataset_model import Dataset
from sqlalchemy.dialects.postgresql import JSONB
class Network(db.Model):
"""
Network data class
"""
# db table name
__tablename__ = 'network'
# realtionship to dataset table
dataset_id = db.Column(db.Integer, db.ForeignKey('dataset.id'), primary_key=True, nullable=False)
dataset = db.relationship(Dataset, backref=backref("network", cascade="all, delete-orphan"))
# columns
network_id = db.Column(db.Integer, primary_key=True, nullable=False)
name = db.Column(db.String(255), nullable=False)
network = deferred(db.Column(JSONB))
metric_distance = db.Column(db.Float)
speed = db.Column(db.Float)
acceleration = db.Column(db.Float)
distance_centroid = db.Column(db.Float)
direction = db.Column(db.Float)
euclidean_distance = db.Column(db.Float)
finished = db.Column(db.Boolean, default=False)
error = db.Column(db.Boolean, default=False)
status = db.Column(db.String(255))
metric = db.Column(db.String(255), nullable=False, default='euclidean')
hierarchy = deferred(db.Column(JSONB))
| [
6738,
44161,
282,
26599,
13,
579,
1330,
736,
5420,
201,
198,
6738,
44161,
282,
26599,
13,
579,
1330,
28651,
201,
198,
201,
198,
6738,
20613,
1330,
20613,
201,
198,
6738,
2746,
13,
19608,
292,
316,
62,
19849,
1330,
16092,
292,
316,
201... | 2.600418 | 478 |
from flask import (Blueprint, flash, request, current_app, jsonify, Response)
from warden_modules import (warden_metadata,
positions_dynamic,
generatenav, specter_df,
current_path, regenerate_nav,
home_path, transactions_fx)
from connections import tor_request
from pricing_engine.engine import price_ondate, historical_prices
from flask_login import login_required, current_user
from random import randrange
from pricing_engine.engine import fx_rate, realtime_price
from utils import heatmap_generator, pickle_it
from models import Trades, AccountInfo, TickerInfo
from datetime import datetime, timedelta
from dateutil import parser
from dateutil.relativedelta import relativedelta
import mhp as mrh
import simplejson
import logging
import pandas as pd
import numpy as np
import json
import os
import math
import csv
import requests
api = Blueprint('api', __name__)
@api.route("/gitreleases", methods=["GET"])
@login_required
@api.route("/txs_json", methods=['GET'])
@login_required
@api.route("/satoshi_quotes_json", methods=['GET'])
@login_required
# API End Point checks for wallet activity
# Gets a local pickle file and dumps - does not work with pandas df
# Do not include extension pkl on argument
@api.route("/get_pickle", methods=['GET'])
@login_required
@api.route("/check_activity", methods=['GET'])
@login_required
# API End Point with all WARden metadata
@api.route("/warden_metadata", methods=['GET'])
@login_required
# Returns a JSON with Test Response on TOR
@api.route("/testtor", methods=["GET"])
@login_required
# API End point
# Json for main page with realtime positions
@api.route("/positions_json", methods=["GET"])
@login_required
# Returns current BTC price and FX rate for current user
# This is the function used at the layout navbar to update BTC price
# Please note that the default is to update every 20s (MWT(20) above)
@ api.route("/realtime_btc", methods=["GET"])
@login_required
# API end point - cleans notifications and creates a new checkpoint
@api.route("/dismiss_notification", methods=["POST"])
@login_required
# API end point to return Specter data
# args: ?load=True (True = loads saved json, False = refresh data)
@api.route("/specter", methods=["GET"])
@login_required
# Latest Traceback message
@api.route("/traceback_error", methods=["GET"])
@login_required
# API end point
# Function returns summary statistics for portfolio NAV and values
# Main function for portfolio page
@api.route("/portstats", methods=["GET", "POST"])
@login_required
# API end point - returns a json with NAV Chartdata
@api.route("/navchartdatajson", methods=["GET", "POST"])
@login_required
# Creates a table with dates and NAV values
# API end point - returns a json with NAV Chartdata
@api.route("/stackchartdatajson", methods=["GET", "POST"])
@login_required
# Creates a table with dates and NAV values
# Return the price of a ticker on a given date
# Takes arguments:
# ticker: Single ticker for filter (default = NAV)
# date: date to get price
@api.route("/getprice_ondate", methods=["GET"])
@login_required
@api.route("/fx_lst", methods=["GET"])
@login_required
# Receiver argument ?term to return a list of fx (fiat and digital)
# Searches the list both inside the key as well as value of dict
@api.route("/heatmapbenchmark_json", methods=["GET"])
@login_required
# Return Monthly returns for Benchmark and Benchmark difference from NAV
# Takes arguments:
# ticker - single ticker for filter
@api.route("/histvol", methods=["GET", "POST"])
@login_required
# Returns a json with data to create the vol chart
# takes inputs from get:
# ticker, meta (true returns only metadata), rolling (in days)
# metadata (max, mean, etc)
@api.route("/mempool_json", methods=["GET", "POST"])
@login_required
@api.route("/portfolio_compare_json", methods=["GET"])
@login_required
# Compare portfolio performance to a list of assets
# Takes arguments:
# tickers - (comma separated. ex: BTC,ETH,AAPL)
# start - start date in the format YYMMDD
# end - end date in the format YYMMDD
# method - "chart": returns NAV only data for charts
# - "all": returns all data (prices and NAV)
# - "meta": returns metadata information
@api.route('/log')
@login_required
@api.route('/broadcaster')
@login_required
@api.route("/assetlist", methods=["GET", "POST"])
# List of available tickers. Also takes argument {term} so this can be used
# in autocomplete forms
@api.route("/aclst", methods=["GET", "POST"])
@login_required
# Returns JSON for autocomplete on account names.
# Gathers account names from trades and account_info tables
# Takes on input ?term - which is the string to be found
@api.route("/portfolio_tickers_json", methods=["GET", "POST"])
@login_required
# Returns a list of all tickers ever traded in this portfolio
@api.route("/generatenav_json", methods=["GET", "POST"])
@login_required
# Creates a table with dates and NAV values
# Takes 2 arguments:
# force=False (default) : Forces the NAV generation without reading saved file
# filter=None (default): Filter to be applied to Pandas df (df.query(filter))
| [
6738,
42903,
1330,
357,
14573,
4798,
11,
7644,
11,
220,
2581,
11,
1459,
62,
1324,
11,
220,
33918,
1958,
11,
18261,
8,
198,
6738,
266,
5872,
62,
18170,
1330,
357,
904,
268,
62,
38993,
11,
198,
220,
220,
220,
220,
220,
220,
220,
220... | 3.110913 | 1,686 |
from user import User
users = [
User(1, 'bogdan', 's3cret'),
User(2, 'georgi', "pass")
]
user_mapping = {u.username: u for u in users}
userid_mapping = {u.id: u for u in users}
| [
6738,
2836,
1330,
11787,
198,
198,
18417,
796,
685,
198,
220,
220,
220,
11787,
7,
16,
11,
705,
65,
519,
25604,
3256,
705,
82,
18,
66,
1186,
33809,
198,
220,
220,
220,
11787,
7,
17,
11,
705,
469,
2398,
72,
3256,
366,
6603,
4943,
... | 2.25 | 84 |
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Presubmit script for FFmpeg repository.
Does the following:
- Warns users that changes must be submitted via Gerrit.
- Warns users when a change is made without updating the README file.
"""
import re
import subprocess
def _WarnIfReadmeIsUnchanged(input_api, output_api):
"""Warn if the README file hasn't been updated with change notes."""
has_ffmpeg_changes = False
chromium_re = re.compile(r'.*[/\\]?chromium.*|PRESUBMIT.py$|.*\.chromium$')
readme_re = re.compile(r'.*[/\\]?chromium[/\\]patches[/\\]README$')
for f in input_api.AffectedFiles():
if readme_re.match(f.LocalPath()):
return []
if not has_ffmpeg_changes and not chromium_re.match(f.LocalPath()):
has_ffmpeg_changes = True
if not has_ffmpeg_changes:
return []
return [output_api.PresubmitPromptWarning('\n'.join([
'FFmpeg changes detected without any update to chromium/patches/README,',
'it\'s good practice to update this file with a note about your changes.'
]))]
def _WarnIfGenerateGnTestsFail(input_api, output_api):
"""Error if generate_gn.py was changed and tests are now failing."""
should_run_tests = False
generate_gn_re = re.compile(r'.*generate_gn.*\.py$')
for f in input_api.AffectedFiles():
if generate_gn_re.match(f.LocalPath()):
should_run_tests = True
break;
errors = []
if should_run_tests:
errors += input_api.RunTests(
input_api.canned_checks.GetUnitTests(
input_api, output_api,
['chromium/scripts/generate_gn_unittest.py']))
return errors
| [
2,
15069,
357,
66,
8,
2321,
383,
18255,
1505,
46665,
13,
1439,
2489,
10395,
13,
198,
2,
5765,
286,
428,
2723,
2438,
318,
21825,
416,
257,
347,
10305,
12,
7635,
5964,
326,
460,
307,
198,
2,
1043,
287,
262,
38559,
24290,
2393,
13,
1... | 2.748006 | 627 |
from os.path import join
from typing import Tuple
from constants import PATH, SCREEN_HEIGHT, SCREEN_WIDTH, WHITE
from pygame import Rect, Surface, display, image, mouse
from graphics.font import Font, FontType
class Screen:
""" This class is responsible for drawing elements on the screen """
__cursor: Surface = None
def __load_resources(self) -> None:
""" Pre-loads required resources like cursor texture """
self.__cursor = image.load(join(PATH, 'res', 'images', 'mouse_icon.png'))
def fill(self, color, rect: Tuple = None) -> Rect:
""" Fills screen surface with a given solid color """
return self.__canvas.fill(color, rect)
def blit(self, surface: Surface, dest: Tuple) -> Rect:
""" Draws an image onto screen surface """
return self.__canvas.blit(surface, dest)
def update_cursor(self):
""" Updates mouse cursor position """
self.blit(self.__cursor, mouse.get_pos())
return
def draw_string(self, string: str, dest: Tuple, font: FontType = FontType.MD, color: Tuple = WHITE) -> None:
""" Draws a text string onto screen surface """
text_surface = self.__font.draw_string(string, font, color)
self.blit(text_surface, dest)
| [
6738,
28686,
13,
6978,
1330,
4654,
198,
6738,
19720,
1330,
309,
29291,
198,
198,
6738,
38491,
1330,
46490,
11,
6374,
2200,
1677,
62,
13909,
9947,
11,
6374,
2200,
1677,
62,
54,
2389,
4221,
11,
44925,
198,
6738,
12972,
6057,
1330,
48599,
... | 2.676349 | 482 |
from qoalgo.base import JoinTree, Relation, QueryGraph
| [
6738,
10662,
78,
282,
2188,
13,
8692,
1330,
15251,
27660,
11,
4718,
341,
11,
43301,
37065,
628,
628,
628
] | 3.157895 | 19 |
import time
from datetime import datetime, date, timedelta
import six as six
"""默认的时间日期格式,项目中金融时间序列等时间相关默认格式"""
K_DEFAULT_DT_FMT2 = "%Y-%m-%d"
def str_to_datetime(date_str, fmt=K_DEFAULT_DT_FMT2, fix=True):
"""
将字符串日期格式转换成datetime.datetime对象 eg. '2016-01-01' -> datetime.datetime(2016, 1, 1, 0, 0)
:param date_str: %Y-%m-%d 形式str对象,eg. '2016-01-01'
:param fmt: 如date_str不是%Y-%m-%d形式,对应的格式str对象
:param fix: 是否修复日期不规范的写法,eg. 2016-1-1 fix 2016-01-01
:return: datetime.datetime对象,eg. datetime.datetime(2016, 1, 1, 0, 0)
"""
if fix and fmt == K_DEFAULT_DT_FMT2:
# 只针对%Y-%m-%d形式格式标准化日期格式
date_str = fix_date(date_str)
return datetime.strptime(date_str, fmt)
def date_str_to_int(date_str, split='-', fix=True):
"""
eg. 2016-01-01 -> 20160101
不使用时间api,直接进行字符串解析,执行效率高
:param date_str: %Y-%m-%d形式时间str对象
:param split: 年月日的分割符,默认'-'
:param fix: 是否修复日期不规范的写法,eg. 2016-1-1 fix 2016-01-01
:return: int类型时间
"""
if fix and split == '-':
# 只针对%Y-%m-%d形式格式标准化日期格式
date_str = fix_date(date_str)
string_date = date_str.replace(split, '')
return int(string_date)
def fix_date(date_str):
"""
修复日期不规范的写法:
eg. 2016-1-1 fix 2016-01-01
eg. 2016:01-01 fix 2016-01-01
eg. 2016,01 01 fix 2016-01-01
eg. 2016/01-01 fix 2016-01-01
eg. 2016/01/01 fix 2016-01-01
eg. 2016/1/1 fix 2016-01-01
eg. 2016:1:1 fix 2016-01-01
eg. 2016 1 1 fix 2016-01-01
eg. 2016 01 01 fix 2016-01-01
.............................
不使用时间api,直接进行字符串解析,执行效率高,注意fix_date内部会使用fmt_date
:param date_str: 检测需要修复的日期str对象或者int对象
:return: 修复了的日期str对象
"""
if date_str is not None:
# 如果是字符串先统一把除了数字之外的都干掉,变成干净的数字串
if isinstance(date_str, six.string_types):
# eg, 2016:01-01, 201601-01, 2016,01 01, 2016/01-01 -> 20160101
date_str = ''.join(list(filter(lambda c: c.isdigit(), date_str)))
# 再统一确定%Y-%m-%d形式
date_str = fmt_date(date_str)
y, m, d = date_str.split('-')
if len(m) == 1:
# 月上补0
m = '0{}'.format(m)
if len(d) == 1:
# 日上补0
d = '0{}'.format(d)
date_str = "%s-%s-%s" % (y, m, d)
return date_str
def fmt_date(convert_date):
"""
将时间格式如20160101转换为2016-01-01日期格式, 注意没有对如 201611
这样的做fix适配,外部需要明确知道参数的格式,针对特定格式,不使用时间api,
直接进行字符串解析,执行效率高
:param convert_date: 时间格式如20160101所示,int类型或者str类型对象
:return: %Y-%m-%d日期格式str类型对象
"""
if isinstance(convert_date, float):
# float先转换int
convert_date = int(convert_date)
convert_date = str(convert_date)
if len(convert_date) > 8 and convert_date.startswith('20'):
# eg '20160310000000000'
convert_date = convert_date[:8]
if '-' not in convert_date:
if len(convert_date) == 8:
# 20160101 to 2016-01-01
convert_date = "%s-%s-%s" % (convert_date[0:4],
convert_date[4:6], convert_date[6:8])
elif len(convert_date) == 6:
# 201611 to 2016-01-01
convert_date = "%s-0%s-0%s" % (convert_date[0:4],
convert_date[4:5], convert_date[5:6])
else:
raise ValueError('fmt_date: convert_date fmt error {}'.format(convert_date))
return convert_date
#如:03:10AM, 10:35PM 转换成date
if __name__ == "__main__":
print(DateUtil.getTodayStr() )
print(DateUtil.getDatetimeToday())
print(DateUtil.getDatetimeYesterdayStr( DateUtil.getDatetimeToday()))
print(DateUtil.format_date('07-02 06:00'))
print(DateUtil.datetime_toString(datetime.now()))
print(DateUtil.string_toDatetime(DateUtil.format_date('07-02 06:00')))
print(DateUtil.string_toTimestamp(DateUtil.format_date('07-02 06:00')))
print(DateUtil.timestamp_toString(DateUtil.string_toTimestamp(DateUtil.format_date('07-02 06:00'))))
print(DateUtil.date_str_to_int('2007-07-07'))
print(DateUtil.date_to_millisecond(str(DateUtil.date_str_to_int('2007-07-07'))))
print(abbr_to_normal('03:35AM'))
gen = DateUtil.getNextHalfYear( DateUtil.string_toDate('2016-01-01'),DateUtil.string_toDate('2018-01-01') )
while True:
try:
end = next(gen)
print( end )
print( DateUtil.getDatetimeFutureStr(DateUtil.string_toDate(end),1) )
except StopIteration as e:
print(e)
break
| [
11748,
640,
198,
6738,
4818,
8079,
1330,
4818,
8079,
11,
3128,
11,
28805,
12514,
198,
11748,
2237,
355,
2237,
628,
198,
37811,
165,
119,
246,
164,
106,
97,
21410,
33768,
114,
29785,
112,
33768,
98,
17312,
253,
43718,
120,
28156,
237,
... | 1.538617 | 2,978 |
from django.db import models
from mission.models import Vehicule, Conducteur
from users.models import User
# Create your models here.
| [
6738,
42625,
14208,
13,
9945,
1330,
4981,
198,
6738,
4365,
13,
27530,
1330,
15118,
291,
2261,
11,
28579,
23365,
198,
6738,
2985,
13,
27530,
1330,
11787,
198,
2,
13610,
534,
4981,
994,
13,
628,
628,
198
] | 3.833333 | 36 |
"""
=========================================================================
MultiCacheTestCases.py
=========================================================================
Test cases for multicache configs
Author : Xiaoyu Yan (xy97), Eric Tang (et396)
Date : 13 April 2020
"""
import pytest
from test.sim_utils import (
mreq, resp, CacheReqType, CacheRespType, MemReqType, MemRespType, CacheTestParams
)
# Main test memory for dmapped tests
| [
37811,
198,
23926,
2559,
28,
198,
15237,
30562,
14402,
34,
1386,
13,
9078,
198,
23926,
2559,
28,
198,
14402,
2663,
329,
47368,
4891,
4566,
82,
198,
198,
13838,
1058,
22450,
726,
84,
10642,
357,
5431,
5607,
828,
7651,
18816,
357,
316,
... | 4.109091 | 110 |
import requests, json
from os.path import exists
from decouple import config
import logging
OPEN_WEATHER_MAP_API_URL = "https://api.openweathermap.org"
with open('cache/services.json') as json_file:
service_config = json.load(json_file)
configs = [service['configs'] for service in service_config['services'] if service['service'] == 'weather'][0]
# Request data from API
# Get data from cache file
# Get the current weather
# Get the daily forecast
| [
11748,
7007,
11,
33918,
198,
6738,
28686,
13,
6978,
1330,
7160,
198,
6738,
875,
43846,
1330,
4566,
198,
11748,
18931,
198,
198,
3185,
1677,
62,
8845,
45226,
62,
33767,
62,
17614,
62,
21886,
796,
366,
5450,
1378,
15042,
13,
9654,
23563,
... | 3.333333 | 138 |
from typing import List
from base import version
| [
198,
198,
6738,
19720,
1330,
7343,
198,
198,
6738,
2779,
1330,
2196,
628,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
198,
220,
220,
220,
220,
220,
220,
220,
220
] | 2.176471 | 34 |
import re
txt = "hello world"
# Check if the string ends with 'world':
x = re.findall("world$", txt)
if x:
print("Yes, the string ends with 'world'")
else:
print("No match")
# Author: Bryan G
| [
11748,
302,
198,
198,
14116,
796,
366,
31373,
995,
1,
198,
198,
2,
6822,
611,
262,
4731,
5645,
351,
705,
6894,
10354,
198,
198,
87,
796,
302,
13,
19796,
439,
7203,
6894,
3,
1600,
256,
742,
8,
198,
361,
2124,
25,
198,
220,
220,
2... | 2.649351 | 77 |
from debug import logger
from messagetypes import MsgBase
| [
6738,
14257,
1330,
49706,
198,
6738,
2085,
363,
2963,
12272,
1330,
6997,
70,
14881,
198
] | 3.866667 | 15 |
# -*- coding: utf-8 -*-
# Copyright 2018 The Blueoil Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Base IO module."""
from abc import ABCMeta, abstractmethod
from core.model import Model
class BaseIO(metaclass=ABCMeta):
"""Base class for model IO."""
@abstractmethod
def read(self, path: str) -> Model:
"""Read a model.
Parameters
----------
path : str
Path to the file to be read
Returns
-------
model : Model
The model
"""
pass
@abstractmethod
def write(self, model: Model, path: str) -> None:
"""Write the model to a file.
Parameters
----------
model : Model
Model to be written
path : str
Path to the written file
"""
pass
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
15069,
2864,
383,
4518,
9437,
46665,
13,
1439,
6923,
33876,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,... | 2.863366 | 505 |
import boto3
import json | [
11748,
275,
2069,
18,
198,
11748,
33918
] | 3.428571 | 7 |
# Generated by Django 3.0.6 on 2020-12-18 03:04
from django.db import migrations
| [
2,
2980,
515,
416,
37770,
513,
13,
15,
13,
21,
319,
12131,
12,
1065,
12,
1507,
7643,
25,
3023,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
628
] | 2.766667 | 30 |